repo_name: string (length 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
rajatscibi/chitra
[ "1543805f1401c571e516e47ab1c8a83b93dd657c" ]
[ "tests/image/test_image.py" ]
[ "from unittest.mock import MagicMock\n\nimport numpy as np\nfrom PIL import Image\n\nfrom chitra.image.image import Chitra, _cache_image\n\nurl = (\n \"https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/logo.png\"\n)\nimage = Chitra(url, cache=True)\n\n\ndef test__load_image():\n url = \"https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/logo.png\"\n image = Chitra(url, cache=True)\n assert isinstance(image.image, Image.Image)\n\n\ndef test_numpy():\n assert isinstance(image.numpy(), np.ndarray)\n\n\ndef test_to_tensor():\n assert True\n\n\ndef test_shape():\n assert len(image.shape) == 3\n\n\ndef test_size():\n assert len(image.size) == 2\n\n\ndef test_imshow():\n assert True\n\n\ndef test_draw_boxes():\n assert True\n\n\ndef test_resize_image_with_bbox():\n box = [10, 20, 30, 40]\n label = [\"chitra\"]\n dummy = np.random.randn(100, 100, 3).astype(\"uint8\")\n image = Chitra(dummy, bboxes=box, labels=label)\n image.resize_image_with_bbox((10, 10))\n rescaled_bounding_box = image.bboxes[0]\n\n assert np.isclose(rescaled_bounding_box.x1, 1)\n assert np.isclose(rescaled_bounding_box.y1, 2)\n assert np.isclose(rescaled_bounding_box.x2, 3)\n assert np.isclose(rescaled_bounding_box.y2, 4)\n\n\ndef test__cache_image():\n image = MagicMock()\n image.save = MagicMock()\n _cache_image(image, \"test_image.jpg\")\n image.save.assert_called_once()\n\n\ndef test_image_resize():\n image = Chitra(url, cache=True)\n image.resize((224, 224))\n assert image.shape[:2] == (224, 224)\n" ]
[ [ "numpy.random.randn", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
notZaki/niworkflows
[ "c2d7fade510abed47d3af16258a6a3e30c4e0040" ]
[ "niworkflows/interfaces/nilearn.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Utilities based on nilearn.\"\"\"\nimport os\nimport nibabel as nb\nimport numpy as np\nfrom skimage import morphology as sim\nfrom scipy.ndimage.morphology import binary_fill_holes, binary_dilation\n\nfrom nilearn import __version__ as NILEARN_VERSION\nfrom nilearn.masking import compute_epi_mask\nfrom nilearn.image import concat_imgs\n\nfrom nipype import logging\nfrom nipype.utils.filemanip import fname_presuffix\nfrom nipype.interfaces.base import (\n traits,\n isdefined,\n TraitedSpec,\n BaseInterfaceInputSpec,\n File,\n InputMultiPath,\n SimpleInterface,\n)\nfrom nipype.interfaces.mixins import reporting\nfrom .reportlets import base as nrb\n\n\nLOGGER = logging.getLogger(\"nipype.interface\")\n__all__ = [\"NILEARN_VERSION\", \"MaskEPI\", \"Merge\", \"ComputeEPIMask\"]\n\n\nclass _MaskEPIInputSpec(BaseInterfaceInputSpec):\n in_files = InputMultiPath(\n File(exists=True), mandatory=True, desc=\"input EPI or list of files\"\n )\n lower_cutoff = traits.Float(0.2, usedefault=True)\n upper_cutoff = traits.Float(0.85, usedefault=True)\n connected = traits.Bool(True, usedefault=True)\n enhance_t2 = traits.Bool(\n False, usedefault=True, desc=\"enhance T2 contrast on image\"\n )\n opening = traits.Int(2, usedefault=True)\n closing = traits.Bool(True, usedefault=True)\n fill_holes = traits.Bool(True, usedefault=True)\n exclude_zeros = traits.Bool(False, usedefault=True)\n ensure_finite = traits.Bool(True, usedefault=True)\n target_affine = traits.Either(\n None, traits.File(exists=True), default=None, usedefault=True\n )\n target_shape = traits.Either(\n None, traits.File(exists=True), default=None, usedefault=True\n )\n no_sanitize = traits.Bool(False, usedefault=True)\n\n\nclass _MaskEPIOutputSpec(TraitedSpec):\n out_mask = File(exists=True, desc=\"output mask\")\n\n\nclass MaskEPI(SimpleInterface):\n \"\"\"Run Nilearn's compute_epi_mask.\"\"\"\n\n input_spec = _MaskEPIInputSpec\n output_spec = _MaskEPIOutputSpec\n\n def _run_interface(self, runtime):\n\n in_files = self.inputs.in_files\n\n if self.inputs.enhance_t2:\n in_files = [_enhance_t2_contrast(f, newpath=runtime.cwd) for f in in_files]\n\n masknii = compute_epi_mask(\n in_files,\n lower_cutoff=self.inputs.lower_cutoff,\n upper_cutoff=self.inputs.upper_cutoff,\n connected=self.inputs.connected,\n opening=self.inputs.opening,\n exclude_zeros=self.inputs.exclude_zeros,\n ensure_finite=self.inputs.ensure_finite,\n target_affine=self.inputs.target_affine,\n target_shape=self.inputs.target_shape,\n )\n\n if self.inputs.closing:\n closed = sim.binary_closing(\n np.asanyarray(masknii.dataobj).astype(np.uint8), sim.ball(1)\n ).astype(np.uint8)\n masknii = masknii.__class__(closed, masknii.affine, masknii.header)\n\n if self.inputs.fill_holes:\n filled = binary_fill_holes(\n np.asanyarray(masknii.dataobj).astype(np.uint8), sim.ball(6)\n ).astype(np.uint8)\n masknii = masknii.__class__(filled, masknii.affine, masknii.header)\n\n if self.inputs.no_sanitize:\n in_file = self.inputs.in_files\n if isinstance(in_file, list):\n in_file = in_file[0]\n nii = nb.load(in_file)\n qform, code = nii.get_qform(coded=True)\n masknii.set_qform(qform, int(code))\n sform, code = nii.get_sform(coded=True)\n masknii.set_sform(sform, int(code))\n\n self._results[\"out_mask\"] = fname_presuffix(\n self.inputs.in_files[0], suffix=\"_mask\", newpath=runtime.cwd\n )\n masknii.to_filename(self._results[\"out_mask\"])\n return runtime\n\n\nclass 
_MergeInputSpec(BaseInterfaceInputSpec):\n in_files = InputMultiPath(\n File(exists=True), mandatory=True, desc=\"input list of files to merge\"\n )\n dtype = traits.Enum(\n \"f4\",\n \"f8\",\n \"u1\",\n \"u2\",\n \"u4\",\n \"i2\",\n \"i4\",\n usedefault=True,\n desc=\"numpy dtype of output image\",\n )\n header_source = File(\n exists=True, desc=\"a Nifti file from which the header should be copied\"\n )\n compress = traits.Bool(\n True, usedefault=True, desc=\"Use gzip compression on .nii output\"\n )\n\n\nclass _MergeOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc=\"output merged file\")\n\n\nclass Merge(SimpleInterface):\n \"\"\"Run Nilearn's concat_imgs.\"\"\"\n\n input_spec = _MergeInputSpec\n output_spec = _MergeOutputSpec\n\n def _run_interface(self, runtime):\n ext = \".nii.gz\" if self.inputs.compress else \".nii\"\n self._results[\"out_file\"] = fname_presuffix(\n self.inputs.in_files[0],\n suffix=\"_merged\" + ext,\n newpath=runtime.cwd,\n use_ext=False,\n )\n new_nii = concat_imgs(self.inputs.in_files, dtype=self.inputs.dtype)\n\n if isdefined(self.inputs.header_source):\n src_hdr = nb.load(self.inputs.header_source).header\n new_nii.header.set_xyzt_units(t=src_hdr.get_xyzt_units()[-1])\n new_nii.header.set_zooms(\n list(new_nii.header.get_zooms()[:3]) + [src_hdr.get_zooms()[3]]\n )\n\n new_nii.to_filename(self._results[\"out_file\"])\n\n return runtime\n\n\nclass _ComputeEPIMaskInputSpec(nrb._SVGReportCapableInputSpec, BaseInterfaceInputSpec):\n in_file = File(exists=True, desc=\"3D or 4D EPI file\")\n dilation = traits.Int(desc=\"binary dilation on the nilearn output\")\n\n\nclass _ComputeEPIMaskOutputSpec(reporting.ReportCapableOutputSpec):\n mask_file = File(exists=True, desc=\"Binary brain mask\")\n\n\nclass ComputeEPIMask(nrb.SegmentationRC):\n input_spec = _ComputeEPIMaskInputSpec\n output_spec = _ComputeEPIMaskOutputSpec\n\n def _run_interface(self, runtime):\n orig_file_nii = nb.load(self.inputs.in_file)\n in_file_data = orig_file_nii.get_fdata()\n\n # pad the data to avoid the mask estimation running into edge effects\n in_file_data_padded = np.pad(\n in_file_data, (1, 1), \"constant\", constant_values=(0, 0)\n )\n\n padded_nii = nb.Nifti1Image(\n in_file_data_padded, orig_file_nii.affine, orig_file_nii.header\n )\n\n mask_nii = compute_epi_mask(padded_nii, exclude_zeros=True)\n\n mask_data = np.asanyarray(mask_nii.dataobj).astype(np.uint8)\n if isdefined(self.inputs.dilation):\n mask_data = binary_dilation(mask_data).astype(np.uint8)\n\n # reverse image padding\n mask_data = mask_data[1:-1, 1:-1, 1:-1]\n\n # exclude zero and NaN voxels\n mask_data[in_file_data == 0] = 0\n mask_data[np.isnan(in_file_data)] = 0\n\n better_mask = nb.Nifti1Image(\n mask_data, orig_file_nii.affine, orig_file_nii.header\n )\n better_mask.set_data_dtype(np.uint8)\n better_mask.to_filename(\"mask_file.nii.gz\")\n\n self._mask_file = os.path.join(runtime.cwd, \"mask_file.nii.gz\")\n\n runtime.returncode = 0\n return super(ComputeEPIMask, self)._run_interface(runtime)\n\n def _list_outputs(self):\n outputs = super(ComputeEPIMask, self)._list_outputs()\n outputs[\"mask_file\"] = self._mask_file\n return outputs\n\n def _post_run_hook(self, runtime):\n \"\"\"Prepare report generation post-hook.\"\"\"\n self._anat_file = self.inputs.in_file\n self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file\n self._seg_files = [self._mask_file]\n self._masked = True\n\n LOGGER.info(\n 'Generating report for nilearn.compute_epi_mask. 
file \"%s\", and mask file \"%s\"',\n self._anat_file,\n self._mask_file,\n )\n\n return super(ComputeEPIMask, self)._post_run_hook(runtime)\n\n\ndef _enhance_t2_contrast(in_file, newpath=None, offset=0.5):\n \"\"\"\n Enhance the T2* contrast of an EPI dataset.\n\n Performs a logarithmic transformation of intensity that\n effectively splits brain and background and makes the\n overall distribution more Gaussian.\n \"\"\"\n out_file = fname_presuffix(in_file, suffix=\"_t1enh\", newpath=newpath)\n nii = nb.load(in_file)\n data = nii.get_fdata()\n maxd = data.max()\n newdata = np.log(offset + data / maxd)\n newdata -= newdata.min()\n newdata *= maxd / newdata.max()\n nii = nii.__class__(newdata, nii.affine, nii.header)\n nii.to_filename(out_file)\n return out_file\n" ]
[ [ "numpy.log", "scipy.ndimage.morphology.binary_dilation", "numpy.pad", "numpy.isnan", "numpy.asanyarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
skyeeiskowitz/MLPrimitives
[ "276926afb518c70d9445cac926587f1b1e398e6e" ]
[ "tests/custom/test_timeseries_preprocessing.py" ]
[ "from unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.testing import assert_allclose\n\nfrom mlprimitives.custom.timeseries_preprocessing import (\n cutoff_window_sequences, intervals_to_mask, rolling_window_sequences, time_segments_aggregate,\n time_segments_average)\n\n\nclass IntervalsToMaskTest(TestCase):\n\n def _run(self, index, intervals, expected):\n mask = intervals_to_mask(index, intervals)\n\n assert_allclose(mask, expected)\n\n def test_no_intervals(self):\n index = np.array([1, 2, 3, 4])\n intervals = None\n expected = np.array([False, False, False, False])\n self._run(index, intervals, expected)\n\n def test_empty_list(self):\n index = np.array([1, 2, 3, 4])\n intervals = list()\n expected = np.array([False, False, False, False])\n self._run(index, intervals, expected)\n\n def test_empty_array(self):\n index = np.array([1, 2, 3, 4])\n intervals = np.array([])\n expected = np.array([False, False, False, False])\n self._run(index, intervals, expected)\n\n def test_one_interval(self):\n index = np.array([1, 2, 3, 4])\n intervals = np.array([[2, 3]])\n expected = np.array([False, True, True, False])\n self._run(index, intervals, expected)\n\n def test_two_intervals(self):\n index = np.array([1, 2, 3, 4, 5, 6, 7])\n intervals = np.array([[2, 3], [5, 6]])\n expected = np.array([False, True, True, False, True, True, False])\n self._run(index, intervals, expected)\n\n def test_two_intervals_list(self):\n index = np.array([1, 2, 3, 4, 5, 6, 7])\n intervals = [[2, 3], [5, 6]]\n expected = np.array([False, True, True, False, True, True, False])\n self._run(index, intervals, expected)\n\n def test_start_index(self):\n index = np.array([1, 2, 3, 4])\n intervals = [[1, 2]]\n expected = np.array([True, True, False, False])\n self._run(index, intervals, expected)\n\n def test_end_index(self):\n index = np.array([1, 2, 3, 4])\n intervals = [[3, 4]]\n expected = np.array([False, False, True, True])\n self._run(index, intervals, expected)\n\n def test_whole_index(self):\n index = np.array([1, 2, 3, 4])\n intervals = [[1, 4]]\n expected = np.array([True, True, True, True])\n self._run(index, intervals, expected)\n\n def test_exceed_index_start(self):\n index = np.array([2, 3, 4])\n intervals = [[1, 3]]\n expected = np.array([True, True, False])\n self._run(index, intervals, expected)\n\n def test_exceed_index_end(self):\n index = np.array([2, 3, 4])\n intervals = [[3, 5]]\n expected = np.array([False, True, True])\n self._run(index, intervals, expected)\n\n def test_exceed_index(self):\n index = np.array([2, 3, 4])\n intervals = [[1, 5]]\n expected = np.array([True, True, True])\n self._run(index, intervals, expected)\n\n\nclass RollingWindowSequencesTest(TestCase):\n\n def _run(self, X, index, expected_X, expected_y, expected_X_index, expected_y_index,\n window_size=2, target_size=1, step_size=1, target_column=0, drop=None,\n drop_windows=False):\n X, y, X_index, y_index = rolling_window_sequences(X, index, window_size, target_size,\n step_size, target_column, drop,\n drop_windows)\n assert_allclose(X.astype(float), expected_X)\n assert_allclose(y.astype(float), expected_y)\n assert_allclose(X_index, expected_X_index)\n assert_allclose(y_index, expected_y_index)\n\n def test_no_drop(self):\n X = np.array([[0.5], [1], [0.5], [1]])\n index = np.array([1, 2, 3, 4])\n expected_X = np.array([[[0.5], [1]], [[1], [0.5]]])\n expected_y = np.array([[0.5], [1]])\n expected_X_index = np.array([1, 2])\n expected_y_index = np.array([3, 4])\n self._run(X, index, expected_X, 
expected_y, expected_X_index, expected_y_index)\n\n def test_drop_mask(self):\n X = np.array([[0.5], [1], [0.5], [1], [0.5], [1], [0.5], [1], [0.5]])\n index = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n drop = np.array([False, False, False, True, True, False, False, False, False])\n expected_X = np.array([[[0.5], [1]], [[1], [0.5]], [[0.5], [1]]])\n expected_y = np.array([[0.5], [1], [0.5]])\n expected_X_index = np.array([1, 6, 7])\n expected_y_index = np.array([3, 8, 9])\n self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,\n drop=drop, drop_windows=True)\n\n def test_drop_float(self):\n X = np.array([[0.5], [0.5], [0.5], [1.0], [1.0], [0.5], [0.5], [0.5]])\n index = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n drop = 1.0\n expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])\n expected_y = np.array([[0.5], [0.5]])\n expected_X_index = np.array([1, 6])\n expected_y_index = np.array([3, 8])\n self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,\n drop=drop, drop_windows=True)\n\n def test_drop_None(self):\n X = np.array([[0.5], [0.5], [0.5], [None], [None], [0.5], [0.5], [0.5]])\n index = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n drop = None\n expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])\n expected_y = np.array([[0.5], [0.5]])\n expected_X_index = np.array([1, 6])\n expected_y_index = np.array([3, 8])\n self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,\n drop=drop, drop_windows=True)\n\n def test_drop_float_nan(self):\n X = np.array([[0.5], [0.5], [0.5], ['nan'], ['nan'], [0.5], [0.5], [0.5]]).astype(float)\n index = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n drop = float('nan')\n expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])\n expected_y = np.array([[0.5], [0.5]])\n expected_X_index = np.array([1, 6])\n expected_y_index = np.array([3, 8])\n self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,\n drop=drop, drop_windows=True)\n\n def test_drop_str(self):\n X = np.array([[0.5], [0.5], [0.5], ['test'], ['test'], [0.5], [0.5], [0.5]])\n index = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n drop = \"test\"\n expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])\n expected_y = np.array([[0.5], [0.5]])\n expected_X_index = np.array([1, 6])\n expected_y_index = np.array([3, 8])\n self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,\n drop=drop, drop_windows=True)\n\n def test_drop_bool(self):\n X = np.array([[0.5], [0.5], [0.5], [False], [False], [0.5], [0.5], [0.5]])\n index = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n drop = False\n expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])\n expected_y = np.array([[0.5], [0.5]])\n expected_X_index = np.array([1, 6])\n expected_y_index = np.array([3, 8])\n self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,\n drop=drop, drop_windows=True)\n\n\nclass TimeSegmentsAverageTest(TestCase):\n\n def _run(self, X, interval, expected_values, expected_index, time_column):\n values, index = time_segments_average(X, interval, time_column)\n\n assert_allclose(values, expected_values)\n assert_allclose(index, expected_index)\n\n def test_array(self):\n X = np.array([[1, 1], [2, 3], [3, 1], [4, 3]])\n interval = 2\n expected_values = np.array([[2], [2]])\n expected_index = np.array([1, 3])\n self._run(X, interval, expected_values, expected_index, time_column=0)\n\n def test_pandas_dataframe(self):\n X = pd.DataFrame([\n [1, 1],\n [2, 3],\n [3, 1],\n [4, 3]\n ], columns=['timestamp', 'value'])\n interval = 2\n 
expected_values = np.array([[2], [2]])\n expected_index = np.array([1, 3])\n self._run(X, interval, expected_values, expected_index, time_column=\"timestamp\")\n\n\nclass TimeSegmentsAggregateTest(TestCase):\n\n def _run(self, X, interval, expected_values, expected_index, time_column, method=['mean']):\n values, index = time_segments_aggregate(X, interval, time_column, method=method)\n\n assert_allclose(values, expected_values)\n assert_allclose(index, expected_index)\n\n def test_array(self):\n X = np.array([[1, 1], [2, 3], [3, 1], [4, 3]])\n interval = 2\n expected_values = np.array([[2], [2]])\n expected_index = np.array([1, 3])\n self._run(X, interval, expected_values, expected_index, time_column=0)\n\n def test_pandas_dataframe(self):\n X = pd.DataFrame([\n [1, 1],\n [2, 3],\n [3, 1],\n [4, 3]\n ], columns=['timestamp', 'value'])\n interval = 2\n expected_values = np.array([[2], [2]])\n expected_index = np.array([1, 3])\n self._run(X, interval, expected_values, expected_index, time_column=\"timestamp\")\n\n def test_multiple(self):\n X = np.array([[1, 1], [2, 3], [3, 1], [4, 3]])\n interval = 2\n expected_values = np.array([[2, 2], [2, 2]])\n expected_index = np.array([1, 3])\n self._run(X, interval, expected_values, expected_index, time_column=0,\n method=['mean', 'median'])\n\n\nclass CutoffWindowSequencesTest(TestCase):\n\n def setUp(self):\n self.X = pd.DataFrame({\n 'id1': [1, 2],\n 'cutoff': pd.to_datetime(['2020-01-05', '2020-01-07'])\n }).set_index('cutoff')\n self.timeseries = pd.DataFrame({\n 'timestamp': list(pd.date_range(\n start='2020-01-01',\n end='2020-01-10',\n freq='1d'\n )) * 2,\n 'value1': np.arange(1, 21),\n 'value2': np.arange(21, 41),\n 'id1': [1] * 10 + [2] * 10\n }).set_index('timestamp')\n\n def test_cutoff_time_column(self):\n \"\"\"Passing cutoff_time. The indicated column will be used as the cutoff time.\"\"\"\n # setup\n timeseries = self.timeseries\n X = self.X.reset_index()\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size=3,\n cutoff_time='cutoff',\n )\n\n # assert\n expected_array = np.array([\n [[2, 22],\n [3, 23],\n [4, 24]],\n [[14, 34],\n [15, 35],\n [16, 36]]\n ])\n\n assert_allclose(array, expected_array)\n\n def test_time_index_column(self):\n \"\"\"Passing time_index. 
The indicated column will be used as the timeseries index.\"\"\"\n # setup\n X = self.X\n timeseries = self.timeseries.reset_index()\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size=3,\n time_index='timestamp',\n )\n\n # assert\n expected_array = np.array([\n [[2, 22],\n [3, 23],\n [4, 24]],\n [[14, 34],\n [15, 35],\n [16, 36]]\n ])\n\n assert_allclose(array, expected_array)\n\n def test_window_size_integer(self):\n \"\"\"window_size accepts integer.\"\"\"\n # setup\n X = self.X\n timeseries = self.timeseries\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size=3,\n )\n\n # assert\n expected_array = np.array([\n [[2, 22],\n [3, 23],\n [4, 24]],\n [[14, 34],\n [15, 35],\n [16, 36]]\n ])\n\n assert_allclose(array, expected_array)\n\n def test_window_size_string(self):\n \"\"\"window_size accepts string.\"\"\"\n # setup\n X = self.X\n timeseries = self.timeseries\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size='3d',\n )\n\n # assert\n expected_array = np.array([\n [[2, 22],\n [3, 23],\n [4, 24]],\n [[14, 34],\n [15, 35],\n [16, 36]]\n ])\n\n assert_allclose(array, expected_array)\n\n def test_window_size_timedelta(self):\n \"\"\"window_size accepts Timedelta object.\"\"\"\n # setup\n X = self.X\n timeseries = self.timeseries\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size=pd.Timedelta(days=3),\n )\n\n # assert\n expected_array = np.array([\n [[2, 22],\n [3, 23],\n [4, 24]],\n [[14, 34],\n [15, 35],\n [16, 36]]\n ])\n\n assert_allclose(array, expected_array)\n\n def test_not_enough_data(self):\n \"\"\"If there is not enough data for the given window_size, shape changes.\"\"\"\n # setup\n X = self.X\n timeseries = self.timeseries\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size=5,\n )\n\n # assert\n assert len(array) == 2\n\n expected_array = np.array([\n np.array([\n [1, 21],\n [2, 22],\n [3, 23],\n [4, 24]\n ]),\n np.array([\n [12, 32],\n [13, 33],\n [14, 34],\n [15, 35],\n [16, 36]\n ])\n ])\n\n assert_allclose(\n array[0],\n expected_array[0]\n )\n\n assert_allclose(\n array[1],\n expected_array[1]\n )\n\n def test_cutoff_time_only(self):\n \"\"\"Test X without any other column than cutoff_time.\"\"\"\n # setup\n X = self.X\n del X['id1']\n timeseries = self.timeseries\n del timeseries['id1']\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size=3,\n )\n\n # assert\n expected_array = np.array([\n [[12, 32],\n [13, 33],\n [14, 34]],\n [[14, 34],\n [15, 35],\n [16, 36]]\n ])\n\n assert_allclose(array, expected_array)\n\n def test_multiple_filter(self):\n \"\"\"Test X with two identifier columns.\"\"\"\n # setup\n X = self.X\n X['id2'] = [3, 4]\n timeseries = self.timeseries\n timeseries['id2'] = [3, 4] * 10\n\n # run\n array = cutoff_window_sequences(\n X,\n timeseries,\n window_size=2,\n )\n\n # assert\n expected_array = np.array([\n [[1, 21],\n [3, 23]],\n [[14, 34],\n [16, 36]]\n ])\n\n assert_allclose(array, expected_array)\n" ]
[ [ "pandas.to_datetime", "pandas.date_range", "numpy.arange", "pandas.Timedelta", "pandas.DataFrame", "numpy.testing.assert_allclose", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
sotte/great_expectations
[ "5dd18f0d7803ef2c257d91178e8c74c1c11d7106" ]
[ "great_expectations/dataset/util.py" ]
[ "# Utility methods for dealing with Dataset objects\n\nfrom __future__ import division\n\nimport decimal\n\nfrom six import string_types, integer_types\n\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport numpy as np\nimport warnings\nimport sys\nimport copy\nimport datetime\n\nfrom functools import wraps\n\n\ndef parse_result_format(result_format):\n \"\"\"This is a simple helper utility that can be used to parse a string result_format into the dict format used\n internally by great_expectations. It is not necessary but allows shorthand for result_format in cases where\n there is no need to specify a custom partial_unexpected_count.\"\"\"\n if isinstance(result_format, string_types):\n result_format = {\n 'result_format': result_format,\n 'partial_unexpected_count': 20\n }\n else:\n if 'partial_unexpected_count' not in result_format:\n result_format['partial_unexpected_count'] = 20\n\n return result_format\n\nclass DotDict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n\n def __getattr__(self, attr):\n return self.get(attr)\n\n __setattr__= dict.__setitem__\n __delattr__= dict.__delitem__\n\n def __dir__(self):\n return self.keys()\n\n #Cargo-cultishly copied from: https://github.com/spindlelabs/pyes/commit/d2076b385c38d6d00cebfe0df7b0d1ba8df934bc\n def __deepcopy__(self, memo):\n return DotDict([(copy.deepcopy(k, memo), copy.deepcopy(v, memo)) for k, v in self.items()])\n\n\n\"\"\"Docstring inheriting descriptor. Note that this is not a docstring so that this is not added to @DocInherit-\\\ndecorated functions' hybrid docstrings.\n\nUsage::\n\n class Foo(object):\n def foo(self):\n \"Frobber\"\n pass\n\n class Bar(Foo):\n @DocInherit\n def foo(self):\n pass\n\n Now, Bar.foo.__doc__ == Bar().foo.__doc__ == Foo.foo.__doc__ == \"Frobber\"\n\n Original implementation cribbed from:\n https://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance,\n following a discussion on comp.lang.python that resulted in:\n http://code.activestate.com/recipes/576862/. 
Unfortunately, the\n original authors did not anticipate deep inheritance hierarchies, and\n we ran into a recursion issue when implementing custom subclasses of\n PandasDataset:\n https://github.com/great-expectations/great_expectations/issues/177.\n\n Our new homegrown implementation directly searches the MRO, instead\n of relying on super, and concatenates documentation together.\n\"\"\"\nclass DocInherit(object):\n\n def __init__(self, mthd):\n self.mthd = mthd\n self.name = mthd.__name__\n self.mthd_doc = mthd.__doc__\n\n def __get__(self, obj, cls):\n doc = self.mthd_doc if self.mthd_doc is not None else ''\n\n for parent in cls.mro():\n if self.name not in parent.__dict__:\n continue\n if parent.__dict__[self.name].__doc__ is not None:\n doc = doc + '\\n' + parent.__dict__[self.name].__doc__\n\n @wraps(self.mthd, assigned=('__name__', '__module__'))\n def f(*args, **kwargs):\n return self.mthd(obj, *args, **kwargs)\n\n f.__doc__ = doc\n return f\n\n\ndef recursively_convert_to_json_serializable(test_obj):\n \"\"\"\n Helper function to convert a dict object to one that is serializable\n\n Args:\n test_obj: an object to attempt to convert a corresponding json-serializable object\n\n Returns:\n (dict) A converted test_object\n\n Warning:\n test_obj may also be converted in place.\n\n \"\"\"\n # Validate that all aruguments are of approved types, coerce if it's easy, else exception\n # print(type(test_obj), test_obj)\n #Note: Not 100% sure I've resolved this correctly...\n try:\n if not isinstance(test_obj, list) and np.isnan(test_obj):\n # np.isnan is functionally vectorized, but we only want to apply this to single objects\n # Hence, why we test for `not isinstance(list))`\n return None\n except TypeError:\n pass\n except ValueError:\n pass\n\n if isinstance(test_obj, (string_types, integer_types, float, bool)):\n # No problem to encode json\n return test_obj\n\n elif isinstance(test_obj, dict):\n new_dict = {}\n for key in test_obj:\n new_dict[key] = recursively_convert_to_json_serializable(test_obj[key])\n\n return new_dict\n\n elif isinstance(test_obj, (list, tuple, set)):\n new_list = []\n for val in test_obj:\n new_list.append(recursively_convert_to_json_serializable(val))\n\n return new_list\n\n elif isinstance(test_obj, (np.ndarray, pd.Index)):\n #test_obj[key] = test_obj[key].tolist()\n ## If we have an array or index, convert it first to a list--causing coercion to float--and then round\n ## to the number of digits for which the string representation will equal the float representation\n return [recursively_convert_to_json_serializable(x) for x in test_obj.tolist()]\n\n #Note: This clause has to come after checking for np.ndarray or we get:\n # `ValueError: The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()`\n elif test_obj == None:\n # No problem to encode json\n return test_obj\n\n elif isinstance(test_obj, (datetime.datetime, datetime.date)):\n return str(test_obj)\n\n # Use built in base type from numpy, https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html\n # https://github.com/numpy/numpy/pull/9505\n elif np.issubdtype(type(test_obj), np.bool_):\n return bool(test_obj)\n\n elif np.issubdtype(type(test_obj), np.integer) or np.issubdtype(type(test_obj), np.uint):\n return int(test_obj)\n\n elif np.issubdtype(type(test_obj), np.floating):\n # Note: Use np.floating to avoid FutureWarning from numpy\n return float(round(test_obj, sys.float_info.dig))\n\n # elif np.issubdtype(type(test_obj), np.complexfloating):\n # Note: Use np.complexfloating to avoid Future Warning from numpy\n # Complex numbers consist of two floating point numbers\n # return complex(\n # float(round(test_obj.real, sys.float_info.dig)),\n # float(round(test_obj.imag, sys.float_info.dig)))\n\n elif isinstance(test_obj, decimal.Decimal):\n return float(test_obj)\n\n else:\n raise TypeError('%s is of type %s which cannot be serialized.' % (str(test_obj), type(test_obj).__name__))\n\n\ndef is_valid_partition_object(partition_object):\n \"\"\"Tests whether a given object is a valid continuous or categorical partition object.\n :param partition_object: The partition_object to evaluate\n :return: Boolean\n \"\"\"\n if is_valid_continuous_partition_object(partition_object) or is_valid_categorical_partition_object(partition_object):\n return True\n return False\n\n\ndef is_valid_categorical_partition_object(partition_object):\n \"\"\"Tests whether a given object is a valid categorical partition object.\n :param partition_object: The partition_object to evaluate\n :return: Boolean\n \"\"\"\n if partition_object is None or (\"weights\" not in partition_object) or (\"values\" not in partition_object):\n return False\n # Expect the same number of values as weights; weights should sum to one\n if len(partition_object['values']) == len(partition_object['weights']) and \\\n np.allclose(np.sum(partition_object['weights']), 1):\n return True\n return False\n\n\ndef is_valid_continuous_partition_object(partition_object):\n \"\"\"Tests whether a given object is a valid continuous partition object.\n :param partition_object: The partition_object to evaluate\n :return: Boolean\n \"\"\"\n if (partition_object is None) or (\"weights\" not in partition_object) or (\"bins\" not in partition_object):\n return False\n # Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one\n if (len(partition_object['bins']) == (len(partition_object['weights']) + 1)) and \\\n np.all(np.diff(partition_object['bins']) > 0) and \\\n np.allclose(np.sum(partition_object['weights']), 1):\n return True\n return False\n\n\ndef categorical_partition_data(data):\n \"\"\"Convenience method for creating weights from categorical data.\n\n Args:\n data (list-like): The data from which to construct the estimate.\n\n Returns:\n A new partition object::\n\n {\n \"partition\": (list) The categorical values present in the data\n \"weights\": (list) The weights of the values in the partition.\n }\n \"\"\"\n\n # Make dropna explicit (even though it defaults to true)\n series = pd.Series(data)\n value_counts = series.value_counts(dropna=True)\n\n # Compute weights using denominator only of nonnull values\n null_indexes = series.isnull()\n nonnull_count = (null_indexes == False).sum()\n\n weights 
= value_counts.values / nonnull_count\n return {\n \"values\": value_counts.index.tolist(),\n \"weights\": weights\n }\n\n\ndef kde_partition_data(data, estimate_tails=True):\n \"\"\"Convenience method for building a partition and weights using a gaussian Kernel Density Estimate and default bandwidth.\n\n Args:\n data (list-like): The data from which to construct the estimate\n estimate_tails (bool): Whether to estimate the tails of the distribution to keep the partition object finite\n\n Returns:\n A new partition_object::\n\n {\n \"partition\": (list) The endpoints of the partial partition of reals,\n \"weights\": (list) The densities of the bins implied by the partition.\n }\n \"\"\"\n kde = stats.kde.gaussian_kde(data)\n evaluation_bins = np.linspace(start=np.min(data) - (kde.covariance_factor() / 2),\n stop=np.max(data) + (kde.covariance_factor() / 2),\n num=np.floor(((np.max(data) - np.min(data)) / kde.covariance_factor()) + 1 ).astype(int))\n cdf_vals = [kde.integrate_box_1d(-np.inf, x) for x in evaluation_bins]\n evaluation_weights = np.diff(cdf_vals)\n\n if estimate_tails:\n bins = np.concatenate(([np.min(data) - (1.5 * kde.covariance_factor())],\n evaluation_bins,\n [np.max(data) + (1.5 * kde.covariance_factor())]))\n else:\n bins = np.concatenate(([-np.inf], evaluation_bins, [np.inf]))\n\n weights = np.concatenate(([cdf_vals[0]], evaluation_weights, [1 - cdf_vals[-1]]))\n\n return {\n \"bins\": bins,\n \"weights\": weights\n }\n\n\ndef partition_data(data, bins='auto', n_bins=10):\n warnings.warn(\"partition_data is deprecated and will be removed. Use either continuous_partition_data or \\\n categorical_partition_data instead.\", DeprecationWarning)\n return continuous_partition_data(data, bins, n_bins)\n\n\ndef continuous_partition_data(data, bins='auto', n_bins=10):\n \"\"\"Convenience method for building a partition object on continuous data\n\n Args:\n data (list-like): The data from which to construct the estimate.\n bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins)\n n_bins (int): Ignored if bins is auto.\n\n Returns:\n A new partition_object::\n\n {\n \"bins\": (list) The endpoints of the partial partition of reals,\n \"weights\": (list) The densities of the bins implied by the partition.\n }\n \"\"\"\n if bins == 'uniform':\n bins = np.linspace(start=np.min(data), stop=np.max(data), num = n_bins+1)\n elif bins =='ntile':\n bins = np.percentile(data, np.linspace(start=0, stop=100, num = n_bins+1))\n elif bins != 'auto':\n raise ValueError(\"Invalid parameter for bins argument\")\n\n hist, bin_edges = np.histogram(data, bins, density=False)\n\n return {\n \"bins\": bin_edges,\n \"weights\": hist / len(data)\n }\n\n\ndef infer_distribution_parameters(data, distribution, params=None):\n \"\"\"Convenience method for determining the shape parameters of a given distribution\n\n Args:\n data (list-like): The data to build shape parameters from.\n distribution (string): Scipy distribution, determines which parameters to build.\n params (dict or None): The known parameters. Parameters given here will not be altered. 
\\\n Keep as None to infer all necessary parameters from the data data.\n\n Returns:\n A dictionary of named parameters::\n\n {\n \"mean\": (float),\n \"std_dev\": (float),\n \"loc\": (float),\n \"scale\": (float),\n \"alpha\": (float),\n \"beta\": (float),\n \"min\": (float),\n \"max\": (float),\n \"df\": (float)\n }\n\n See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html#scipy.stats.kstest\n \"\"\"\n\n if params is None:\n params = dict()\n elif not isinstance(params, dict):\n raise TypeError(\"params must be a dictionary object, see great_expectations documentation\")\n\n if 'mean' not in params.keys():\n params['mean'] = data.mean()\n\n if 'std_dev' not in params.keys():\n params['std_dev'] = data.std()\n\n if distribution == \"beta\":\n # scipy cdf(x, a, b, loc=0, scale=1)\n if 'alpha' not in params.keys():\n # from https://stats.stackexchange.com/questions/12232/calculating-the-parameters-of-a-beta-distribution-using-the-mean-and-variance\n params['alpha'] = (params['mean'] ** 2) * (\n ((1 - params['mean']) / params['std_dev'] ** 2) - (1 / params['mean']))\n if 'beta' not in params.keys():\n params['beta'] = params['alpha'] * ((1 / params['mean']) - 1)\n\n elif distribution == 'gamma':\n # scipy cdf(x, a, loc=0, scale=1)\n if 'alpha' not in params.keys():\n # Using https://en.wikipedia.org/wiki/Gamma_distribution\n params['alpha'] = (params['mean'] / params.get('scale', 1))\n\n\n #elif distribution == 'poisson':\n # if 'lambda' not in params.keys():\n # params['lambda'] = params['mean']\n\n elif distribution == 'uniform':\n # scipy cdf(x, loc=0, scale=1)\n if 'min' not in params.keys():\n if 'loc' in params.keys():\n params['min'] = params['loc']\n else:\n params['min'] = min(data)\n if 'max' not in params.keys():\n if 'scale' in params.keys():\n params['max'] = params['scale']\n else:\n params['max'] = max(data) - params['min']\n\n elif distribution == 'chi2':\n # scipy cdf(x, df, loc=0, scale=1)\n if 'df' not in params.keys():\n # from https://en.wikipedia.org/wiki/Chi-squared_distribution\n params['df'] = params['mean']\n\n # Expon only uses loc and scale, use default\n #elif distribution == 'expon':\n # scipy cdf(x, loc=0, scale=1)\n # if 'lambda' in params.keys():\n # Lambda is optional\n # params['scale'] = 1 / params['lambda']\n elif distribution is not 'norm':\n raise AttributeError(\"Unsupported distribution type. Please refer to Great Expectations Documentation\")\n\n params['loc'] = params.get('loc', 0)\n params['scale'] = params.get('scale', 1)\n\n return params\n\ndef _scipy_distribution_positional_args_from_dict(distribution, params):\n \"\"\"Helper function that returns positional arguments for a scipy distribution using a dict of parameters.\n\n See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\\\n to see an example of scipy's positional arguments. 
This function returns the arguments specified by the \\\n scipy.stat.distribution.cdf() for tha distribution.\n\n Args:\n distribution (string): \\\n The scipy distribution name.\n params (dict): \\\n A dict of named parameters.\n\n Raises:\n AttributeError: \\\n If an unsupported distribution is provided.\n \"\"\"\n\n params['loc'] = params.get('loc', 0)\n if 'scale' not in params:\n params['scale'] = 1\n\n if distribution == 'norm':\n return params['mean'], params['std_dev']\n elif distribution == 'beta':\n return params['alpha'], params['beta'], params['loc'], params['scale']\n elif distribution == 'gamma':\n return params['alpha'], params['loc'], params['scale']\n #elif distribution == 'poisson':\n # return params['lambda'], params['loc']\n elif distribution == 'uniform':\n return params['min'], params['max']\n elif distribution == 'chi2':\n return params['df'], params['loc'], params['scale']\n elif distribution == 'expon':\n return params['loc'], params['scale']\n\n\ndef validate_distribution_parameters(distribution, params):\n \"\"\"Ensures that necessary parameters for a distribution are present and that all parameters are sensical.\n\n If parameters necessary to construct a distribution are missing or invalid, this function raises ValueError\\\n with an informative description. Note that 'loc' and 'scale' are optional arguments, and that 'scale'\\\n must be positive.\n\n Args:\n distribution (string): \\\n The scipy distribution name, e.g. normal distribution is 'norm'.\n params (dict or list): \\\n The distribution shape parameters in a named dictionary or positional list form following the scipy \\\n cdf argument scheme.\n\n params={'mean': 40, 'std_dev': 5} or params=[40, 5]\n\n Exceptions:\n ValueError: \\\n With an informative description, usually when necessary parameters are omitted or are invalid.\n\n \"\"\"\n\n norm_msg = \"norm distributions require 0 parameters and optionally 'mean', 'std_dev'.\"\n beta_msg = \"beta distributions require 2 positive parameters 'alpha', 'beta' and optionally 'loc', 'scale'.\"\n gamma_msg = \"gamma distributions require 1 positive parameter 'alpha' and optionally 'loc','scale'.\"\n # poisson_msg = \"poisson distributions require 1 positive parameter 'lambda' and optionally 'loc'.\"\n uniform_msg = \"uniform distributions require 0 parameters and optionally 'loc', 'scale'.\"\n chi2_msg = \"chi2 distributions require 1 positive parameter 'df' and optionally 'loc', 'scale'.\"\n expon_msg = \"expon distributions require 0 parameters and optionally 'loc', 'scale'.\"\n\n if (distribution not in ['norm', 'beta', 'gamma', 'poisson', 'uniform', 'chi2', 'expon']):\n raise AttributeError(\"Unsupported distribution provided: %s\" % distribution)\n\n if isinstance(params, dict):\n # `params` is a dictionary\n if params.get(\"std_dev\", 1) <= 0 or params.get('scale', 1) <= 0:\n raise ValueError(\"std_dev and scale must be positive.\")\n\n # alpha and beta are required and positive\n if distribution == 'beta' and (params.get('alpha', -1) <= 0 or params.get('beta', -1) <= 0):\n raise ValueError(\"Invalid parameters: %s\" %beta_msg)\n\n # alpha is required and positive\n elif distribution == 'gamma' and params.get('alpha', -1) <= 0:\n raise ValueError(\"Invalid parameters: %s\" %gamma_msg)\n\n # lambda is a required and positive\n #elif distribution == 'poisson' and params.get('lambda', -1) <= 0:\n # raise ValueError(\"Invalid parameters: %s\" %poisson_msg)\n\n # df is necessary and required to be positve\n elif distribution == 'chi2' and params.get('df', 
-1) <= 0:\n raise ValueError(\"Invalid parameters: %s:\" %chi2_msg)\n\n elif isinstance(params, tuple) or isinstance(params, list):\n scale = None\n\n # `params` is a tuple or a list\n if distribution == 'beta':\n if len(params) < 2:\n raise ValueError(\"Missing required parameters: %s\" %beta_msg)\n if params[0] <= 0 or params[1] <= 0:\n raise ValueError(\"Invalid parameters: %s\" %beta_msg)\n if len(params) == 4:\n scale = params[3]\n elif len(params) > 4:\n raise ValueError(\"Too many parameters provided: %s\" %beta_msg)\n\n elif distribution == 'norm':\n if len(params) > 2:\n raise ValueError(\"Too many parameters provided: %s\" %norm_msg)\n if len(params) == 2:\n scale = params[1]\n\n elif distribution == 'gamma':\n if len(params) < 1:\n raise ValueError(\"Missing required parameters: %s\" %gamma_msg)\n if len(params) == 3:\n scale = params[2]\n if len(params) > 3:\n raise ValueError(\"Too many parameters provided: %s\" % gamma_msg)\n elif params[0] <= 0:\n raise ValueError(\"Invalid parameters: %s\" %gamma_msg)\n\n #elif distribution == 'poisson':\n # if len(params) < 1:\n # raise ValueError(\"Missing required parameters: %s\" %poisson_msg)\n # if len(params) > 2:\n # raise ValueError(\"Too many parameters provided: %s\" %poisson_msg)\n # elif params[0] <= 0:\n # raise ValueError(\"Invalid parameters: %s\" %poisson_msg)\n\n elif distribution == 'uniform':\n if len(params) == 2:\n scale = params[1]\n if len(params) > 2:\n raise ValueError(\"Too many arguments provided: %s\" %uniform_msg)\n\n elif distribution == 'chi2':\n if len(params) < 1:\n raise ValueError(\"Missing required parameters: %s\" %chi2_msg)\n elif len(params) == 3:\n scale = params[2]\n elif len(params) > 3:\n raise ValueError(\"Too many arguments provided: %s\" %chi2_msg)\n if params[0] <= 0:\n raise ValueError(\"Invalid parameters: %s\" %chi2_msg)\n\n elif distribution == 'expon':\n\n if len(params) == 2:\n scale = params[1]\n if len(params) > 2:\n raise ValueError(\"Too many arguments provided: %s\" %expon_msg)\n\n if scale is not None and scale <= 0:\n raise ValueError(\"std_dev and scale must be positive.\")\n\n else:\n raise ValueError(\n \"params must be a dict or list, or use ge.dataset.util.infer_distribution_parameters(data, distribution)\")\n\n return\n\n\ndef create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):\n \"\"\"Creates an identical expectation for each of the given columns with the specified arguments, if any.\n\n Args:\n df (great_expectations.dataset): A great expectations dataset object.\n columns (list): A list of column names represented as strings.\n expectation_type (string): The expectation type.\n\n Raises:\n KeyError if the provided column does not exist.\n AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.\n\n Returns:\n A list of expectation results.\n\n\n \"\"\"\n expectation = getattr(df, expectation_type)\n results = list()\n\n for column in columns:\n results.append(expectation(column, *args, **kwargs))\n\n return results\n" ]
[ [ "pandas.Series", "numpy.linspace", "numpy.min", "scipy.stats.kde.gaussian_kde", "numpy.isnan", "numpy.concatenate", "numpy.max", "numpy.diff", "numpy.histogram", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
ywz978020607/HESIC
[ "546e0c0788552caee4ac75a229558ff64f295916" ]
[ "compressai/ops/ops.py" ]
[ "# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\n\ndef ste_round(x):\n \"\"\"\n Rounding with non-zero gradients. Gradients are approximated by replacing\n the derivative by the identity function.\n\n Used in `\"Lossy Image Compression with Compressive Autoencoders\" <https://arxiv.org/abs/1703.00395>`_\n\n .. note::\n\n Implemented with the pytorch `detach()` reparametrization trick:\n\n `x_round = x_round - x.detach() + x`\n \"\"\"\n return torch.round(x) - x.detach() + x\n" ]
[ [ "torch.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cyndi5/analyze
[ "886200955ccc2bc2efd9d7eb1d02ff5a52318e68" ]
[ "analyze.py" ]
[ "import numpy as np\nfrom scipy import signal, optimize, stats, integrate, mean\nimport pandas as pd\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Uses Plotly Dash core to plot a CSV from Google Science Journal accelerometer readings.')\nparser.add_argument(\"--file\", required=True, default=None, type=str, help=\"path to CSV\")\n\nargs = parser.parse_args()\n\naccelerometer_df = pd.read_csv(args.file)\nprint(accelerometer_df.head(5))\nAccY = accelerometer_df.AccY - np.mean(accelerometer_df.AccY)\ntimestamp = np.subtract(accelerometer_df.timestamp, accelerometer_df.timestamp[0]) / 1000.0\nvel = (integrate.cumtrapz(AccY, timestamp) * 3600 / 1000).clip(0)\nprint(vel)\n\napp = dash.Dash(__name__)\napp.layout = html.Div(children=[\n html.H1(children='Google Science Accelerometer Recording Analysis'),\n\n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n\n dcc.Graph(\n id='example-graph',\n figure={\n 'data': [\n {'x': timestamp, 'y': AccY, 'type': 'line', 'name': 'AccY (m/s/s)'},\n {'x': timestamp, 'y': vel, 'type': 'line', 'name': 'Vel (km/h)'},\n ],\n 'layout': {\n 'title': 'Velocity and Acceleration of Automobile over Time'\n }\n }\n )\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "scipy.integrate.cumtrapz", "numpy.subtract", "pandas.read_csv", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
mdatres/quantlab
[ "09fb24ede78f49768f829afe0fac2ac291b8fd4f" ]
[ "systems/ILSVRC12/AlexNet/alexnet.py" ]
[ "# \n# alexnet.py\n# \n# Author(s):\n# Matteo Spallanzani <[email protected]>\n# \n# Copyright (c) 2020-2021 ETH Zurich.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n\nimport torch\nimport torch.nn as nn\n\n\nclass AlexNet(nn.Module):\n\n def __init__(self, use_bn: bool, num_classes: int = 1000, seed : int = -1) -> None:\n\n super(AlexNet, self).__init__()\n\n self.features = self._make_features(use_bn)\n self.avgpool = nn.AdaptiveAvgPool2d((6, 6))\n self.classifier = self._make_classifier(num_classes)\n self._initialize_weights(seed)\n\n def _make_features(self, use_bn: bool) -> nn.Sequential:\n\n modules = []\n\n # conv 1\n modules += [nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2, bias=not use_bn)]\n modules += [nn.BatchNorm2d(64)] if use_bn else []\n modules += [nn.ReLU(inplace=True)]\n # max pool\n modules += [nn.MaxPool2d(kernel_size=3, stride=2)]\n # conv 2\n modules += [nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=not use_bn)]\n modules += [nn.BatchNorm2d(192)] if use_bn else []\n modules += [nn.ReLU(inplace=True)]\n # max pool\n modules += [nn.MaxPool2d(kernel_size=3, stride=2)]\n # conv 3\n modules += [nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=not use_bn)]\n modules += [nn.BatchNorm2d(384)] if use_bn else []\n modules += [nn.ReLU(inplace=True)]\n # conv 4\n modules += [nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=not use_bn)]\n modules += [nn.BatchNorm2d(256)] if use_bn else []\n modules += [nn.ReLU(inplace=True)]\n # conv 5\n modules += [nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=not use_bn)]\n modules += [nn.BatchNorm2d(256)] if use_bn else []\n modules += [nn.ReLU(inplace=True)]\n # max pool\n modules += [nn.MaxPool2d(kernel_size=3, stride=2)]\n\n return nn.Sequential(*modules)\n\n def _make_classifier(self, num_classes: int) -> nn.Sequential:\n\n modules = []\n\n # dropout\n modules += [nn.Dropout()]\n # linear 1\n modules += [nn.Linear(256 * 6 * 6, 4096)]\n modules += [nn.ReLU(inplace=True)]\n # dropout\n modules += [nn.Dropout()]\n # linear 2\n modules += [nn.Linear(4096, 4096)]\n modules += [nn.ReLU(inplace=True)]\n # linear 3\n modules += [nn.Linear(4096, num_classes)]\n\n return nn.Sequential(*modules)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n\n x = self.features(x)\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n\n x = self.classifier(x)\n\n return x\n\n def _initialize_weights(self, seed: int = -1):\n\n if seed >= 0:\n torch.manual_seed(seed)\n\n for m in self.modules():\n\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.init.constant_", "torch.manual_seed", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.normal_", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kmkolasinski/model-optimization
[ "bd1ad8b72a5feb5d48bbedfaf85fe994d5c421db" ]
[ "tensorflow_model_optimization/python/core/quantization/keras/quantize_annotate_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Quantize Annotate Wrapper.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_model_optimization.python.core.quantization.keras import quantize_annotate\nfrom tensorflow_model_optimization.python.core.quantization.keras import quantize_config as quantize_config_mod\n\nkeras = tf.keras\ndeserialize_layer = tf.keras.layers.deserialize\nserialize_layer = tf.keras.layers.serialize\n\n\nclass QuantizeAnnotateTest(tf.test.TestCase):\n\n class TestQuantizeConfig(quantize_config_mod.QuantizeConfig):\n\n def get_weights_and_quantizers(self, layer):\n pass\n\n def get_activations_and_quantizers(self, layer):\n pass\n\n def set_quantize_weights(self, layer, quantize_weights):\n pass\n\n def set_quantize_activations(self, layer, quantize_activations):\n pass\n\n def get_output_quantizers(self, layer):\n pass\n\n def get_config(self):\n return {}\n\n def testAnnotatesKerasLayer(self):\n layer = keras.layers.Dense(5, activation='relu', input_shape=(10,))\n model = keras.Sequential([layer])\n\n quantize_config = self.TestQuantizeConfig()\n annotated_model = keras.Sequential([\n quantize_annotate.QuantizeAnnotate(\n layer, quantize_config=quantize_config, input_shape=(10,))\n ])\n\n annotated_layer = annotated_model.layers[0]\n self.assertEqual(layer, annotated_layer.layer)\n self.assertEqual(quantize_config, annotated_layer.quantize_config)\n\n # Annotated model should not affect computation. Returns same results.\n x_test = np.random.rand(10, 10)\n self.assertAllEqual(model.predict(x_test), annotated_model.predict(x_test))\n\n def testSerializationQuantizeAnnotate(self):\n input_shape = (2,)\n layer = keras.layers.Dense(3)\n wrapper = quantize_annotate.QuantizeAnnotate(\n layer=layer,\n quantize_config=self.TestQuantizeConfig(),\n input_shape=input_shape)\n\n custom_objects = {\n 'QuantizeAnnotate': quantize_annotate.QuantizeAnnotate,\n 'TestQuantizeConfig': self.TestQuantizeConfig\n }\n\n serialized_wrapper = serialize_layer(wrapper)\n with tf.keras.utils.custom_object_scope(custom_objects):\n wrapper_from_config = deserialize_layer(serialized_wrapper)\n\n self.assertEqual(wrapper_from_config.get_config(), wrapper.get_config())\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.random.rand", "tensorflow.keras.utils.custom_object_scope", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexandonian/lightning
[ "90350fd454cd7a51c35adadf5b9753868ac6dccd" ]
[ "lightning_classification/classification/make_lmdb.py" ]
[ "import numpy as np\nimport lmdb\nimport caffe\n\nN = 1000\n\n# Test Data\nX = np.zeros((N, 3, 32, 32), dtype=np.uint8)\ny = np.zeros(N, dtype=np.int64)\n\n# We need to prepare the database for the size. We'll set it 10 times\n# greater than what we theoretically need. There is little drawback to\n# setting this too big. If you still run into problem after raising\n# this, you might want to try saving fewer entries in a single\n# transaction.\nmap_size = X.nbytes * 10\n\nenv = lmdb.open('mylmdb', map_size=map_size)\n\nwith env.begin(write=True) as txn:\n # txn is a Transaction object\n for i in range(N):\n datum = caffe.proto.caffe_pb2.Datum()\n datum.channels = X.shape[1]\n datum.height = X.shape[2]\n datum.width = X.shape[3]\n datum.data = X[i].tobytes() # or .tostring() if numpy < 1.9\n datum.label = int(y[i])\n str_id = '{:08}'.format(i)\n\n # The encode is only essential in Python 3\n txn.put(str_id.encode('ascii'), datum.SerializeToString())" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eshanmherath/AV-Perception
[ "ec56065621141c436d8be39094f4505a6971e796" ]
[ "image_processing/basics/004_hough_transform.py" ]
[ "# You need to specify rho in units of pixels and theta in units of radians.\n\"\"\"\nSo, what are reasonable values? Well, rho takes a minimum value of 1,\nand a reasonable starting place for theta is 1 degree (pi/180 in radians).\nScale these values up to be more flexible in your definition of what constitutes a line\n\nThe threshold parameter specifies the minimum number of votes (intersections in a given grid cell) a candidate\nline needs to have to make it into the output. The empty np.array([]) is just a placeholder, no need to change it.\n min_line_length is the minimum length of a line (in pixels) that you will accept in the output,\n and max_line_gap is the maximum distance (again, in pixels) between segments that you will allow to be\n connected into a single line. You can then iterate through your output lines and draw them onto the\n image to see what you got!\n\"\"\"\n\n\n# Do relevant imports\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport os\n\nimage_path = os.path.join(os.getcwd(), \"../../samples/roads/road_1.jpg\")\n\nif not os.path.exists(image_path):\n print(\"Image does not exist!\")\n exit()\n\nimage = mpimg.imread(image_path)\n\n\ngray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)\n\n# Define a kernel size and apply Gaussian smoothing\nkernel_size = 5\nblur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)\n\n# Define our parameters for Canny and apply\nlow_threshold = 50\nhigh_threshold = 150\nmasked_edges = cv2.Canny(blur_gray, low_threshold, high_threshold)\n\n# Define the Hough transform parameters\n# Make a blank the same size as our image to draw on\nrho = 1\ntheta = np.pi/180\nthreshold = 15\nmin_line_length = 20\nmax_line_gap = 1\nline_image = np.copy(image)*0 #creating a blank to draw lines on\n\n# Run Hough on edge detected image\nlines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),\n min_line_length, max_line_gap)\n\n# Iterate over the output \"lines\" and draw lines on the blank\nfor line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)\n\n# Create a \"color\" binary image to combine with line image\ncolor_edges = np.dstack((masked_edges, masked_edges, masked_edges))\n\n# Draw the lines on the edge image\ncombo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)\nplt.imshow(combo)\nplt.show()" ]
[ [ "matplotlib.pyplot.imshow", "numpy.dstack", "matplotlib.image.imread", "numpy.copy", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
priya006/PythonProject
[ "fd8ac346e474f48a27cf2a7995fb9d18797bc5c6" ]
[ "2_ml/scores/score_logger.py" ]
[ "from statistics import mean\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom collections import deque\nimport os\nimport csv\nimport numpy as np\n\n# This code is from: https://github.com/gsurma/cartpole\n# Note: right now this is designed to run with only one sample exercise, so there are a few kinks for our usage\n\nSCORES_CSV_PATH = \"./scores/scores.csv\"\nSCORES_PNG_PATH = \"./scores/scores.png\"\nSOLVED_CSV_PATH = \"./scores/solved.csv\"\nSOLVED_PNG_PATH = \"./scores/solved.png\"\nAVERAGE_SCORE_TO_SOLVE = 195\nCONSECUTIVE_RUNS_TO_SOLVE = 100\n\n\nclass ScoreLogger:\n\n def __init__(self, env_name):\n self.scores = deque(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)\n self.env_name = env_name\n\n if os.path.exists(SCORES_PNG_PATH):\n os.remove(SCORES_PNG_PATH)\n if os.path.exists(SCORES_CSV_PATH):\n os.remove(SCORES_CSV_PATH)\n\n def add_score(self, score, run):\n self._save_csv(SCORES_CSV_PATH, score)\n self._save_png(input_path=SCORES_CSV_PATH,\n output_path=SCORES_PNG_PATH,\n x_label=\"runs\",\n y_label=\"scores\",\n average_of_n_last=CONSECUTIVE_RUNS_TO_SOLVE,\n show_goal=True,\n show_trend=True,\n show_legend=True)\n self.scores.append(score)\n mean_score = mean(self.scores)\n print (\"Scores: (min: \" + str(min(self.scores)) + \", avg: \" + str(mean_score) + \", max: \" + str(max(self.scores)) + \")\\n\")\n if mean_score >= AVERAGE_SCORE_TO_SOLVE and len(self.scores) >= CONSECUTIVE_RUNS_TO_SOLVE:\n solve_score = run-CONSECUTIVE_RUNS_TO_SOLVE\n print (\"Solved in \" + str(solve_score) + \" runs, \" + str(run) + \" total runs.\")\n self._save_csv(SOLVED_CSV_PATH, solve_score)\n self._save_png(input_path=SOLVED_CSV_PATH,\n output_path=SOLVED_PNG_PATH,\n x_label=\"trials\",\n y_label=\"steps before solve\",\n average_of_n_last=None,\n show_goal=False,\n show_trend=False,\n show_legend=False)\n exit()\n\n def _save_png(self, input_path, output_path, x_label, y_label, average_of_n_last, show_goal, show_trend, show_legend):\n x = []\n y = []\n with open(input_path, \"r\") as scores:\n reader = csv.reader(scores)\n data = list(reader)\n for i in range(0, len(data)):\n x.append(int(i))\n y.append(float(data[i][0]))\n\n plt.subplots()\n plt.plot(x, y, label=\"score per run\")\n\n average_range = average_of_n_last if average_of_n_last is not None else len(x)\n plt.plot(x[-average_range:], [np.mean(y[-average_range:])] * len(y[-average_range:]), linestyle=\"--\", label=\"last \" + str(average_range) + \" runs average\")\n\n if show_goal:\n plt.plot(x, [AVERAGE_SCORE_TO_SOLVE] * len(x), linestyle=\":\", label=str(AVERAGE_SCORE_TO_SOLVE) + \" score average goal\")\n\n if show_trend and len(x) > 1:\n trend_x = x[1:]\n z = np.polyfit(np.array(trend_x), np.array(y[1:]), 1)\n p = np.poly1d(z)\n plt.plot(trend_x, p(trend_x), linestyle=\"-.\", label=\"trend\")\n\n plt.title(self.env_name)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n if show_legend:\n plt.legend(loc=\"upper left\")\n\n plt.savefig(output_path, bbox_inches=\"tight\")\n plt.close()\n\n def _save_csv(self, path, score):\n if not os.path.exists(path):\n with open(path, \"w\"):\n pass\n scores_file = open(path, \"a\")\n with scores_file:\n writer = csv.writer(scores_file)\n writer.writerow([score])\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.poly1d", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stevenhurwitt/dealersocket-speedtest
[ "5a6536f4af686e3719e62b535681ce232ad653c1" ]
[ "IDR_Drop/db_connect.py" ]
[ "import numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom subprocess import Popen, PIPE\nimport matplotlib.pyplot as plt\nimport cx_Oracle\nimport time\nimport math\nimport os\n\n''' wrappers for database connections to SQL databases '''\n''' takes query & db as a string and returns a list of results '''\n\ndef OracleAPI(query, db):\n \n \n tppe = dict([('uid', 'tesi_interface'), ('pwd', 'peint88'), ('ip', '172.25.152.125'), ('port', '1700'), ('service_name', 'tppe.mytna.com')])\n \n lpss = dict([('uid', 'tesi_interface'), ('pwd', 'lpssint88'), ('ip', '172.25.152.12'), ('port', '1737'), ('service_name', 'tplpss.mytna.com')])\n \n tpint = dict([('uid', 'tesi_interface'), ('pwd', 'intint88'), ('ip', '172.25.152.12'), ('port', '1737'), ('service_name', 'tpint.mytna.com')])\n \n if db == 'tppe':\n auth = tppe\n \n elif db == 'lpss':\n auth = lpss\n \n elif db == 'tpint':\n auth = tpint\n \n else:\n print('database not recognized, try: tppe, lpss or tpint.')\n return(None)\n \n dsn = cx_Oracle.makedsn(auth['ip'], auth['port'], service_name=auth['service_name'])\n \n result_list = []\n con = cx_Oracle.connect(auth['uid'], auth['pwd'], auth['service_name'])\n cur = con.cursor()\n cur.execute(query)\n \n columns = [i[0] for i in cur.description]\n\n result_list = []\n \n for result in cur:\n result_list.append(result)\n i = len(result_list)\n if (i > 0 and i % 1000 == 0):\n print('done with {}.'.format(i))\n \n print('finished with {} results, outputting dataframe.'. format(len(result_list)))\n result = pd.DataFrame(result_list)\n result.columns = columns\n \n return(result)" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
limjongun95/AIProejct
[ "774a1d155f2917608896ac2e248051b6eb8a9601" ]
[ "settings/TrainSettings.py" ]
[ "import tensorflow as tf\nimport settings.DataSettings as dataSettings\n\n'''\n Following two variables control the shape of input\n data as the shape: [BATCH_SIZE*UNROLLED_SIZE, w, h, c].\n BATCH_SIZE: number of Videos in a batch.\n UNROLLED_SIZE: number of Frames in a Video.\n For the ConvNet part, the input will be the shape:\n [BATCH_SIZE*UNROLLED_SIZE, w, h, c].\n For the RNN part, the input will be the shape:\n [BATCH_SIZE, UNROLLED_SIZE, w, h, c] so that the\n tf.nn.rnn_cell.dynamic_rnn() can unroll the RNN.\n The output of the total network will be the shape:\n [BATCH_SIZE, UNROLLED_SIZE, NUMBER_OF_CATEGORIES]\n'''\n## 191128 Modify\n\nBATCH_SIZE = 2\nUNROLLED_SIZE = 24\n\n\n# BATCH_SIZE = 4\n# UNROLLED_SIZE = 40\n\n#BATCH_SIZE = 40\n#UNROLLED_SIZE = 2\n\nPRETRAIN_MODEL_PATH_NAME = \"\"\n#PRETRAIN_MODEL_PATH_NAME = \"temp/G2D19_P2OF_ResHB_1LSTM_dataAug_expLR/save_epoch_14/ViolenceNet.ckpt\"\n\n'''\n If one want to finetune, insert the LastLayer to the following list.\n ex: NAME_SCOPES_NOT_TO_RECOVER_FROM_CHECKPOINT = ['Conv4', 'Conv5']\n'''\nNAME_SCOPES_NOT_TO_RECOVER_FROM_CHECKPOINT = []\n\n\n## 191128 Modify\n\nMAX_TRAINING_EPOCH = 1\n\n# MAX_TRAINING_EPOCH = 30\n\nEPOCHS_TO_START_SAVE_MODEL = 1\nPATH_TO_SAVE_MODEL = \"./model/\"\nMAX_TRAINING_SAVE_MODEL = MAX_TRAINING_EPOCH\nPERFORM_DATA_AUGMENTATION = False\n\ndef GetOptimizer(learningRate_):\n\treturn tf.train.AdamOptimizer(learning_rate=learningRate_)\n\n'''\n Following list three different LearningRate decay methods:\n\t1. _stairLearningRate(),\n\t2. _exponentialDecayLearningRate()\n\t3. _polynomialDecayLearningRate()\n'''\ndef _stairLearningRate(currentEpoch_, currentStep_):\n\t#LIST_OF_EPOCH_LEARNING_RATE_PAIRS = [ (0, 1e-4), (5, 1e-5) ]\n\tLIST_OF_EPOCH_LEARNING_RATE_PAIRS = [ (0, 1e-6), (15, 5e-7), (25, 1e-7) ]\n\t#LIST_OF_EPOCH_LEARNING_RATE_PAIRS = [ (0, 3e-6), (5, 2.5e-6), (10, 2e-6), (15, 1.5e-6), (20, 1e-6) ]\n\n\tfor eachPair in reversed(LIST_OF_EPOCH_LEARNING_RATE_PAIRS):\n\t\tif currentEpoch_ >= eachPair[0]:\n\t\t\treturn eachPair[1]\n\n\t# If nothing matched, return the first pair.learningRate as default\n\treturn LIST_OF_EPOCH_LEARNING_RATE_PAIRS[0][1] \n\n\ndef _exponentialDecayLearningRate(currentEpoch_, currentStep_):\n\t'''\n\t Exponential Decay:\n\t\tlearningRate = INITIAL_LEARNING_RATE * DECAY_RATE ^ (currentStep_ / DECAY_STEP) + END_LEARNING_RATE\n\t'''\n\tINITIAL_LEARNING_RATE = 1e-5\n\tDECAY_RATE = 0.9\n\tNUMBER_OF_BATCHES_PER_EPOCH = 250\n\tNUMBER_OF_EPOCHS_PER_DECAY = 1\n\tDECAY_STEP = int(NUMBER_OF_BATCHES_PER_EPOCH * NUMBER_OF_EPOCHS_PER_DECAY)\n\tEND_LEARNING_RATE = 0.0\n\n\tlearningRate = INITIAL_LEARNING_RATE * DECAY_RATE ** (currentStep_ / DECAY_STEP) + END_LEARNING_RATE\n\n\treturn learningRate\n\ndef _polynomialDecayLearningRate(currentEpoch_, currentStep_):\n\t'''\n\t Polynomial Decay:\n\t\tstep = min(currentStep_, MAX_STEPS)\n\t\tlearningRate = (START_LEARNING_RATE - END_LEARNING_RATE) * (1 - step/MAX_STEPS)^(POWER) + END_LEARNING_RATE\n\t'''\n\tSTART_LEARNING_RATE = 2e-6\n\tEND_LEARNING_RATE = 1e-7\n\tMAX_STEPS = MAX_TRAINING_EPOCH * 125\n\tPOWER = 4\n\n\n\ndef GetLearningRate(currentEpoch_, currentStep_):\n#\treturn _stairLearningRate(currentEpoch_, currentStep_)\n\treturn _exponentialDecayLearningRate(currentEpoch_, currentStep_=currentStep_)\n\n\n\n#####################\n# Advenced Settings #\n#####################\n'''\n Following settings depend on (BATCH_SIZE, UNROLLED_SIZE, PERFORM_DATA_AUGMENTATION):\n if (4, 40, False), Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 
60\n\tLOADED_QUEUE_MAX_SIZE = 30\n\tNUMBER_OF_LOAD_DATA_THREADS=2\n\n if (4, 40, True), Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 180\n\tLOADED_QUEUE_MAX_SIZE = 80\n\tNUMBER_OF_LOAD_DATA_THREADS=2\n\n if (40, 1, False), Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 180\n\tLOADED_QUEUE_MAX_SIZE = 80\n\tNUMBER_OF_LOAD_DATA_THREADS=4\n\n if (40, 1, True), Recommend values:\n\tWAITING_QUEUE_MAX_SIZE = 180\n\tLOADED_QUEUE_MAX_SIZE = 80\n\tNUMBER_OF_LOAD_DATA_THREADS=4\n\n Note: The \"Averaged GetBatch Time\" that printed while you train an epoch, should be\n\t smaller than 0.001(s). Otherwise, increase NUMBER_OF_LOAD_DATA_THREADS.\n'''\nWAITING_QUEUE_MAX_SIZE = 180\nLOADED_QUEUE_MAX_SIZE = 80\nNUMBER_OF_LOAD_DATA_THREADS=4\n\nMAX_GRADIENT_VALUE = 5.0\nMIN_GRADIENT_VALUE = -5.0\n" ]
[ [ "tensorflow.train.AdamOptimizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Geson-anko/autokikitori6
[ "6d60b6bb91b49e85c720ca4b131ba7ca8b8b0668" ]
[ "Kikitori_fftx2_data.py" ]
[ "import torch\nfrom torchaudio.transforms import MelScale\nimport numpy as np\nimport h5py\nfrom pydub import AudioSegment\nimport config\nimport glob\n\nclass ToData:\n \n file_name:str = 'data/encoded_fftx2.h5'\n key_name:str = 'data'\n device = 'cpu'\n\n def __init__(self) -> None:\n self.mel_scaler = MelScale(\n config.mel_channels,\n config.frame_rate,\n n_stft=config.fft_channels\n )\n\n def load(self) -> list:\n files = glob.glob('data/voice_only/*.wav')\n\n return list(set(files))\n\n def save(self,data:np.ndarray,overwrite:bool=True) -> None:\n\n print(data.shape)\n idxes = np.random.permutation(len(data))\n data = data[idxes]\n with h5py.File(self.file_name,'a') as f:\n if self.key_name in f and overwrite:\n del f[self.key_name]\n f.create_dataset(name=self.key_name,data=data)\n else:\n f.create_dataset(name=self.key_name,data=data)\n\n\n def run(self,indata:str) -> np.ndarray:\n \n soundarray = self.load_sound(indata)\n sound = self.preprocess(soundarray)\n sound = sound.detach().cpu().half().numpy()\n return sound\n\n def load_sound(self,sound_file:str) -> np.ndarray:\n sound = AudioSegment.from_file(sound_file)\n if sound.frame_rate != config.frame_rate:\n sound = sound.set_frame_rate(config.frame_rate)\n if sound.channels !=config.channels:\n sound = sound.set_channels(config.channels)\n if sound.sample_width != config.sample_width:\n sound = sound.set_sample_width(config.sample_width)\n\n soundarray = np.array(sound.get_array_of_samples())/config.sample_range\n\n return soundarray\n\n def preprocess(self,sound:np.ndarray) -> torch.Tensor:\n mel_scaler = self.mel_scaler.to(self.device)\n sound = torch.from_numpy(sound).to(self.device)\n padlen = (config.overlap_length - (len(sound)%config.overlap_length))\n pad = torch.zeros(padlen,dtype=sound.dtype)\n sound = torch.cat([sound,pad]).unfold(0,config.recognize_length,config.overlap_length)\n sound = torch.fft.rfft(sound,dim=-1).abs().T.float()\n sound = mel_scaler(sound).T\n sound = torch.log1p(sound)\n sound = torch.fft.rfft(sound,dim=-1).abs().float()\n sound = torch.log1p(sound)\n return sound\n\nif __name__ == '__main__':\n from concurrent.futures import ProcessPoolExecutor\n todata = ToData()\n func = todata.run\n database = todata.load()\n print('process start!')\n with ProcessPoolExecutor(8) as p:\n result = p.map(func,database)\n result = np.concatenate(list(result))\n todata.save(result)\n # test\n #out = todata.run(database[0])\n #print(out.shape)" ]
[ [ "torch.cat", "torch.zeros", "torch.from_numpy", "torch.fft.rfft", "torch.log1p" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
logan-dunbar/pybullet_planning
[ "3b25fc7a0f350f4b46048be5c42f9cbf3ab2d6fb" ]
[ "src/pybullet_planning/interfaces/env_manager/pose_transformation.py" ]
[ "import math\nimport numpy as np\nimport pybullet as p\n\nfrom pybullet_planning.utils import get_client, unit_vector, quaternion_from_matrix, clip, euler_from_quaternion\n\n#####################################\n# Geometry\n\n#Pose = namedtuple('Pose', ['position', 'orientation'])\n\ndef Point(x=0., y=0., z=0.):\n \"\"\"Representing a point in 3D\n\n Parameters\n ----------\n x : float, optional\n [description], by default 0.\n y : float, optional\n [description], by default 0.\n z : float, optional\n [description], by default 0.\n\n Returns\n -------\n np array of three floats\n [description]\n \"\"\"\n return np.array([x, y, z])\n\ndef Euler(roll=0., pitch=0., yaw=0.):\n \"\"\"Representing a 3D rotation by Eulerian angles\n\n .. image:: ../images/roll_pitch_yaw.png\n :scale: 60 %\n :align: center\n\n `image source <https://devforum.roblox.com/t/take-out-pitch-from-rotation-matrix-while-preserving-yaw-and-roll/95204>`_\n\n Parameters\n ----------\n roll : float, optional\n [description], by default 0.\n pitch : float, optional\n [description], by default 0.\n yaw : float, optional\n [description], by default 0.\n\n Returns\n -------\n np array of three floats\n [description]\n \"\"\"\n return np.array([roll, pitch, yaw])\n\ndef Pose(point=None, euler=None):\n \"\"\"Representing a pose (or frame) in 3D\n\n Parameters\n ----------\n point : np array of three-floats, optional\n [description], by default None\n euler : np array of three eulerian angles, optional\n (roll, pitch, yaw), by default None\n\n Returns\n -------\n tuple of point, quaternion\n [description]\n \"\"\"\n point = Point() if point is None else point\n euler = Euler() if euler is None else euler\n return (point, quat_from_euler(euler))\n\n#def Pose2d(x=0., y=0., yaw=0.):\n# return np.array([x, y, yaw])\n\n#####################################\n\ndef invert(pose):\n (point, quat) = pose\n return p.invertTransform(point, quat)\n\ndef multiply(*poses):\n pose = poses[0]\n for next_pose in poses[1:]:\n pose = p.multiplyTransforms(pose[0], pose[1], *next_pose)\n return pose\n\ndef invert_quat(quat):\n pose = (unit_point(), quat)\n return quat_from_pose(invert(pose))\n\ndef multiply_quats(*quats):\n return quat_from_pose(multiply(*[(unit_point(), quat) for quat in quats]))\n\ndef unit_from_theta(theta):\n return np.array([np.cos(theta), np.sin(theta)])\n\ndef quat_from_euler(euler):\n return p.getQuaternionFromEuler(euler) # TODO: extrinsic (static) vs intrinsic (rotating)\n\ndef euler_from_quat(quat):\n return p.getEulerFromQuaternion(quat) # rotation around fixed axis\n\ndef intrinsic_euler_from_quat(quat):\n #axes = 'sxyz' if static else 'rxyz'\n return euler_from_quaternion(quat, axes='rxyz')\n\ndef unit_point():\n return (0., 0., 0.)\n\ndef unit_quat():\n return quat_from_euler([0, 0, 0]) # [X,Y,Z,W]\n\ndef quat_from_axis_angle(axis, angle): # axis-angle\n #return get_unit_vector(np.append(vec, [angle]))\n return np.append(math.sin(angle/2) * get_unit_vector(axis), [math.cos(angle / 2)])\n\ndef unit_pose():\n return (unit_point(), unit_quat())\n\ndef get_length(vec, norm=2):\n return np.linalg.norm(vec, ord=norm)\n\ndef get_difference(p1, p2):\n return np.array(p2) - np.array(p1)\n\ndef get_distance(p1, p2, **kwargs):\n return get_length(get_difference(p1, p2), **kwargs)\n\ndef angle_between(vec1, vec2):\n return np.math.acos(np.dot(vec1, vec2) / (get_length(vec1) * get_length(vec2)))\n\ndef get_angle(q1, q2):\n dx, dy = np.array(q2[:2]) - np.array(q1[:2])\n return np.math.atan2(dy, dx)\n\ndef get_unit_vector(vec):\n norm 
= get_length(vec)\n if norm == 0:\n return vec\n return np.array(vec) / norm\n\ndef z_rotation(theta):\n return quat_from_euler([0, 0, theta])\n\ndef matrix_from_quat(quat):\n return np.array(p.getMatrixFromQuaternion(quat, physicsClientId=get_client())).reshape(3, 3)\n\ndef quat_from_matrix(mat):\n matrix = np.eye(4)\n matrix[:3,:3] = mat\n return quaternion_from_matrix(matrix)\n\ndef point_from_tform(tform):\n return np.array(tform)[:3,3]\n\ndef matrix_from_tform(tform):\n return np.array(tform)[:3,:3]\n\ndef point_from_pose(pose):\n \"\"\"get the origin point from a pose\n\n Parameters\n ----------\n pose : Pose\n [description]\n\n Returns\n -------\n Point, np array of three floats\n [description]\n \"\"\"\n return pose[0]\n\ndef quat_from_pose(pose):\n \"\"\"get the quaternion from a pose\n\n Parameters\n ----------\n pose : [type]\n [description]\n\n Returns\n -------\n Quaternion, np array of four floats\n [description]\n \"\"\"\n return pose[1]\n\ndef tform_from_pose(pose):\n (point, quat) = pose\n tform = np.eye(4)\n tform[:3,3] = point\n tform[:3,:3] = matrix_from_quat(quat)\n return tform\n\ndef pose_from_tform(tform):\n return point_from_tform(tform), quat_from_matrix(matrix_from_tform(tform))\n\ndef wrap_angle(theta, lower=-np.pi): # [-np.pi, np.pi)\n return (theta - lower) % (2 * np.pi) + lower\n\ndef circular_difference(theta2, theta1):\n return wrap_angle(theta2 - theta1)\n\ndef base_values_from_pose(pose, tolerance=1e-3):\n (point, quat) = pose\n x, y, _ = point\n roll, pitch, yaw = euler_from_quat(quat)\n assert (abs(roll) < tolerance) and (abs(pitch) < tolerance)\n return (x, y, yaw)\n\npose2d_from_pose = base_values_from_pose\n\ndef pose_from_base_values(base_values, default_pose=unit_pose()):\n x, y, yaw = base_values\n _, _, z = point_from_pose(default_pose)\n roll, pitch, _ = euler_from_quat(quat_from_pose(default_pose))\n return (x, y, z), quat_from_euler([roll, pitch, yaw])\n\ndef quat_angle_between(quat0, quat1): # quaternion_slerp\n #p.computeViewMatrixFromYawPitchRoll()\n q0 = unit_vector(quat0[:4])\n q1 = unit_vector(quat1[:4])\n d = clip(np.dot(q0, q1), min_value=-1., max_value=+1.)\n angle = math.acos(d)\n # TODO: angle_between\n #delta = p.getDifferenceQuaternion(quat0, quat1)\n #angle = math.acos(delta[-1])\n return angle\n\ndef all_between(lower_limits, values, upper_limits):\n assert len(lower_limits) == len(values)\n assert len(values) == len(upper_limits)\n return np.less_equal(lower_limits, values).all() and \\\n np.less_equal(values, upper_limits).all()\n\ndef tform_point(affine, point):\n \"\"\"transform a given point\n\n Parameters\n ----------\n affine : Pose\n [description]\n point : [type]\n [description]\n\n Returns\n -------\n Point\n [description]\n \"\"\"\n return point_from_pose(multiply(affine, Pose(point=point)))\n\ndef apply_affine(affine, points):\n \"\"\"apply affine transformation on the given list of points\n\n Parameters\n ----------\n affine : Pose\n [description]\n points : [type]\n [description]\n\n Returns\n -------\n list of points\n [description]\n \"\"\"\n return [tform_point(affine, p) for p in points]\n\n##############################################\n# placing here to resolve cycly dependencies\n# more can be found in inferfaces.robots.body\n\ndef set_pose(body, pose):\n (point, quat) = pose\n p.resetBasePositionAndOrientation(body, point, quat, physicsClientId=get_client())\n\ndef get_pose(body):\n return p.getBasePositionAndOrientation(body, physicsClientId=get_client())\n #return np.concatenate([point, quat])\n" ]
[ [ "numpy.dot", "numpy.eye", "numpy.linalg.norm", "numpy.math.atan2", "numpy.cos", "numpy.sin", "numpy.less_equal", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ben-shor/IML.HUJI
[ "65bd0e1262356181bc4d9eb46b1b457b2fc44322" ]
[ "IMLearn/learners/classifiers/perceptron.py" ]
[ "from __future__ import annotations\nfrom typing import Callable\nfrom typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\n\n\ndef default_callback(fit: Perceptron, x: np.ndarray, y: int):\n pass\n\n\nclass Perceptron(BaseEstimator):\n \"\"\"\n Perceptron half-space classifier\n\n Finds a separating hyperplane for given linearly separable data.\n\n Attributes\n ----------\n include_intercept: bool, default = True\n Should fitted model include an intercept or not\n\n max_iter_: int, default = 1000\n Maximum number of passes over training data\n\n coefs_: ndarray of shape (n_features,) or (n_features+1,)\n Coefficients vector fitted by Perceptron algorithm. To be set in\n `Perceptron.fit` function.\n\n callback_: Callable[[Perceptron, np.ndarray, int], None]\n A callable to be called after each update of the model while fitting to given data\n Callable function should receive as input a Perceptron instance, current sample and current response\n \"\"\"\n def __init__(self,\n include_intercept: bool = True,\n max_iter: int = 1000,\n callback: Callable[[Perceptron, np.ndarray, int], None] = default_callback):\n \"\"\"\n Instantiate a Perceptron classifier\n\n Parameters\n ----------\n include_intercept: bool, default=True\n Should fitted model include an intercept or not\n\n max_iter: int, default = 1000\n Maximum number of passes over training data\n\n callback: Callable[[Perceptron, np.ndarray, int], None]\n A callable to be called after each update of the model while fitting to given data\n Callable function should receive as input a Perceptron instance, current sample and current response\n \"\"\"\n super().__init__()\n self.include_intercept_ = include_intercept\n self.max_iter_ = max_iter\n self.callback_ = callback\n self.coefs_ = None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit a halfspace to to given samples. 
Iterate over given data as long as there exists a sample misclassified\n or that did not reach `self.max_iter_`\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n\n Notes\n -----\n Fits model with or without an intercept depending on value of `self.fit_intercept_`\n \"\"\"\n if self.include_intercept_:\n X = np.c_[np.ones(X.shape[0]), X]\n\n self.coefs_ = np.zeros(X.shape[1])\n for _ in range(self.max_iter_):\n failed_xs_indexes = np.where((y * (X @ self.coefs_)) <= 0)[0]\n if len(failed_xs_indexes) == 0:\n return\n self.coefs_ = self.coefs_ + y[failed_xs_indexes[0]] * X[failed_xs_indexes[0]]\n self.fitted_ = True\n self.callback_(self, X[failed_xs_indexes[0]], X[failed_xs_indexes[0]] @ self.coefs_)\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n if self.include_intercept_:\n X = np.c_[np.ones(X.shape[0]), X]\n return np.array([1 if i else -1 for i in (X @ self.coefs_) > 0])\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under misclassification loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under missclassification loss function\n \"\"\"\n from ...metrics import misclassification_error\n return misclassification_error(y, self.predict(X))\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.where", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aakashb95/transformers
[ "224bde91caff4ccfd12277ab5e9bf97c61e22ee9" ]
[ "examples/tensorflow/multiple-choice/run_swag.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for multiple choice.\n\"\"\"\n# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport datasets\nimport tensorflow as tf\nfrom datasets import load_dataset\n\nimport transformers\nfrom transformers import (\n CONFIG_NAME,\n TF2_WEIGHTS_NAME,\n AutoConfig,\n AutoTokenizer,\n DefaultDataCollator,\n HfArgumentParser,\n TFAutoModelForMultipleChoice,\n TFTrainingArguments,\n create_optimizer,\n set_seed,\n)\nfrom transformers.tokenization_utils_base import PreTrainedTokenizerBase\nfrom transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.20.0.dev0\")\n\nlogger = logging.getLogger(__name__)\n\n\n# region Helper classes and functions\nclass SavePretrainedCallback(tf.keras.callbacks.Callback):\n # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary\n # metadata to allow them to be loaded as a pretrained model in future. 
This is a simple Keras callback\n # that saves the model with this method after each epoch.\n def __init__(self, output_dir, **kwargs):\n super().__init__()\n self.output_dir = output_dir\n\n def on_epoch_end(self, epoch, logs=None):\n self.model.save_pretrained(self.output_dir)\n\n\n@dataclass\nclass DataCollatorForMultipleChoice:\n \"\"\"\n Data collator that will dynamically pad the inputs for multiple choice received.\n\n Args:\n tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):\n The tokenizer used for encoding the data.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n\n def __call__(self, features):\n label_name = \"label\" if \"label\" in features[0].keys() else \"labels\"\n labels = [feature.pop(label_name) for feature in features]\n batch_size = len(features)\n num_choices = len(features[0][\"input_ids\"])\n flattened_features = [\n [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features\n ]\n flattened_features = list(chain(*flattened_features))\n\n batch = self.tokenizer.pad(\n flattened_features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=\"tf\",\n )\n\n # Un-flatten\n batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()}\n # Add back labels\n batch[\"labels\"] = tf.convert_to_tensor(labels, dtype=tf.int64)\n return batch\n\n\n# endregion\n\n# region Arguments\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific 
model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n )\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a text file).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"},\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_seq_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"The maximum total input sequence length after tokenization. If passed, sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to pad all samples to the maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n )\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n )\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n )\n },\n )\n\n def __post_init__(self):\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n\n# endregion\n\n\ndef main():\n # region Argument parsing\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. 
The\n # information sent is the one passed as arguments along with your Python/PyTorch versions.\n send_example_telemetry(\"run_swag\", model_args, data_args, framework=\"tensorflow\")\n\n output_dir = Path(training_args.output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n # endregion\n\n # region Logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n # endregion\n\n # region Checkpoints\n checkpoint = None\n if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:\n if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():\n checkpoint = output_dir\n logger.info(\n f\"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this\"\n \" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n else:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to continue regardless.\"\n )\n # endregion\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # region Load datasets\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. 
You can easily tweak this behavior (see below).\n\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.train_file is not None or data_args.validation_file is not None:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.train_file.split(\".\")[-1]\n raw_datasets = load_dataset(\n extension,\n data_files=data_files,\n cache_dir=model_args.cache_dir,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n else:\n # Downloading and loading the swag dataset from the hub.\n raw_datasets = load_dataset(\n \"swag\",\n \"regular\",\n cache_dir=model_args.cache_dir,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # When using your own dataset or a different dataset from swag, you will probably need to change this.\n ending_names = [f\"ending{i}\" for i in range(4)]\n context_name = \"sent1\"\n question_header_name = \"sent2\"\n # endregion\n\n # region Load model config and tokenizer\n if checkpoint is not None:\n config_path = training_args.output_dir\n elif model_args.config_name:\n config_path = model_args.config_name\n else:\n config_path = model_args.model_name_or_path\n\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n config_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # endregion\n\n # region Dataset preprocessing\n if data_args.max_seq_length is None:\n max_seq_length = tokenizer.model_max_length\n if max_seq_length > 1024:\n logger.warning(\n f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n \"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.\"\n )\n max_seq_length = 1024\n else:\n if data_args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n def preprocess_function(examples):\n first_sentences = [[context] * 4 for context in examples[context_name]]\n question_headers = examples[question_header_name]\n second_sentences = [\n [f\"{header} {examples[end][i]}\" for end in ending_names] for i, header in enumerate(question_headers)\n ]\n\n # Flatten out\n first_sentences = list(chain(*first_sentences))\n second_sentences = list(chain(*second_sentences))\n\n # Tokenize\n tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length)\n # Un-flatten\n data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}\n return data\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n non_label_columns = [feature for feature in train_dataset.features if feature not in (\"label\", \"labels\")]\n if data_args.max_train_samples is not None:\n max_train_samples = min(len(train_dataset), data_args.max_train_samples)\n train_dataset = train_dataset.select(range(max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_eval:\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if not training_args.do_train:\n non_label_columns = [feature for feature in eval_dataset.features if feature not in (\"label\", \"labels\")]\n if data_args.max_eval_samples is not None:\n max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)\n eval_dataset = eval_dataset.select(range(max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if data_args.pad_to_max_length:\n data_collator = DefaultDataCollator(return_tensors=\"tf\")\n else:\n # custom class defined above, as HF has no data collator for multiple choice\n data_collator = DataCollatorForMultipleChoice(tokenizer)\n # endregion\n\n with training_args.strategy.scope():\n # region Build model\n if checkpoint is None:\n model_path = model_args.model_name_or_path\n else:\n model_path = checkpoint\n model = TFAutoModelForMultipleChoice.from_pretrained(\n model_path,\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n num_replicas = training_args.strategy.num_replicas_in_sync\n total_train_batch_size = training_args.per_device_train_batch_size * num_replicas\n total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas\n if training_args.do_train:\n total_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs)\n optimizer, lr_schedule = create_optimizer(\n init_lr=training_args.learning_rate, num_train_steps=int(total_train_steps), num_warmup_steps=0\n )\n else:\n optimizer = \"adam\" # Just put anything in here, since we're not using 
it anyway\n model.compile(\n optimizer=optimizer,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"accuracy\")],\n )\n # endregion\n\n # region Training\n if training_args.do_train:\n dataset_exclude_cols = set(non_label_columns + [\"label\"])\n tf_train_dataset = train_dataset.to_tf_dataset(\n columns=[col for col in train_dataset.column_names if col not in dataset_exclude_cols],\n shuffle=True,\n batch_size=total_train_batch_size,\n collate_fn=data_collator,\n drop_remainder=True,\n # `label_cols` is needed for user-defined losses, such as in this example\n label_cols=\"label\" if \"label\" in train_dataset.column_names else None,\n )\n\n if training_args.do_eval:\n validation_data = eval_dataset.to_tf_dataset(\n columns=[col for col in eval_dataset.column_names if col not in dataset_exclude_cols],\n shuffle=False,\n batch_size=total_eval_batch_size,\n collate_fn=data_collator,\n drop_remainder=True,\n # `label_cols` is needed for user-defined losses, such as in this example\n label_cols=\"label\" if \"label\" in eval_dataset.column_names else None,\n )\n else:\n validation_data = None\n model.fit(\n tf_train_dataset,\n validation_data=validation_data,\n epochs=int(training_args.num_train_epochs),\n callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)],\n )\n # endregion\n\n # region Evaluation\n if training_args.do_eval and not training_args.do_train:\n dataset_exclude_cols = set(non_label_columns + [\"label\"])\n # Do a standalone evaluation pass\n tf_eval_dataset = eval_dataset.to_tf_dataset(\n columns=[col for col in eval_dataset.column_names if col not in dataset_exclude_cols],\n shuffle=False,\n batch_size=total_eval_batch_size,\n collate_fn=data_collator,\n drop_remainder=True,\n # `label_cols` is needed for user-defined losses, such as in this example\n label_cols=\"label\" if \"label\" in eval_dataset.column_names else None,\n )\n model.evaluate(tf_eval_dataset)\n # endregion\n\n # region Push to hub\n if training_args.push_to_hub:\n model.push_to_hub(\n finetuned_from=model_args.model_name_or_path,\n tasks=\"multiple-choice\",\n dataset_tags=\"swag\",\n dataset_args=\"regular\",\n dataset=\"SWAG\",\n language=\"en\",\n )\n # endregion\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.reshape", "tensorflow.keras.losses.SparseCategoricalCrossentropy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
rheostat/pyfolio
[ "04ddc116c27f9a9c3265c604474eeb495e0ccc65" ]
[ "pyfolio/plotting.py" ]
[ "#\n# Copyright 2018 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import division\n\nimport datetime\nfrom collections import OrderedDict\nfrom functools import wraps\n\nimport empyrical as ep\nimport matplotlib\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pytz\nimport scipy as sp\nfrom matplotlib import figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.ticker import FuncFormatter\n\nfrom . import _seaborn as sns\nfrom . import capacity\nfrom . import pos\nfrom . import timeseries\nfrom . import txn\nfrom . import utils\nfrom .utils import (APPROX_BDAYS_PER_MONTH,\n MM_DISPLAY_UNIT)\n\n\ndef customize(func):\n \"\"\"\n Decorator to set plotting context and axes style during function call.\n \"\"\"\n @wraps(func)\n def call_w_context(*args, **kwargs):\n set_context = kwargs.pop('set_context', True)\n if set_context:\n with plotting_context(), axes_style():\n return func(*args, **kwargs)\n else:\n return func(*args, **kwargs)\n return call_w_context\n\n\ndef plotting_context(context='notebook', font_scale=1.5, rc=None):\n \"\"\"\n Create pyfolio default plotting style context.\n\n Under the hood, calls and returns seaborn.plotting_context() with\n some custom settings. Usually you would use in a with-context.\n\n Parameters\n ----------\n context : str, optional\n Name of seaborn context.\n font_scale : float, optional\n Scale font by factor font_scale.\n rc : dict, optional\n Config flags.\n By default, {'lines.linewidth': 1.5}\n is being used and will be added to any\n rc passed in, unless explicitly overriden.\n\n Returns\n -------\n seaborn plotting context\n\n Example\n -------\n >>> with pyfolio.plotting.plotting_context(font_scale=2):\n >>> pyfolio.create_full_tear_sheet(..., set_context=False)\n\n See also\n --------\n For more information, see seaborn.plotting_context().\n\n \"\"\"\n if rc is None:\n rc = {}\n\n rc_default = {'lines.linewidth': 1.5}\n\n # Add defaults if they do not exist\n for name, val in rc_default.items():\n rc.setdefault(name, val)\n\n return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)\n\n\ndef axes_style(style='darkgrid', rc=None):\n \"\"\"\n Create pyfolio default axes style context.\n\n Under the hood, calls and returns seaborn.axes_style() with\n some custom settings. 
Usually you would use in a with-context.\n\n Parameters\n ----------\n style : str, optional\n Name of seaborn style.\n rc : dict, optional\n Config flags.\n\n Returns\n -------\n seaborn plotting context\n\n Example\n -------\n >>> with pyfolio.plotting.axes_style(style='whitegrid'):\n >>> pyfolio.create_full_tear_sheet(..., set_context=False)\n\n See also\n --------\n For more information, see seaborn.plotting_context().\n\n \"\"\"\n if rc is None:\n rc = {}\n\n rc_default = {}\n\n # Add defaults if they do not exist\n for name, val in rc_default.items():\n rc.setdefault(name, val)\n\n return sns.axes_style(style=style, rc=rc)\n\n\ndef plot_monthly_returns_heatmap(returns, ax=None, **kwargs):\n \"\"\"\n Plots a heatmap of returns by month.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n monthly_ret_table = ep.aggregate_returns(returns, 'monthly')\n monthly_ret_table = monthly_ret_table.unstack().round(3)\n\n sns.heatmap(\n monthly_ret_table.fillna(0) *\n 100.0,\n annot=True,\n annot_kws={\"size\": 9},\n alpha=1.0,\n center=0.0,\n cbar=False,\n cmap=matplotlib.cm.RdYlGn,\n ax=ax, **kwargs)\n ax.set_ylabel('Year')\n ax.set_xlabel('Month')\n ax.set_title(\"Monthly returns (%)\")\n return ax\n\n\ndef plot_annual_returns(returns, ax=None, **kwargs):\n \"\"\"\n Plots a bar graph of returns by year.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n x_axis_formatter = FuncFormatter(utils.percentage)\n ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))\n ax.tick_params(axis='x', which='major')\n\n ann_ret_df = pd.DataFrame(\n ep.aggregate_returns(\n returns,\n 'yearly'))\n\n ax.axvline(\n 100 *\n ann_ret_df.values.mean(),\n color='steelblue',\n linestyle='--',\n lw=4,\n alpha=0.7)\n (100 * ann_ret_df.sort_index(ascending=False)\n ).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)\n ax.axvline(0.0, color='black', linestyle='-', lw=3)\n\n ax.set_ylabel('Year')\n ax.set_xlabel('Returns')\n ax.set_title(\"Annual returns\")\n ax.legend(['Mean'], frameon=True, framealpha=0.5)\n return ax\n\n\ndef plot_monthly_returns_dist(returns, ax=None, **kwargs):\n \"\"\"\n Plots a distribution of monthly returns.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n x_axis_formatter = FuncFormatter(utils.percentage)\n ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))\n ax.tick_params(axis='x', which='major')\n\n monthly_ret_table = ep.aggregate_returns(returns, 'monthly')\n\n ax.hist(\n 100 * monthly_ret_table,\n color='orangered',\n alpha=0.80,\n bins=20,\n **kwargs)\n\n 
ax.axvline(\n 100 * monthly_ret_table.mean(),\n color='gold',\n linestyle='--',\n lw=4,\n alpha=1.0)\n\n ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)\n ax.legend(['Mean'], frameon=True, framealpha=0.5)\n ax.set_ylabel('Number of months')\n ax.set_xlabel('Returns')\n ax.set_title(\"Distribution of monthly returns\")\n return ax\n\n\ndef plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):\n \"\"\"\n Plots total amount of stocks with an active position, either short\n or long. Displays daily total, daily average per month, and\n all-time daily average.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n positions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n positions = positions.copy().drop('cash', axis='columns')\n df_holdings = positions.replace(0, np.nan).count(axis=1)\n df_holdings_by_month = df_holdings.resample('1M').mean()\n df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)\n df_holdings_by_month.plot(\n color='orangered',\n lw=2,\n ax=ax,\n **kwargs)\n ax.axhline(\n df_holdings.values.mean(),\n color='steelblue',\n ls='--',\n lw=3)\n\n ax.set_xlim((returns.index[0], returns.index[-1]))\n\n leg = ax.legend(['Daily holdings',\n 'Average daily holdings, by month',\n 'Average daily holdings, overall'],\n loc=legend_loc, frameon=True,\n framealpha=0.5)\n leg.get_frame().set_edgecolor('black')\n\n ax.set_title('Total holdings')\n ax.set_ylabel('Holdings')\n ax.set_xlabel('')\n return ax\n\n\ndef plot_long_short_holdings(returns, positions,\n legend_loc='upper left', ax=None, **kwargs):\n \"\"\"\n Plots total amount of stocks with an active position, breaking out\n short and long into transparent filled regions.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n positions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n positions = positions.drop('cash', axis='columns')\n positions = positions.replace(0, np.nan)\n df_longs = positions[positions > 0].count(axis=1)\n df_shorts = positions[positions < 0].count(axis=1)\n lf = ax.fill_between(df_longs.index, 0, df_longs.values,\n color='g', alpha=0.5, lw=2.0)\n sf = ax.fill_between(df_shorts.index, 0, df_shorts.values,\n color='r', alpha=0.5, lw=2.0)\n\n bf = patches.Rectangle([0, 0], 1, 1, color='darkgoldenrod')\n leg = ax.legend([lf, sf, bf],\n ['Long (max: %s, min: %s)' % (df_longs.max(),\n df_longs.min()),\n 'Short (max: %s, min: %s)' % (df_shorts.max(),\n df_shorts.min()),\n 'Overlap'], loc=legend_loc, frameon=True,\n framealpha=0.5)\n leg.get_frame().set_edgecolor('black')\n\n ax.set_xlim((returns.index[0], 
returns.index[-1]))\n ax.set_title('Long and short holdings')\n ax.set_ylabel('Holdings')\n ax.set_xlabel('')\n return ax\n\n\ndef plot_drawdown_periods(returns, top=10, ax=None, **kwargs):\n \"\"\"\n Plots cumulative returns highlighting top drawdown periods.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n top : int, optional\n Amount of top drawdown periods to plot (default 10).\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n y_axis_formatter = FuncFormatter(utils.two_dec_places)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n\n df_cum_rets = ep.cum_returns(returns, starting_value=1.0)\n df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)\n\n df_cum_rets.plot(ax=ax, **kwargs)\n\n lim = ax.get_ylim()\n colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]\n for i, (peak, recovery) in df_drawdowns[\n ['Peak date', 'Recovery date']].iterrows():\n if pd.isnull(recovery):\n recovery = returns.index[-1]\n ax.fill_between((peak, recovery),\n lim[0],\n lim[1],\n alpha=.4,\n color=colors[i])\n ax.set_ylim(lim)\n ax.set_title('Top %i drawdown periods' % top)\n ax.set_ylabel('Cumulative returns')\n ax.legend(['Portfolio'], loc='upper left',\n frameon=True, framealpha=0.5)\n ax.set_xlabel('')\n return ax\n\n\ndef plot_drawdown_underwater(returns, ax=None, **kwargs):\n \"\"\"\n Plots how far underwater returns are over time, or plots current\n drawdown vs. date.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n y_axis_formatter = FuncFormatter(utils.percentage)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n\n df_cum_rets = ep.cum_returns(returns, starting_value=1.0)\n running_max = np.maximum.accumulate(df_cum_rets)\n underwater = -100 * ((running_max - df_cum_rets) / running_max)\n (underwater).plot(ax=ax, kind='area', color='coral', alpha=0.7, **kwargs)\n ax.set_ylabel('Drawdown')\n ax.set_title('Underwater plot')\n ax.set_xlabel('')\n return ax\n\n\ndef plot_perf_stats(returns, factor_returns, ax=None):\n \"\"\"\n Create box plot of some performance metrics of the strategy.\n The width of the box whiskers is determined by a bootstrap.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. 
Usually a benchmark such as market returns.\n - This is in the same style as returns.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n bootstrap_values = timeseries.perf_stats_bootstrap(returns,\n factor_returns,\n return_stats=False)\n bootstrap_values = bootstrap_values.drop('Kurtosis', axis='columns')\n\n sns.boxplot(data=bootstrap_values, orient='h', ax=ax)\n\n return ax\n\n\nSTAT_FUNCS_PCT = [\n 'Annual return',\n 'Cumulative returns',\n 'Annual volatility',\n 'Max drawdown',\n 'Daily value at risk',\n 'Daily turnover'\n]\n\n\ndef show_perf_stats(returns, factor_returns=None, positions=None,\n transactions=None, turnover_denom='AGB',\n live_start_date=None, bootstrap=False,\n header_rows=None):\n \"\"\"\n Prints some performance metrics of the strategy.\n\n - Shows amount of time the strategy has been run in backtest and\n out-of-sample (in live trading).\n\n - Shows Omega ratio, max drawdown, Calmar ratio, annual return,\n stability, Sharpe ratio, annual volatility, alpha, and beta.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n positions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n transactions : pd.DataFrame, optional\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet\n turnover_denom : str, optional\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\n live_start_date : datetime, optional\n The point in time when the strategy began live trading, after\n its backtest period.\n bootstrap : boolean, optional\n Whether to perform bootstrap analysis for the performance\n metrics.\n - For more information, see timeseries.perf_stats_bootstrap\n header_rows : dict or OrderedDict, optional\n Extra rows to display at the top of the displayed table.\n \"\"\"\n\n if bootstrap:\n perf_func = timeseries.perf_stats_bootstrap\n else:\n perf_func = timeseries.perf_stats\n\n perf_stats_all = perf_func(\n returns,\n factor_returns=factor_returns,\n positions=positions,\n transactions=transactions,\n turnover_denom=turnover_denom)\n\n date_rows = OrderedDict()\n if len(returns.index) > 0:\n date_rows['Start date'] = returns.index[0].strftime('%Y-%m-%d')\n date_rows['End date'] = returns.index[-1].strftime('%Y-%m-%d')\n\n if live_start_date is not None:\n live_start_date = ep.utils.get_utc_timestamp(live_start_date)\n returns_is = returns[returns.index < live_start_date]\n returns_oos = returns[returns.index >= live_start_date]\n\n positions_is = None\n positions_oos = None\n transactions_is = None\n transactions_oos = None\n\n if positions is not None:\n positions_is = positions[positions.index < live_start_date]\n positions_oos = positions[positions.index >= live_start_date]\n if transactions is not None:\n transactions_is = transactions[(transactions.index <\n live_start_date)]\n transactions_oos = transactions[(transactions.index >\n live_start_date)]\n\n perf_stats_is = perf_func(\n returns_is,\n factor_returns=factor_returns,\n positions=positions_is,\n 
transactions=transactions_is,\n turnover_denom=turnover_denom)\n\n perf_stats_oos = perf_func(\n returns_oos,\n factor_returns=factor_returns,\n positions=positions_oos,\n transactions=transactions_oos,\n turnover_denom=turnover_denom)\n if len(returns.index) > 0:\n date_rows['In-sample months'] = int(len(returns_is) /\n APPROX_BDAYS_PER_MONTH)\n date_rows['Out-of-sample months'] = int(len(returns_oos) /\n APPROX_BDAYS_PER_MONTH)\n\n perf_stats = pd.concat(OrderedDict([\n ('In-sample', perf_stats_is),\n ('Out-of-sample', perf_stats_oos),\n ('All', perf_stats_all),\n ]), axis=1)\n else:\n if len(returns.index) > 0:\n date_rows['Total months'] = int(len(returns) /\n APPROX_BDAYS_PER_MONTH)\n perf_stats = pd.DataFrame(perf_stats_all, columns=['Backtest'])\n\n for column in perf_stats.columns:\n for stat, value in perf_stats[column].iteritems():\n if stat in STAT_FUNCS_PCT:\n perf_stats.loc[stat, column] = str(np.round(value * 100,\n 3)) + '%'\n if header_rows is None:\n header_rows = date_rows\n else:\n header_rows = OrderedDict(header_rows)\n header_rows.update(date_rows)\n\n utils.print_table(\n perf_stats,\n float_format='{0:.2f}'.format,\n header_rows=header_rows,\n )\n\n\ndef plot_returns(returns,\n live_start_date=None,\n ax=None):\n \"\"\"\n Plots raw returns over time.\n\n Backtest returns are in green, and out-of-sample (live trading)\n returns are in red.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n live_start_date : datetime, optional\n The date when the strategy began live trading, after\n its backtest period. This date should be normalized.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n ax.set_label('')\n ax.set_ylabel('Returns')\n\n if live_start_date is not None:\n live_start_date = ep.utils.get_utc_timestamp(live_start_date)\n is_returns = returns.loc[returns.index < live_start_date]\n oos_returns = returns.loc[returns.index >= live_start_date]\n is_returns.plot(ax=ax, color='g')\n oos_returns.plot(ax=ax, color='r')\n\n else:\n returns.plot(ax=ax, color='g')\n\n return ax\n\n\ndef plot_rolling_returns(returns,\n factor_returns=None,\n live_start_date=None,\n logy=False,\n cone_std=None,\n legend_loc='best',\n volatility_match=False,\n cone_function=timeseries.forecast_cone_bootstrap,\n ax=None, **kwargs):\n \"\"\"\n Plots cumulative rolling returns versus some benchmarks'.\n\n Backtest returns are in green, and out-of-sample (live trading)\n returns are in red.\n\n Additionally, a non-parametric cone plot may be added to the\n out-of-sample returns region.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n live_start_date : datetime, optional\n The date when the strategy began live trading, after\n its backtest period. 
This date should be normalized.\n logy : bool, optional\n Whether to log-scale the y-axis.\n cone_std : float, or tuple, optional\n If float, the standard deviation to use for the cone plots.\n If tuple, a tuple of standard deviation values to use for the cone plots\n - See timeseries.forecast_cone_bounds for more details.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n volatility_match : bool, optional\n Whether to normalize the volatility of the returns to those of the\n benchmark returns. This helps compare strategies with different\n volatilities. Requires passing of benchmark_rets.\n cone_function : function, optional\n Function to use when generating forecast probability cone.\n The function signature must follow the form:\n def cone(in_sample_returns (pd.Series),\n days_to_project_forward (int),\n cone_std= (float, or tuple),\n starting_value= (int, or float))\n See timeseries.forecast_cone_bootstrap for an example.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n ax.set_xlabel('')\n ax.set_ylabel('Cumulative returns')\n ax.set_yscale('log' if logy else 'linear')\n\n if volatility_match and factor_returns is None:\n raise ValueError('volatility_match requires passing of '\n 'factor_returns.')\n elif volatility_match and factor_returns is not None:\n bmark_vol = factor_returns.loc[returns.index].std()\n returns = (returns / returns.std()) * bmark_vol\n\n cum_rets = ep.cum_returns(returns, 1.0)\n\n y_axis_formatter = FuncFormatter(utils.two_dec_places)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n\n if factor_returns is not None:\n cum_factor_returns = ep.cum_returns(\n factor_returns.loc[cum_rets.index], 1.0)\n cum_factor_returns.plot(lw=2, color='gray',\n label=factor_returns.name, alpha=0.60,\n ax=ax, **kwargs)\n\n if live_start_date is not None:\n live_start_date = ep.utils.get_utc_timestamp(live_start_date)\n is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date]\n oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date]\n else:\n is_cum_returns = cum_rets\n oos_cum_returns = pd.Series([])\n\n is_cum_returns.plot(lw=3, color='forestgreen', alpha=0.6,\n label='Backtest', ax=ax, **kwargs)\n\n if len(oos_cum_returns) > 0:\n oos_cum_returns.plot(lw=4, color='red', alpha=0.6,\n label='Live', ax=ax, **kwargs)\n\n if cone_std is not None:\n if isinstance(cone_std, (float, int)):\n cone_std = [cone_std]\n\n is_returns = returns.loc[returns.index < live_start_date]\n cone_bounds = cone_function(\n is_returns,\n len(oos_cum_returns),\n cone_std=cone_std,\n starting_value=is_cum_returns[-1])\n\n cone_bounds = cone_bounds.set_index(oos_cum_returns.index)\n for std in cone_std:\n ax.fill_between(cone_bounds.index,\n cone_bounds[float(std)],\n cone_bounds[float(-std)],\n color='steelblue', alpha=0.5)\n\n if legend_loc is not None:\n ax.legend(loc=legend_loc, frameon=True, framealpha=0.5)\n ax.axhline(1.0, linestyle='--', color='black', lw=2)\n\n return ax\n\n\ndef plot_rolling_beta(returns, factor_returns, legend_loc='best',\n ax=None, **kwargs):\n \"\"\"\n Plots the rolling 6-month and 12-month beta versus date.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series\n Daily noncumulative returns of the 
benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n y_axis_formatter = FuncFormatter(utils.two_dec_places)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n\n ax.set_title(\"Rolling portfolio beta to \" + str(factor_returns.name))\n ax.set_ylabel('Beta')\n rb_1 = timeseries.rolling_beta(\n returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6)\n rb_1.plot(color='steelblue', lw=3, alpha=0.6, ax=ax, **kwargs)\n rb_2 = timeseries.rolling_beta(\n returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 12)\n rb_2.plot(color='grey', lw=3, alpha=0.4, ax=ax, **kwargs)\n ax.axhline(rb_1.mean(), color='steelblue', linestyle='--', lw=3)\n ax.axhline(0.0, color='black', linestyle='-', lw=2)\n\n ax.set_xlabel('')\n ax.legend(['6-mo',\n '12-mo'],\n loc=legend_loc, frameon=True, framealpha=0.5)\n ax.set_ylim((-1.0, 1.0))\n return ax\n\n\ndef plot_rolling_volatility(returns, factor_returns=None,\n rolling_window=APPROX_BDAYS_PER_MONTH * 6,\n legend_loc='best', ax=None, **kwargs):\n \"\"\"\n Plots the rolling volatility versus date.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor for which the\n benchmark rolling volatility is computed. 
Usually a benchmark such\n as market returns.\n - This is in the same style as returns.\n rolling_window : int, optional\n The days window over which to compute the volatility.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n y_axis_formatter = FuncFormatter(utils.two_dec_places)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n\n rolling_vol_ts = timeseries.rolling_volatility(\n returns, rolling_window)\n rolling_vol_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,\n **kwargs)\n if factor_returns is not None:\n rolling_vol_ts_factor = timeseries.rolling_volatility(\n factor_returns, rolling_window)\n rolling_vol_ts_factor.plot(alpha=.7, lw=3, color='grey', ax=ax,\n **kwargs)\n\n ax.set_title('Rolling volatility (6-month)')\n ax.axhline(\n rolling_vol_ts.mean(),\n color='steelblue',\n linestyle='--',\n lw=3)\n\n ax.axhline(0.0, color='black', linestyle='-', lw=2)\n\n ax.set_ylabel('Volatility')\n ax.set_xlabel('')\n if factor_returns is None:\n ax.legend(['Volatility', 'Average volatility'],\n loc=legend_loc, frameon=True, framealpha=0.5)\n else:\n ax.legend(['Volatility', 'Benchmark volatility', 'Average volatility'],\n loc=legend_loc, frameon=True, framealpha=0.5)\n return ax\n\n\ndef plot_rolling_sharpe(returns, factor_returns=None,\n rolling_window=APPROX_BDAYS_PER_MONTH * 6,\n legend_loc='best', ax=None, **kwargs):\n \"\"\"\n Plots the rolling Sharpe ratio versus date.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor for\n which the benchmark rolling Sharpe is computed. 
Usually\n a benchmark such as market returns.\n - This is in the same style as returns.\n rolling_window : int, optional\n The days window over which to compute the sharpe ratio.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n y_axis_formatter = FuncFormatter(utils.two_dec_places)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n\n rolling_sharpe_ts = timeseries.rolling_sharpe(\n returns, rolling_window)\n rolling_sharpe_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,\n **kwargs)\n\n if factor_returns is not None:\n rolling_sharpe_ts_factor = timeseries.rolling_sharpe(\n factor_returns, rolling_window)\n rolling_sharpe_ts_factor.plot(alpha=.7, lw=3, color='grey', ax=ax,\n **kwargs)\n\n ax.set_title('Rolling Sharpe ratio (6-month)')\n ax.axhline(\n rolling_sharpe_ts.mean(),\n color='steelblue',\n linestyle='--',\n lw=3)\n ax.axhline(0.0, color='black', linestyle='-', lw=3)\n\n ax.set_ylabel('Sharpe ratio')\n ax.set_xlabel('')\n if factor_returns is None:\n ax.legend(['Sharpe', 'Average'],\n loc=legend_loc, frameon=True, framealpha=0.5)\n else:\n ax.legend(['Sharpe', 'Benchmark Sharpe', 'Average'],\n loc=legend_loc, frameon=True, framealpha=0.5)\n\n return ax\n\n\ndef plot_gross_leverage(returns, positions, ax=None, **kwargs):\n \"\"\"\n Plots gross leverage versus date.\n\n Gross leverage is the sum of long and short exposure per share\n divided by net asset value.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n gl = timeseries.gross_lev(positions)\n gl.plot(lw=0.5, color='limegreen', legend=False, ax=ax, **kwargs)\n\n ax.axhline(gl.mean(), color='g', linestyle='--', lw=3)\n\n ax.set_title('Gross leverage')\n ax.set_ylabel('Gross leverage')\n ax.set_xlabel('')\n return ax\n\n\ndef plot_exposures(returns, positions, ax=None, **kwargs):\n \"\"\"\n Plots a cake chart of the long and short exposure.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n positions_alloc : pd.DataFrame\n Portfolio allocation of positions. 
See\n pos.get_percent_alloc.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n pos_no_cash = positions.drop('cash', axis=1)\n l_exp = pos_no_cash[pos_no_cash > 0].sum(axis=1) / positions.sum(axis=1)\n s_exp = pos_no_cash[pos_no_cash < 0].sum(axis=1) / positions.sum(axis=1)\n net_exp = pos_no_cash.sum(axis=1) / positions.sum(axis=1)\n\n ax.fill_between(l_exp.index,\n 0,\n l_exp.values,\n label='Long', color='green', alpha=0.5)\n ax.fill_between(s_exp.index,\n 0,\n s_exp.values,\n label='Short', color='red', alpha=0.5)\n ax.plot(net_exp.index, net_exp.values,\n label='Net', color='black', linestyle='dotted')\n\n ax.set_xlim((returns.index[0], returns.index[-1]))\n ax.set_title(\"Exposure\")\n ax.set_ylabel('Exposure')\n ax.legend(loc='lower left', frameon=True, framealpha=0.5)\n ax.set_xlabel('')\n return ax\n\n\ndef show_and_plot_top_positions(returns, positions_alloc,\n show_and_plot=2, hide_positions=False,\n legend_loc='real_best', ax=None,\n **kwargs):\n \"\"\"\n Prints and/or plots the exposures of the top 10 held positions of\n all time.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n positions_alloc : pd.DataFrame\n Portfolio allocation of positions. See pos.get_percent_alloc.\n show_and_plot : int, optional\n By default, this is 2, and both prints and plots.\n If this is 0, it will only plot; if 1, it will only print.\n hide_positions : bool, optional\n If True, will not output any symbol names.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n By default, the legend will display below the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes, conditional\n The axes that were plotted on.\n\n \"\"\"\n positions_alloc = positions_alloc.copy()\n positions_alloc.columns = positions_alloc.columns.map(utils.format_asset)\n\n df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs(\n positions_alloc)\n\n if show_and_plot == 1 or show_and_plot == 2:\n utils.print_table(pd.DataFrame(df_top_long * 100, columns=['max']),\n float_format='{0:.2f}%'.format,\n name='Top 10 long positions of all time')\n\n utils.print_table(pd.DataFrame(df_top_short * 100, columns=['max']),\n float_format='{0:.2f}%'.format,\n name='Top 10 short positions of all time')\n\n utils.print_table(pd.DataFrame(df_top_abs * 100, columns=['max']),\n float_format='{0:.2f}%'.format,\n name='Top 10 positions of all time')\n\n if show_and_plot == 0 or show_and_plot == 2:\n\n if ax is None:\n ax = plt.gca()\n\n positions_alloc[df_top_abs.index].plot(\n title='Portfolio allocation over time, only top 10 holdings',\n alpha=0.5, ax=ax, **kwargs)\n\n # Place legend below plot, shrink plot by 20%\n if legend_loc == 'real_best':\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n\n # Put a legend below current axis\n ax.legend(loc='upper center', frameon=True, framealpha=0.5,\n bbox_to_anchor=(0.5, -0.14), ncol=5)\n else:\n ax.legend(loc=legend_loc)\n\n ax.set_xlim((returns.index[0], returns.index[-1]))\n ax.set_ylabel('Exposure by holding')\n\n if hide_positions:\n ax.legend_.remove()\n\n return ax\n\n\ndef 
plot_max_median_position_concentration(positions, ax=None, **kwargs):\n \"\"\"\n Plots the max and median of long and short position concentrations\n over time.\n\n Parameters\n ----------\n positions : pd.DataFrame\n The positions that the strategy takes over time.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n alloc_summary = pos.get_max_median_position_concentration(positions)\n colors = ['mediumblue', 'steelblue', 'tomato', 'firebrick']\n alloc_summary.plot(linewidth=1, color=colors, alpha=0.6, ax=ax)\n\n ax.legend(loc='center left', frameon=True, framealpha=0.5)\n ax.set_ylabel('Exposure')\n ax.set_title('Long/short max and median position concentration')\n\n return ax\n\n\ndef plot_sector_allocations(returns, sector_alloc, ax=None, **kwargs):\n \"\"\"\n Plots the sector exposures of the portfolio over time.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n sector_alloc : pd.DataFrame\n Portfolio allocation of positions. See pos.get_sector_alloc.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n sector_alloc.plot(title='Sector allocation over time',\n alpha=0.5, ax=ax, **kwargs)\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n\n # Put a legend below current axis\n ax.legend(loc='upper center', frameon=True, framealpha=0.5,\n bbox_to_anchor=(0.5, -0.14), ncol=5)\n\n ax.set_xlim((sector_alloc.index[0], sector_alloc.index[-1]))\n ax.set_ylabel('Exposure by sector')\n ax.set_xlabel('')\n\n return ax\n\n\ndef plot_return_quantiles(returns, live_start_date=None, ax=None, **kwargs):\n \"\"\"\n Creates a box plot of daily, weekly, and monthly return\n distributions.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n live_start_date : datetime, optional\n The point in time when the strategy began live trading, after\n its backtest period.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n is_returns = returns if live_start_date is None \\\n else returns.loc[returns.index < live_start_date]\n is_weekly = ep.aggregate_returns(is_returns, 'weekly')\n is_monthly = ep.aggregate_returns(is_returns, 'monthly')\n sns.boxplot(data=[is_returns, is_weekly, is_monthly],\n palette=[\"#4c72B0\", \"#55A868\", \"#CCB974\"],\n ax=ax, **kwargs)\n\n if live_start_date is not None:\n oos_returns = returns.loc[returns.index >= live_start_date]\n oos_weekly = ep.aggregate_returns(oos_returns, 'weekly')\n oos_monthly = ep.aggregate_returns(oos_returns, 'monthly')\n\n sns.swarmplot(data=[oos_returns, oos_weekly, oos_monthly], ax=ax,\n color=\"red\",\n marker=\"d\", **kwargs)\n red_dots = matplotlib.lines.Line2D([], [], color=\"red\", marker=\"d\",\n label=\"Out-of-sample data\",\n linestyle='')\n ax.legend(handles=[red_dots], frameon=True, framealpha=0.5)\n ax.set_xticklabels(['Daily', 'Weekly', 'Monthly'])\n 
ax.set_title('Return quantiles')\n\n return ax\n\n\ndef plot_turnover(returns, transactions, positions, turnover_denom='AGB',\n legend_loc='best', ax=None, **kwargs):\n \"\"\"\n Plots turnover vs. date.\n\n Turnover is the number of shares traded for a period as a fraction\n of total shares.\n\n Displays daily total, daily average per month, and all-time daily\n average.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n turnover_denom : str, optional\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\n legend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n y_axis_formatter = FuncFormatter(utils.two_dec_places)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n\n df_turnover = txn.get_turnover(positions, transactions, turnover_denom)\n df_turnover_by_month = df_turnover.resample(\"M\").mean()\n df_turnover.plot(color='steelblue', alpha=1.0, lw=0.5, ax=ax, **kwargs)\n df_turnover_by_month.plot(\n color='orangered',\n alpha=0.5,\n lw=2,\n ax=ax,\n **kwargs)\n ax.axhline(\n df_turnover.mean(), color='steelblue', linestyle='--', lw=3, alpha=1.0)\n ax.legend(['Daily turnover',\n 'Average daily turnover, by month',\n 'Average daily turnover, net'],\n loc=legend_loc, frameon=True, framealpha=0.5)\n ax.set_title('Daily turnover')\n ax.set_xlim((returns.index[0], returns.index[-1]))\n ax.set_ylim((0, 2))\n ax.set_ylabel('Turnover')\n ax.set_xlabel('')\n return ax\n\n\ndef plot_slippage_sweep(returns, positions, transactions,\n slippage_params=(3, 8, 10, 12, 15, 20, 50),\n ax=None, **kwargs):\n \"\"\"\n Plots equity curves at different per-dollar slippage assumptions.\n\n Parameters\n ----------\n returns : pd.Series\n Timeseries of portfolio returns to be adjusted for various\n degrees of slippage.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n slippage_params: tuple\n Slippage parameters to apply to the return time series (in\n basis points).\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n slippage_sweep = pd.DataFrame()\n for bps in slippage_params:\n adj_returns = txn.adjust_returns_for_slippage(returns, positions,\n transactions, bps)\n label = str(bps) + \" bps\"\n slippage_sweep[label] = ep.cum_returns(adj_returns, 1)\n\n slippage_sweep.plot(alpha=1.0, lw=0.5, ax=ax)\n\n ax.set_title('Cumulative returns given additional per-dollar slippage')\n ax.set_ylabel('')\n\n ax.legend(loc='center left', frameon=True, framealpha=0.5)\n\n return ax\n\n\ndef plot_slippage_sensitivity(returns, positions, transactions,\n ax=None, **kwargs):\n \"\"\"\n Plots curve relating per-dollar slippage to average annual returns.\n\n Parameters\n ----------\n returns : pd.Series\n Timeseries of portfolio returns to be adjusted for various\n degrees of slippage.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n avg_returns_given_slippage = pd.Series()\n for bps in range(1, 100):\n adj_returns = txn.adjust_returns_for_slippage(returns, positions,\n transactions, bps)\n avg_returns = ep.annual_return(adj_returns)\n avg_returns_given_slippage.loc[bps] = avg_returns\n\n avg_returns_given_slippage.plot(alpha=1.0, lw=2, ax=ax)\n\n ax.set_title('Average annual returns given additional per-dollar slippage')\n ax.set_xticks(np.arange(0, 100, 10))\n ax.set_ylabel('Average annual return')\n ax.set_xlabel('Per-dollar slippage (bps)')\n\n return ax\n\n\ndef plot_capacity_sweep(returns, transactions, market_data,\n bt_starting_capital,\n min_pv=100000,\n max_pv=300000000,\n step_size=1000000,\n ax=None):\n \"\"\"\n Plots the Sharpe ratio of slippage-penalized returns over a sweep\n of capital base values.\n \"\"\"\n txn_daily_w_bar = capacity.daily_txns_with_bar_data(transactions,\n market_data)\n\n capital_base_sweep = pd.Series()\n for start_pv in range(min_pv, max_pv, step_size):\n adj_ret = capacity.apply_slippage_penalty(returns,\n txn_daily_w_bar,\n start_pv,\n bt_starting_capital)\n sharpe = ep.sharpe_ratio(adj_ret)\n if sharpe < -1:\n break\n capital_base_sweep.loc[start_pv] = sharpe\n capital_base_sweep.index = capital_base_sweep.index / MM_DISPLAY_UNIT\n\n if ax is None:\n ax = plt.gca()\n\n capital_base_sweep.plot(ax=ax)\n ax.set_xlabel('Capital base ($mm)')\n ax.set_ylabel('Sharpe ratio')\n ax.set_title('Capital base performance sweep')\n\n return ax\n\n\ndef plot_daily_turnover_hist(transactions, positions, turnover_denom='AGB',\n ax=None, **kwargs):\n \"\"\"\n Plots a histogram of daily turnover rates.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n positions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n turnover_denom : str, optional\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n turnover = txn.get_turnover(positions, transactions, turnover_denom)\n sns.distplot(turnover, ax=ax, **kwargs)\n ax.set_title('Distribution of daily turnover rates')\n ax.set_xlabel('Turnover rate')\n return ax\n\n\ndef plot_daily_volume(returns, transactions, ax=None, **kwargs):\n \"\"\"\n Plots trading volume per day vs. date.\n\n Also displays all-time daily average.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n daily_txn = txn.get_txn_vol(transactions)\n daily_txn.txn_shares.plot(alpha=1.0, lw=0.5, ax=ax, **kwargs)\n ax.axhline(daily_txn.txn_shares.mean(), color='steelblue',\n linestyle='--', lw=3, alpha=1.0)\n ax.set_title('Daily trading volume')\n ax.set_xlim((returns.index[0], returns.index[-1]))\n ax.set_ylabel('Amount of shares traded')\n ax.set_xlabel('')\n return ax\n\n\ndef plot_txn_time_hist(transactions, bin_minutes=5, tz='America/New_York',\n ax=None, **kwargs):\n \"\"\"\n Plots a histogram of transaction times, binning the times into\n buckets of a given duration.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\n bin_minutes : float, optional\n Sizes of the bins in minutes, defaults to 5 minutes.\n tz : str, optional\n Time zone to plot against. 
Note that if the specified\n zone does not apply daylight savings, the distribution\n may be partially offset.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n txn_time = transactions.copy()\n\n txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz))\n txn_time.index = txn_time.index.map(lambda x: x.hour * 60 + x.minute)\n txn_time['trade_value'] = (txn_time.amount * txn_time.price).abs()\n txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961))\n txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutes\n txn_time = txn_time.groupby(level=0).sum()\n\n txn_time['time_str'] = txn_time.index.map(lambda x:\n str(datetime.time(int(x / 60),\n x % 60))[:-3])\n\n trade_value_sum = txn_time.trade_value.sum()\n txn_time.trade_value = txn_time.trade_value.fillna(0) / trade_value_sum\n\n ax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs)\n\n ax.set_xlim(570, 960)\n ax.set_xticks(txn_time.index[::int(30 / bin_minutes)])\n ax.set_xticklabels(txn_time.time_str[::int(30 / bin_minutes)])\n ax.set_title('Transaction time distribution')\n ax.set_ylabel('Proportion')\n ax.set_xlabel('')\n return ax\n\n\ndef show_worst_drawdown_periods(returns, top=5):\n \"\"\"\n Prints information about the worst drawdown periods.\n\n Prints peak dates, valley dates, recovery dates, and net\n drawdowns.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n top : int, optional\n Amount of top drawdowns periods to plot (default 5).\n \"\"\"\n\n drawdown_df = timeseries.gen_drawdown_table(returns, top=top)\n utils.print_table(\n drawdown_df.sort_values('Net drawdown in %', ascending=False),\n name='Worst drawdown periods',\n float_format='{0:.2f}'.format,\n )\n\n\ndef plot_monthly_returns_timeseries(returns, ax=None, **kwargs):\n \"\"\"\n Plots monthly returns as a timeseries.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n **kwargs, optional\n Passed to seaborn plotting function.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n def cumulate_returns(x):\n return ep.cum_returns(x)[-1]\n\n if ax is None:\n ax = plt.gca()\n\n monthly_rets = returns.resample('M').apply(lambda x: cumulate_returns(x))\n monthly_rets = monthly_rets.to_period()\n\n sns.barplot(x=monthly_rets.index,\n y=monthly_rets.values,\n color='steelblue')\n\n _, labels = plt.xticks()\n plt.setp(labels, rotation=90)\n\n # only show x-labels on year boundary\n xticks_coord = []\n xticks_label = []\n count = 0\n for i in monthly_rets.index:\n if i.month == 1:\n xticks_label.append(i)\n xticks_coord.append(count)\n # plot yearly boundary line\n ax.axvline(count, color='gray', ls='--', alpha=0.3)\n\n count += 1\n\n ax.axhline(0.0, color='darkgray', ls='-')\n ax.set_xticks(xticks_coord)\n ax.set_xticklabels(xticks_label)\n\n return ax\n\n\ndef plot_round_trip_lifetimes(round_trips, disp_amount=16, lsize=18, ax=None):\n \"\"\"\n Plots timespans and directions of a sample of round trip trades.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full 
explanation in round_trips.extract_round_trips\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n if ax is None:\n ax = plt.subplot()\n\n symbols_sample = round_trips.symbol.unique()\n np.random.seed(1)\n sample = np.random.choice(round_trips.symbol.unique(), replace=False,\n size=min(disp_amount, len(symbols_sample)))\n sample_round_trips = round_trips[round_trips.symbol.isin(sample)]\n\n symbol_idx = pd.Series(np.arange(len(sample)), index=sample)\n\n for symbol, sym_round_trips in sample_round_trips.groupby('symbol'):\n for _, row in sym_round_trips.iterrows():\n c = 'b' if row.long else 'r'\n y_ix = symbol_idx[symbol] + 0.05\n ax.plot([row['open_dt'], row['close_dt']],\n [y_ix, y_ix], color=c,\n linewidth=lsize, solid_capstyle='butt')\n\n ax.set_yticks(range(disp_amount))\n ax.set_yticklabels([utils.format_asset(s) for s in sample])\n\n ax.set_ylim((-0.5, min(len(sample), disp_amount) - 0.5))\n blue = patches.Rectangle([0, 0], 1, 1, color='b', label='Long')\n red = patches.Rectangle([0, 0], 1, 1, color='r', label='Short')\n leg = ax.legend(handles=[blue, red], loc='lower left',\n frameon=True, framealpha=0.5)\n leg.get_frame().set_edgecolor('black')\n ax.grid(False)\n\n return ax\n\n\ndef show_profit_attribution(round_trips):\n \"\"\"\n Prints the share of total PnL contributed by each\n traded name.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n total_pnl = round_trips['pnl'].sum()\n pnl_attribution = round_trips.groupby('symbol')['pnl'].sum() / total_pnl\n pnl_attribution.name = ''\n\n pnl_attribution.index = pnl_attribution.index.map(utils.format_asset)\n utils.print_table(\n pnl_attribution.sort_values(\n inplace=False,\n ascending=False,\n ),\n name='Profitability (PnL / PnL total) per name',\n float_format='{:.2%}'.format,\n )\n\n\ndef plot_prob_profit_trade(round_trips, ax=None):\n \"\"\"\n Plots a probability distribution for the event of making\n a profitable trade.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n\n Returns\n -------\n ax : matplotlib.Axes\n The axes that were plotted on.\n \"\"\"\n\n x = np.linspace(0, 1., 500)\n\n round_trips['profitable'] = round_trips.pnl > 0\n\n dist = sp.stats.beta(round_trips.profitable.sum(),\n (~round_trips.profitable).sum())\n y = dist.pdf(x)\n lower_perc = dist.ppf(.025)\n upper_perc = dist.ppf(.975)\n\n lower_plot = dist.ppf(.001)\n upper_plot = dist.ppf(.999)\n\n if ax is None:\n ax = plt.subplot()\n\n ax.plot(x, y)\n ax.axvline(lower_perc, color='0.5')\n ax.axvline(upper_perc, color='0.5')\n\n ax.set_xlabel('Probability of making a profitable decision')\n ax.set_ylabel('Belief')\n ax.set_xlim(lower_plot, upper_plot)\n ax.set_ylim((0, y.max() + 1.))\n\n return ax\n\n\ndef plot_cones(name, bounds, oos_returns, num_samples=1000, ax=None,\n cone_std=(1., 1.5, 2.), random_seed=None, num_strikes=3):\n \"\"\"\n Plots the upper and lower bounds of an n standard deviation\n cone of forecasted cumulative returns. 
Redraws a new cone when\n cumulative returns fall outside of last cone drawn.\n\n Parameters\n ----------\n name : str\n Account name to be used as figure title.\n bounds : pandas.core.frame.DataFrame\n Contains upper and lower cone boundaries. Column names are\n strings corresponding to the number of standard deviations\n above (positive) or below (negative) the projected mean\n cumulative returns.\n oos_returns : pandas.core.frame.DataFrame\n Non-cumulative out-of-sample returns.\n num_samples : int\n Number of samples to draw from the in-sample daily returns.\n Each sample will be an array with length num_days.\n A higher number of samples will generate a more accurate\n bootstrap cone.\n ax : matplotlib.Axes, optional\n Axes upon which to plot.\n cone_std : list of int/float\n Number of standard deviations to use in the boundaries of\n the cone. If multiple values are passed, cone bounds will\n be generated for each value.\n random_seed : int\n Seed for the pseudorandom number generator used by the pandas\n sample method.\n num_strikes : int\n Upper limit for number of cones drawn. Can be anything from 0 to 3.\n\n Returns\n -------\n Returns either an ax or a fig object, but not both. If a\n matplotlib.Axes instance is passed in as ax, then it will be modified\n and returned. This allows for users to plot interactively in a Jupyter\n notebook. When no ax object is passed in, a matplotlib.figure instance\n is generated and returned. This figure can then be used to save\n the plot as an image without viewing it.\n\n ax : matplotlib.Axes\n The axes that were plotted on.\n fig : matplotlib.figure\n The figure instance which contains all the plot elements.\n \"\"\"\n\n if ax is None:\n fig = figure.Figure(figsize=(10, 8))\n FigureCanvasAgg(fig)\n axes = fig.add_subplot(111)\n else:\n axes = ax\n\n returns = ep.cum_returns(oos_returns, starting_value=1.)\n bounds_tmp = bounds.copy()\n returns_tmp = returns.copy()\n cone_start = returns.index[0]\n colors = [\"green\", \"orange\", \"orangered\", \"darkred\"]\n\n for c in range(num_strikes + 1):\n if c > 0:\n tmp = returns.loc[cone_start:]\n bounds_tmp = bounds_tmp.iloc[0:len(tmp)]\n bounds_tmp = bounds_tmp.set_index(tmp.index)\n crossing = (tmp < bounds_tmp[float(-2.)].iloc[:len(tmp)])\n if crossing.sum() <= 0:\n break\n cone_start = crossing.loc[crossing].index[0]\n returns_tmp = returns.loc[cone_start:]\n bounds_tmp = (bounds - (1 - returns.loc[cone_start]))\n for std in cone_std:\n x = returns_tmp.index\n y1 = bounds_tmp[float(std)].iloc[:len(returns_tmp)]\n y2 = bounds_tmp[float(-std)].iloc[:len(returns_tmp)]\n axes.fill_between(x, y1, y2, color=colors[c], alpha=0.5)\n\n # Plot returns line graph\n label = 'Cumulative returns = {:.2f}%'.format((returns.iloc[-1] - 1) * 100)\n axes.plot(returns.index, returns.values, color='black', lw=3.,\n label=label)\n\n if name is not None:\n axes.set_title(name)\n axes.axhline(1, color='black', alpha=0.2)\n axes.legend(frameon=True, framealpha=0.5)\n\n if ax is None:\n return fig\n else:\n return axes\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.maximum.accumulate", "pandas.Series", "numpy.random.seed", "numpy.linspace", "pandas.isnull", "numpy.arange", "matplotlib.patches.Rectangle", "matplotlib.lines.Line2D", "matplotlib.figure.Figure", "matplotlib.backends.backend_agg.FigureCanvasAgg", "pandas.DataFrame", "numpy.round", "matplotlib.pyplot.subplot", "matplotlib.pyplot.setp", "matplotlib.ticker.FuncFormatter", "matplotlib.pyplot.xticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
isayevlab/DRACON
[ "b4dc6fcd27988bb4a20a5dade9a980e82acc0014" ]
[ "server/pages/infer_script.py" ]
[ "import torch\nimport pickle\nimport yaml\nimport pandas as pd\n\nfrom torch import nn\nfrom rdkit import Chem\nfrom lib.node_classification_model.models import RGCNNTrClassifier\nfrom lib.dataset.build_dgl_graph import get_bonds, get_nodes\nfrom lib.dataset.torch_dataset import Dataset\nfrom lib.general_utils import convert\nfrom lib.draw_utils import get_molecule_svg\nfrom lib.dataset.build_dataset import build_dataset\nfrom rdkit.Chem import rdDepictor\n\n\ndef infer(smiles, device='cpu'):\n with open('../experiments/MT_EGTBF_demo.yml', 'r') as ymlfile:\n config = yaml.load(ymlfile, Loader=yaml.FullLoader)\n state_dict = torch.load('../data/models/model_50_demo.pth', map_location=device)\n model_cfg = convert(config[\"model\"])\n data_cfg = convert(config[\"dataset\"])\n paths = convert(config[\"paths\"])\n meta = pickle.load(open(paths.dataset_path + '/meta.pkl', 'rb'))\n\n node2label = get_nodes(meta['node'], n_molecule_level=data_cfg.n_molecule_level,\n n_reaction_level=data_cfg.n_reaction_level)\n bond2label = get_bonds(meta['type'], n_molecule_level=data_cfg.n_molecule_level,\n n_reaction_level=data_cfg.n_reaction_level,\n self_bond=data_cfg.self_bond)\n num_rels = len(bond2label)\n pad_length = data_cfg.max_num_atoms + 15 * data_cfg.n_molecule_level + \\\n data_cfg.n_molecule_level * data_cfg.n_reaction_level\n num_nodes = pad_length\n\n model = RGCNNTrClassifier([len(node2label)] + data_cfg.feature_sizes,\n num_nodes,\n 1,\n [model_cfg.n_hidden] + [model_cfg.feature_embed_size] * len(data_cfg.feature_sizes),\n num_rels,\n model_cfg.num_conv_layers,\n model_cfg.num_trans_layers,\n model_cfg.num_fcn_layers,\n model_cfg.num_attention_heads,\n model_cfg.num_model_heads,\n )\n model = model.to(device)\n model.load_state_dict(state_dict)\n\n df = pd.DataFrame([smiles + '>>CC'], columns=['smarts'])\n dataset = build_dataset(df, atom_labels='False')\n length = len(dataset[0]['reactants']['nodes'])\n dataset[0]['reactants']['features'][-1] += 5\n print(dataset[0])\n dataset = Dataset(dataset, device=device, pad_length=pad_length,\n bond2label=bond2label, node2label=node2label, feature_idxs=data_cfg.feature_idxs,\n target_main_product=False, target_center=False,\n n_molecule_level=data_cfg.n_molecule_level, n_reaction_level=data_cfg.n_reaction_level)\n g = dataset[0]\n sigmoid = nn.Sigmoid()\n model.eval()\n with torch.no_grad():\n outputs = model(g)\n predicted_mp = (sigmoid(outputs[0]) > .5).float().cpu().detach().numpy()[0]\n predicted_c = (sigmoid(outputs[1]) > .5).float().cpu().detach().numpy()[0]\n predicted_c[predicted_mp == 0] = 0\n predicted = predicted_mp + predicted_c\n\n fontsize = 0.98\n gt_colors = {1: (0.8, 1, 0.8), 2: (0.5, 0.8, 1)}\n r_mol = Chem.MolFromSmiles(smiles.split('>')[0])\n rdDepictor.Compute2DCoords(r_mol)\n r_svg = get_molecule_svg(r_mol, target=predicted[:length], target_type='GT',\n gt_colors=gt_colors, dpa=100, fontsize=fontsize)\n return r_svg\n" ]
[ [ "torch.nn.Sigmoid", "torch.no_grad", "pandas.DataFrame", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
bskp/PlannedHoliday
[ "79df5bd392b2b0899893af9f5b095cd1739686a2" ]
[ "eq.py" ]
[ "import math\n\nimport numpy as np\nimport sounddevice as sd\nfrom lib import *\n\nscreen = SevenBySeven()\nsamplerate = sd.query_devices(None, 'input')['default_samplerate']\n\nlow, high = (100, 4000)\nbins = 7 \nblock_duration = 20 #ms\n\ndelta_f = (high - low) / (bins - 1)\nfftsize = math.ceil(samplerate / delta_f)\nlow_bin = math.floor(low / delta_f)\n\nnormalization_tau = 4 # seconds\nd = block_duration/normalization_tau/1000\n\navgs = np.ones(7)/100 # initial values\n\ncolors = 'black', 'blue', 'cyan', 'yellow', 'red'\n\ndef callback(indata, frames, time, status):\n global avgs\n\n if any(indata):\n\n ramp = np.linspace(0.0, 1.0, 7)\n #ramp = np.sqrt(ramp)\n ramp = ramp - 1\n ref = ramp.reshape((7,1)).repeat(7, axis=1)\n\n magnitude = np.abs(np.fft.rfft(indata[:, 0], n=fftsize))\n\n vals = magnitude[low_bin:low_bin + bins] / fftsize\n avgs = avgs*(1 - d) + d*vals\n normalized = vals/avgs*0.25\n\n i = gradient_map(ref + normalized, colors)\n old = decay(screen.current, 0.1)\n i = mix(old, i)\n\n for ch in avgs:\n print(\"%4.0f \" % (ch*1000), end='')\n print()\n\n screen.show(i)\n\n\n #flatten out averages\n #avgs = np.ones(7)*np.average(avgs)\n\n \n else:\n print('no input')\n\nwith sd.InputStream(device=None, channels=1, callback=callback,\n blocksize=int(samplerate * block_duration / 1000),\n samplerate=samplerate):\n while True:\n response = input()\n if response in ('', 'q', 'Q'):\n break\n for ch in response:\n if ch == '+':\n gain *= 2\n elif ch == '-':\n gain /= 2" ]
[ [ "numpy.fft.rfft", "numpy.linspace", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rnitin/python-ar_drone2
[ "64230f0fc39d42d86924a674512fd57796439111" ]
[ "libardrone/libardrone.py" ]
[ "# Copyright (c) 2011 Bastian Venthur\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n\"\"\"\nPython library for the AR.Drone.\n\nV.1 This module was tested with Python 2.6.6 and AR.Drone vanilla firmware 1.5.1.\nV.2.alpha\n\"\"\"\n\n# Thx to Bastian Venthur and jbpassot\n# https://github.com/venthur\n# https://github.com/jbpassot\n\nimport logging\nimport socket\nimport struct\nimport sys\nimport threading\nimport multiprocessing\n\nimport arnetwork\n\nimport time\nimport numpy as np\nfrom mutex import mutex\n\nARDRONE_NAVDATA_PORT = 5554\nARDRONE_VIDEO_PORT = 5555\nARDRONE_COMMAND_PORT = 5556\nARDRONE_CONTROL_PORT = 5559\n\nSESSION_ID = \"943dac23\"\nUSER_ID = \"36355d78\"\nAPP_ID = \"21d958e4\"\n\nDEBUG = True\n\n\nclass ARDrone(object):\n \"\"\"ARDrone Class.\n\n Instanciate this class to control your drone and receive decoded video and\n navdata.\n Possible value for video codec (drone2):\n NULL_CODEC = 0,\n UVLC_CODEC = 0x20, // codec_type value is used for START_CODE\n P264_CODEC = 0x40,\n MP4_360P_CODEC = 0x80,\n H264_360P_CODEC = 0x81,\n MP4_360P_H264_720P_CODEC = 0x82,\n H264_720P_CODEC = 0x83,\n MP4_360P_SLRS_CODEC = 0x84,\n H264_360P_SLRS_CODEC = 0x85,\n H264_720P_SLRS_CODEC = 0x86,\n H264_AUTO_RESIZE_CODEC = 0x87, // resolution is automatically adjusted according to bitrate\n MP4_360P_H264_360P_CODEC = 0x88,\n \"\"\"\n\n def __init__(self, is_ar_drone_2=True, hd=False):\n\n self.seq_nr = 1\n self.timer_t = 0.2\n self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)\n self.lock = threading.Lock()\n self.speed = 0.2\n\n self.image_shape = (720, 1080, 1)\n\n time.sleep(0.2)\n self.config_ids_string = [SESSION_ID, USER_ID, APP_ID]\n self.configure_multisession(SESSION_ID, USER_ID, APP_ID, self.config_ids_string)\n self.set_session_id (self.config_ids_string, SESSION_ID)\n time.sleep(0.2)\n self.set_profile_id(self.config_ids_string, USER_ID)\n time.sleep(0.2)\n self.set_app_id(self.config_ids_string, APP_ID)\n time.sleep(0.2)\n self.set_video_bitrate_control_mode(self.config_ids_string, \"1\")\n time.sleep(0.2)\n self.set_video_bitrate(self.config_ids_string, \"500\")\n time.sleep(0.2)\n self.set_max_bitrate(self.config_ids_string, \"500\")\n time.sleep(0.2)\n self.set_fps(self.config_ids_string, \"30\")\n time.sleep(0.2)\n\n self.set_video_codec(self.config_ids_string, 0x80)\n\n self.last_command_is_hovering = True\n self.com_pipe, com_pipe_other = multiprocessing.Pipe()\n\n self.navdata = dict()\n self.navdata[0] = dict(zip(['ctrl_state', 
'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n\n self.network_process = arnetwork.ARDroneNetworkProcess(com_pipe_other, is_ar_drone_2, self)\n self.network_process.start()\n\n self.image = np.zeros(self.image_shape, np.uint8)\n self.time = 0\n\n self.last_command_is_hovering = True\n\n time.sleep(1.0)\n\n self.at(at_config_ids , self.config_ids_string)\n\n \n\n\n def takeoff(self):\n \"\"\"Make the drone take off.\"\"\"\n self.at(at_ftrim)\n self.at(at_config, \"control:altitude_max\", \"20000\")\n self.at(at_ref, True)\n\n def land(self):\n \"\"\"Make the drone land.\"\"\"\n self.at(at_ref, False)\n\n def hover(self):\n \"\"\"Make the drone hover.\"\"\"\n self.at(at_pcmd, False, 0, 0, 0, 0)\n\n def move_left(self):\n \"\"\"Make the drone move left.\"\"\"\n self.at(at_pcmd, True, -self.speed, 0, 0, 0)\n\n def move_right(self):\n \"\"\"Make the drone move right.\"\"\"\n self.at(at_pcmd, True, self.speed, 0, 0, 0)\n\n def move_up(self):\n \"\"\"Make the drone rise upwards.\"\"\"\n self.at(at_pcmd, True, 0, 0, self.speed, 0)\n\n def move_down(self):\n \"\"\"Make the drone descend downwards.\"\"\"\n self.at(at_pcmd, True, 0, 0, -self.speed, 0)\n\n def move_forward(self):\n \"\"\"Make the drone move forward.\"\"\"\n self.at(at_pcmd, True, 0, -self.speed, 0, 0)\n\n def move_backward(self):\n \"\"\"Make the drone move backwards.\"\"\"\n self.at(at_pcmd, True, 0, self.speed, 0, 0)\n\n def turn_left(self):\n \"\"\"Make the drone rotate left.\"\"\"\n self.at(at_pcmd, True, 0, 0, 0, -self.speed)\n\n def turn_right(self):\n \"\"\"Make the drone rotate right.\"\"\"\n self.at(at_pcmd, True, 0, 0, 0, self.speed)\n\n def reset(self):\n \"\"\"Toggle the drone's emergency state.\"\"\"\n self.at(at_ftrim)\n time.sleep(0.1)\n self.at(at_ref, False, True)\n time.sleep(0.1)\n self.at(at_ref, False, False)\n \n\n def trim(self):\n \"\"\"Flat trim the drone.\"\"\"\n self.at(at_ftrim)\n\n def set_speed(self, speed):\n \"\"\"Set the drone's speed.\n\n Valid values are floats from [0..1]\n \"\"\"\n self.speed = speed\n \n def event_boom(self):\n \"\"\"Boom event\"\"\"\n self.at(at_led, 13,2,4)\n self.at(at_anim, 3, 1000)\n \n def event_turnarround(self):\n \"\"\"Make the drone turn around.\"\"\"\n self.at(at_led, 13,2,4)\n self.at(at_anim, 6, 5000)\n \n def event_yawshake(self):\n \"\"\"Make the drone execute yawshake YEAH !\"\"\"\n self.at(at_led, 13,2,4)\n self.at(at_anim, 8, 2000)\n \n def event_yawdance(self):\n \"\"\"Make the drone execute yawdance YEAH !\"\"\"\n self.at(at_led, 13,2,4)\n self.at(at_anim, 9, 5000)\n \n def event_thetamixed(self):\n \"\"\"Make the drone execute thetamixed !\"\"\"\n self.at(at_led, 13,2,4)\n self.at(at_anim, 14, 5000)\n\n\n\n def at(self, cmd, *args, **kwargs):\n \"\"\"Wrapper for the low level at commands.\n\n This method takes care that the sequence number is increased after each\n at command and the watchdog timer is started to make sure the drone\n receives a command at least every second.\n \"\"\"\n self.lock.acquire()\n self.com_watchdog_timer.cancel()\n cmd(self.seq_nr, *args, **kwargs)\n self.seq_nr += 1\n self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)\n self.com_watchdog_timer.start()\n self.lock.release()\n\n def configure_multisession(self, session_id, user_id, app_id, config_ids_string):\n self.at(at_config, \"custom:session_id\", session_id)\n self.at(at_config, \"custom:profile_id\", user_id)\n self.at(at_config, \"custom:application_id\", app_id)\n\n def set_session_id (self, 
config_ids_string, session_id):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"custom:session_id\", session_id)\n\n def set_profile_id (self, config_ids_string, profile_id):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"custom:profile_id\", profile_id)\n\n def set_app_id (self, config_ids_string, app_id):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"custom:application_id\", app_id)\n\n def set_video_bitrate_control_mode (self, config_ids_string, mode):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"video:bitrate_control_mode\", mode)\n\n def set_video_bitrate (self, config_ids_string, bitrate):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"video:bitrate\", bitrate)\n\n def set_max_bitrate(self, config_ids_string, max_bitrate):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"video:max_bitrate\", max_bitrate)\n\n def set_fps (self, config_ids_string, fps):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"video:codec_fps\", fps)\n\n def set_video_codec (self, config_ids_string, codec):\n self.at(at_config_ids , config_ids_string)\n self.at(at_config, \"video:video_codec\", codec)\n\n def commwdg(self):\n \"\"\"Communication watchdog signal.\n\n This needs to be sent regularly to keep the communication w/ the drone\n alive.\n \"\"\"\n self.at(at_comwdg)\n\n def halt(self):\n \"\"\"Shut down the drone.\n\n This method does not land or halt the actual drone, but the\n communication with the drone. You should call it at the end of your\n application to close all sockets, pipes, processes and threads related\n to this object.\n \"\"\"\n self.lock.acquire()\n self.com_watchdog_timer.cancel()\n self.com_pipe.send('die!')\n # self.network_process.terminate()\n self.network_process.join()\n #self.ipc_thread.stop()\n #self.ipc_thread.join()\n self.lock.release()\n\n def get_image(self):\n _im = np.copy(self.image)\n return _im\n\n def get_navdata(self):\n return self.navdata\n\n def set_navdata(self, navdata):\n self.navdata = navdata\n self.get_navdata()\n\n def set_image(self, image):\n if (image.shape == self.image_shape):\n self.image = image\n self.image = image\n\n def apply_command(self, command):\n available_commands = [\"emergency\",\n \"land\", \"takeoff\", \"move_left\", \"move_right\", \"move_down\", \"move_up\",\n \"move_backward\", \"move_forward\", \"turn_left\", \"turn_right\", \"hover\"]\n if command not in available_commands:\n logging.error(\"Command %s is not a recognized command\" % command)\n\n if command != \"hover\":\n self.last_command_is_hovering = False\n\n if (command == \"emergency\"):\n self.reset()\n elif (command == \"land\"):\n self.land()\n self.last_command_is_hovering = True\n elif (command == \"takeoff\"):\n self.takeoff()\n self.last_command_is_hovering = True\n elif (command == \"move_left\"):\n self.move_left()\n elif (command == \"move_right\"):\n self.move_right()\n elif (command == \"move_down\"):\n self.move_down()\n elif (command == \"move_up\"):\n self.move_up()\n elif (command == \"move_backward\"):\n self.move_backward()\n elif (command == \"move_forward\"):\n self.move_forward()\n elif (command == \"turn_left\"):\n self.turn_left()\n elif (command == \"turn_right\"):\n self.turn_right()\n elif (command == \"hover\" and not self.last_command_is_hovering):\n self.hover()\n self.last_command_is_hovering = True\n\nclass ARDrone2(ARDrone):\n def __init__(self, hd=False):\n ARDrone.__init__(self, True, 
hd)\n\n###############################################################################\n### Low level AT Commands\n###############################################################################\n\ndef at_ref(seq, takeoff, emergency=False):\n \"\"\"\n Basic behaviour of the drone: take-off/landing, emergency stop/reset\n\n Parameters:\n seq -- sequence number\n takeoff -- True: Takeoff / False: Land\n emergency -- True: Turn off the engines\n \"\"\"\n p = 0b10001010101000000000000000000\n if takeoff:\n p += 0b1000000000\n if emergency:\n p += 0b0100000000\n at(\"REF\", seq, [p])\n\ndef at_pcmd(seq, progressive, lr, fb, vv, va):\n \"\"\"\n Makes the drone move (translate/rotate).\n\n Parameters:\n seq -- sequence number\n progressive -- True: enable progressive commands, False: disable (i.e.\n enable hovering mode)\n lr -- left-right tilt: float [-1..1] negative: left, positive: right\n fb -- front-back tilt: float [-1..1] negative: forwards, positive:\n backwards\n vv -- vertical speed: float [-1..1] negative: go down, positive: rise\n va -- angular speed: float [-1..1] negative: spin left, positive: spin\n right\n\n The above float values are a percentage of the maximum speed.\n \"\"\"\n p = 1 if progressive else 0\n at(\"PCMD\", seq, [p, float(lr), float(fb), float(vv), float(va)])\n\ndef at_ftrim(seq):\n \"\"\"\n Tell the drone it's lying horizontally.\n\n Parameters:\n seq -- sequence number\n \"\"\"\n at(\"FTRIM\", seq, [])\n\ndef at_zap(seq, stream):\n \"\"\"\n Selects which video stream to send on the video UDP port.\n\n Parameters:\n seq -- sequence number\n stream -- Integer: video stream to broadcast\n \"\"\"\n # FIXME: improve parameters to select the modes directly\n at(\"ZAP\", seq, [stream])\n\ndef at_config(seq, option, value):\n \"\"\"Set configuration parameters of the drone.\"\"\"\n at(\"CONFIG\", seq, [str(option), str(value)])\n\ndef at_config_ids(seq, value):\n \"\"\"Set the configuration identifiers (session/profile/application ids).\"\"\"\n at(\"CONFIG_IDS\", seq, value)\n\ndef at_ctrl(seq, num):\n \"\"\"Ask the parrot to drop its configuration file\"\"\"\n at(\"CTRL\", seq, [num, 0])\n\ndef at_comwdg(seq):\n \"\"\"\n Reset communication watchdog.\n \"\"\"\n # FIXME: no sequence number\n at(\"COMWDG\", seq, [])\n\ndef at_aflight(seq, flag):\n \"\"\"\n Makes the drone fly autonomously.\n\n Parameters:\n seq -- sequence number\n flag -- Integer: 1: start flight, 0: stop flight\n \"\"\"\n at(\"AFLIGHT\", seq, [flag])\n\ndef at_pwm(seq, m1, m2, m3, m4):\n \"\"\"\n Sends control values directly to the engines, overriding control loops.\n\n Parameters:\n seq -- sequence number\n m1 -- front left command\n m2 -- front right command\n m3 -- back right command\n m4 -- back left command\n \"\"\"\n # FIXME: what type do mx have?\n raise NotImplementedError()\n\ndef at_led(seq, anim, f, d):\n \"\"\"\n Control the drone's LEDs.\n\n Parameters:\n seq -- sequence number\n anim -- Integer: animation to play\n f -- Float: frequency in Hz of the animation\n d -- Integer: total duration in seconds of the animation\n \"\"\"\n at(\"LED\", seq, [anim, float(f), d])\n\ndef at_anim(seq, anim, d):\n \"\"\"\n Makes the drone execute a predefined movement (animation).\n\n Parameters:\n seq -- sequence number\n anim -- Integer: animation to play\n d -- Integer: total duration in seconds of the animation\n \"\"\"\n at(\"ANIM\", seq, [anim, d])\n\ndef at(command, seq, params):\n \"\"\"\n Parameters:\n command -- the command\n seq -- the sequence number\n params -- a list of elements which can be either int, float or 
string\n \"\"\"\n param_str = ''\n for p in params:\n if type(p) == int:\n param_str += \",%d\" % p\n elif type(p) == float:\n param_str += \",%d\" % f2i(p)\n elif type(p) == str:\n param_str += ',\"' + p + '\"'\n msg = \"AT*%s=%i%s\\r\" % (command, seq, param_str)\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(msg, (\"192.168.1.1\", ARDRONE_COMMAND_PORT))\n\ndef f2i(f):\n \"\"\"Interpret IEEE-754 floating-point value as signed integer.\n\n Arguments:\n f -- floating point value\n \"\"\"\n return struct.unpack('i', struct.pack('f', f))[0]\n\n###############################################################################\n### navdata\n###############################################################################\ndef decode_navdata(packet):\n \"\"\"Decode a navdata packet.\"\"\"\n offset = 0\n _ = struct.unpack_from(\"IIII\", packet, offset)\n drone_state = dict()\n drone_state['fly_mask'] = _[1] & 1 # FLY MASK : (0) ardrone is landed, (1) ardrone is flying\n drone_state['video_mask'] = _[1] >> 1 & 1 # VIDEO MASK : (0) video disable, (1) video enable\n drone_state['vision_mask'] = _[1] >> 2 & 1 # VISION MASK : (0) vision disable, (1) vision enable */\n drone_state['control_mask'] = _[1] >> 3 & 1 # CONTROL ALGO (0) euler angles control, (1) angular speed control */\n drone_state['altitude_mask'] = _[1] >> 4 & 1 # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active */\n drone_state['user_feedback_start'] = _[1] >> 5 & 1 # USER feedback : Start button state */\n drone_state['command_mask'] = _[1] >> 6 & 1 # Control command ACK : (0) None, (1) one received */\n drone_state['fw_file_mask'] = _[1] >> 7 & 1 # Firmware file is good (1) */\n drone_state['fw_ver_mask'] = _[1] >> 8 & 1 # Firmware update is newer (1) */\n drone_state['fw_upd_mask'] = _[1] >> 9 & 1 # Firmware update is ongoing (1) */\n drone_state['navdata_demo_mask'] = _[1] >> 10 & 1 # Navdata demo : (0) All navdata, (1) only navdata demo */\n drone_state['navdata_bootstrap'] = _[1] >> 11 & 1 # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent */\n drone_state['motors_mask'] = _[1] >> 12 & 1 # Motor status : (0) Ok, (1) Motors problem */\n drone_state['com_lost_mask'] = _[1] >> 13 & 1 # Communication lost : (1) com problem, (0) Com is ok */\n drone_state['vbat_low'] = _[1] >> 15 & 1 # VBat low : (1) too low, (0) Ok */\n drone_state['user_el'] = _[1] >> 16 & 1 # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF*/\n drone_state['timer_elapsed'] = _[1] >> 17 & 1 # Timer elapsed : (1) elapsed, (0) not elapsed */\n drone_state['angles_out_of_range'] = _[1] >> 19 & 1 # Angles : (0) Ok, (1) out of range */\n drone_state['ultrasound_mask'] = _[1] >> 21 & 1 # Ultrasonic sensor : (0) Ok, (1) deaf */\n drone_state['cutout_mask'] = _[1] >> 22 & 1 # Cutout system detection : (0) Not detected, (1) detected */\n drone_state['pic_version_mask'] = _[1] >> 23 & 1 # PIC Version number OK : (0) a bad version number, (1) version number is OK */\n drone_state['atcodec_thread_on'] = _[1] >> 24 & 1 # ATCodec thread ON : (0) thread OFF (1) thread ON */\n drone_state['navdata_thread_on'] = _[1] >> 25 & 1 # Navdata thread ON : (0) thread OFF (1) thread ON */\n drone_state['video_thread_on'] = _[1] >> 26 & 1 # Video thread ON : (0) thread OFF (1) thread ON */\n drone_state['acq_thread_on'] = _[1] >> 27 & 1 # Acquisition thread ON : (0) thread OFF (1) thread ON */\n drone_state['ctrl_watchdog_mask'] = _[1] >> 28 & 1 # CTRL watchdog : (1) delay in control 
execution (> 5ms), (0) control is well scheduled */\n drone_state['adc_watchdog_mask'] = _[1] >> 29 & 1 # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good */\n drone_state['com_watchdog_mask'] = _[1] >> 30 & 1 # Communication Watchdog : (1) com problem, (0) Com is ok */\n drone_state['emergency_mask'] = _[1] >> 31 & 1 # Emergency landing : (0) no emergency, (1) emergency */\n data = dict()\n data['drone_state'] = drone_state\n data['header'] = _[0]\n data['seq_nr'] = _[2]\n data['vision_flag'] = _[3]\n offset += struct.calcsize(\"IIII\")\n has_flying_information = False\n while 1:\n try:\n id_nr, size = struct.unpack_from(\"HH\", packet, offset)\n offset += struct.calcsize(\"HH\")\n except struct.error:\n break\n values = []\n for i in range(size - struct.calcsize(\"HH\")):\n values.append(struct.unpack_from(\"c\", packet, offset)[0])\n offset += struct.calcsize(\"c\")\n # navdata_tag_t in navdata-common.h\n if id_nr == 0:\n has_flying_information = True\n values = struct.unpack_from(\"IIfffifffI\", \"\".join(values))\n values = dict(zip(['ctrl_state', 'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames'], values))\n # convert the millidegrees into degrees and round to int, as they\n # are not so precise anyways\n for i in 'theta', 'phi', 'psi':\n values[i] = int(values[i] / 1000)\n data[id_nr] = values\n return data, has_flying_information\n" ]
[ [ "numpy.copy", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
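The AT-command layer in this record sends every float argument over the wire as a signed 32-bit integer via the f2i helper shown above. A minimal standalone sketch of that IEEE-754 reinterpretation and how a PCMD line would serialize (the sequence number and speed values below are illustrative, not from a live drone session):

import struct

def f2i(f):
    # Reinterpret the IEEE-754 bit pattern of a 32-bit float as a signed int.
    return struct.unpack('i', struct.pack('f', f))[0]

# A forward tilt of -0.5 travels on the wire as this integer:
print(f2i(-0.5))  # -1090519040
# so a progressive "move forward at half speed" PCMD serializes roughly as:
print("AT*PCMD=%i,%i,%i,%i,%i,%i\r" % (1, 1, 0, f2i(-0.5), 0, 0))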
lianapanatau/Snippext_public
[ "a8829802d47678d6f513dde08391aeb0d4a8e37f" ]
[ "snippext/dataset.py" ]
[ "import random\n\nimport jsonlines\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils import data\nfrom transformers import BertTokenizer\n\nfrom .augment import Augmenter\n\ntokenizer = None\n\n\ndef get_tokenizer():\n \"\"\"Return the tokenizer. Intiailize it if not initialized.\n\n Args:\n lm (string, optional): the name of the language model\n (bert, albert, roberta, distilbert, etc.)\n\n Returns:\n Tokenizer: the tokenizer to be used\n \"\"\"\n global tokenizer\n if tokenizer is None:\n tokenizer = BertTokenizer.from_pretrained(\"readerbench/RoBERT-small\")\n # \"/Users/liana/Documents/Projects/master/ssl/solutions/fork/BERT-for-RRC-ABSA/pytorch-pretrained-bert/pt_model/bert\"\n return tokenizer\n\n\nclass SnippextDataset(data.Dataset):\n def __init__(self,\n source,\n vocab,\n taskname,\n max_len=512,\n lm='bert',\n augment_index=None,\n augment_op=None,\n size=None):\n # tokens and tags\n sents, tags_li = [], [] # list of lists\n self.max_len = max_len\n get_tokenizer()\n\n if type(source) is str:\n sents, tags_li = self.read_tagging_file(source, taskname)\n if size is not None:\n sents, tags_li = sents[:size], tags_li[:size]\n else:\n # read from list of tokens (for prediction)\n if '_tagging' in taskname or '_qa' in taskname:\n for tokens in source:\n sents.append([\"[CLS]\"] + [token for token in tokens] + [\"[SEP]\"])\n tags_li.append([\"<PAD>\"] + ['O' for token in tokens] + [\"<PAD>\"])\n else:\n for sent in source:\n sents.append(sent)\n tags_li.append(vocab[0])\n\n # assign class variables\n self.sents, self.tags_li = sents, tags_li\n self.vocab = vocab\n\n # add special tags for tagging\n if '_tagging' in taskname:\n if 'O' not in self.vocab:\n self.vocab.append('O')\n if self.vocab[0] != '<PAD>':\n self.vocab.insert(0, '<PAD>')\n\n # index for tags/labels\n self.tag2idx = {tag: idx for idx, tag in enumerate(self.vocab)}\n self.idx2tag = {idx: tag for idx, tag in enumerate(self.vocab)}\n self.taskname = taskname\n\n # augmentation index and op\n self.augment_op = augment_op\n if augment_op == 't5':\n None\n # self.load_t5_examples(source)\n elif augment_index != None:\n self.augmenter = Augmenter(augment_index)\n else:\n self.augmenter = None\n self.augment_op = None\n\n def print_size(self):\n None\n # print(\"Sents size \", str(len(self.sents)))\n # print(\"Tags size \", str(len(self.tags_li)))\n\n # def load_t5_examples(self, source):\n # self.augmenter = None\n # # read augmented examples\n # self.augmented_examples = []\n # if '_tagging' in self.taskname:\n # with jsonlines.open(source + '.augment.jsonl', mode='r') as reader:\n # for row in reader:\n # exms = []\n # for entry in row['augment']:\n # tokens, labels = self.read_tagging_file(entry, is_file=False)\n # exms.append((tokens[0], labels[0]))\n # self.augmented_examples.append(exms)\n # else:\n # with jsonlines.open(source + '.augment.jsonl', mode='r') as reader:\n # for row in reader:\n # exms = []\n # label = row['label']\n # for entry in row['augment']:\n # sent = ' [SEP] '.join(entry.split('\\t'))\n # exms.append((sent, label))\n # self.augmented_examples.append(exms)\n\n def read_tagging_file(self, path, task_name):\n \"\"\"Read a train/eval classification/tagging dataset from file\n\n The input file should contain multiple lines where each line is an example.\n The format of each line:\n The room is clean.\\tpositive\n\n Args:\n path (str): the path to the dataset file\n\n Returns:\n list of str: the input sequences\n list of str: the labels\n\n The input file should contain multiple 
entries separated by empty lines.\n The format of each entry:\n\n The O\n room B-AS\n is O\n very B-OP\n clean I-OP\n . O\n\n Args:\n path (str): the path to the dataset file\n\n Returns:\n list of list of str: the tokens\n list of list of str: the labels\n \"\"\"\n sents, tags_li = [], []\n if '_tagging' in task_name:\n df = pd.read_json(path, orient='index')\n for index, row in df.iterrows():\n try:\n sents.append([\"[CLS]\"] + row['sentence'][:self.max_len] + [\"[SEP]\"])\n tags_li.append([\"<PAD>\"] + row['label'][:self.max_len] + [\"<PAD>\"])\n except:\n print('error @', index)\n else:\n df = pd.read_csv(path, names=['id', 'review', 'sentiment'])\n for index, row in df.iterrows():\n try:\n if row['review'] is not np.nan:\n sents.append(row['review'])\n if row['sentiment'] == 0:\n tags_li.append('negative')\n else:\n tags_li.append('positive')\n except:\n print('error @ @', index, row)\n return sents, tags_li\n\n def read_classification_file(self, path):\n \"\"\"Read a train/eval classification dataset from file\n\n The input file should contain multiple lines where each line is an example.\n The format of each line:\n The room is clean.\\troom\\tpositive\n\n Args:\n path (str): the path to the dataset file\n\n Returns:\n list of str: the input sequences\n list of str: the labels\n \"\"\"\n sents, labels = [], []\n lines = open(path).readlines()\n for line in lines:\n items = line.strip().split('\\t')\n # only consider sentence and sentence pairs\n if len(items) < 2 or len(items) > 3:\n continue\n try:\n if len(items) == 2:\n sents.append(items[0])\n labels.append(items[1])\n else:\n sents.append(items[0] + ' [SEP] ' + items[1])\n labels.append(items[2])\n except:\n print('error @', line.strip())\n return sents, labels\n\n def __len__(self):\n \"\"\"Return the length of the dataset\"\"\"\n return len(self.sents)\n\n def get(self, idx, op=[]):\n ag = self.augmenter\n self.augmenter = None\n item = self.__getitem__(idx)\n self.augmenter = ag\n return item\n\n def __getitem__(self, idx):\n \"\"\"Return the ith item of in the dataset.\n\n Args:\n idx (int): the element index\n Returns (TODO):\n words, x, is_heads, tags, mask, y, seqlen, self.taskname\n \"\"\"\n # print(\"Augument element at \", idx)\n words, tags = self.sents[idx], self.tags_li[idx]\n\n if '_tagging' in self.taskname:\n # apply data augmentation if specified\n if self.augment_op == 't5':\n if len(self.augmented_examples[idx]) > 0:\n words, tags = random.choice(self.augmented_examples[idx])\n elif self.augmenter != None:\n words, tags = self.augmenter.augment(words, tags, self.augment_op)\n\n # We give credits only to the first piece.\n x, y = [], [] # list of ids\n is_heads = [] # list. 
1: the token is the first piece of a word\n\n for w, t in zip(words, tags):\n # avoid bad tokens\n w = w[:50]\n tokens = tokenizer.tokenize(w) if w not in (\"[CLS]\", \"[SEP]\") else [w]\n bert_encodings_for_words = tokenizer.convert_tokens_to_ids(tokens)\n if len(bert_encodings_for_words) == 0:\n continue\n\n is_head = [1] + [0] * (len(tokens) - 1)\n\n t = [t] + [\"<PAD>\"] * (len(tokens) - 1) # <PAD>: no decision\n encodings_for_tags = [self.tag2idx[each] for each in t] # (T,)\n\n x.extend(bert_encodings_for_words)\n is_heads.extend(is_head)\n y.extend(encodings_for_tags)\n # make sure that the length of x is not too large\n if len(x) > self.max_len:\n break\n\n assert len(x) == len(y) == len(is_heads), \\\n f\"len(x)={len(x)}, len(y)={len(y)}, len(is_heads)={len(is_heads)}, {' '.join(tokens)}\"\n\n # seqlen\n seqlen = len(y)\n\n mask = [1] * seqlen\n # masking for QA\n for i, t in enumerate(tags):\n if t != '<PAD>':\n break\n mask[i] = 0\n\n # to string\n words = \" \".join(words)\n tags = \" \".join(tags)\n else: # classification\n if self.augmenter != None:\n words = self.augmenter.augment_sent(words, self.augment_op)\n\n if ' [SEP] ' in words:\n sent_a, sent_b = words.split(' [SEP] ')\n else:\n sent_a, sent_b = words, None\n\n x = tokenizer.encode(sent_a, text_pair=sent_b,\n truncation=\"longest_first\",\n max_length=self.max_len,\n add_special_tokens=True)\n\n y = self.tag2idx[tags] # label\n is_heads = [1] * len(x)\n mask = [1] * len(x)\n\n assert len(x) == len(mask) == len(is_heads), \\\n f\"len(x)={len(x)}, len(y)={len(y)}, len(is_heads)={len(is_heads)}\"\n # seqlen\n seqlen = len(mask)\n\n return words, x, is_heads, tags, mask, y, seqlen, self.taskname\n\n @staticmethod\n def pad(batch):\n '''Pads to the longest sample\n\n Args:\n batch:\n\n Returns (TODO):\n return words, f(x), is_heads, tags, f(mask), f(y), seqlens, name\n '''\n f = lambda x: [sample[x] for sample in batch]\n g = lambda x, seqlen, val: \\\n [sample[x] + [val] * (seqlen - len(sample[x])) \\\n for sample in batch] # 0: <pad>\n\n # get maximal sequence length\n seqlens = f(6)\n maxlen = np.array(seqlens).max()\n # get task name\n name = f(7)\n\n words = f(0)\n x = g(1, maxlen, 0)\n is_heads = f(2)\n tags = f(3)\n mask = g(4, maxlen, 1)\n if '_tagging' in name[0]:\n y = g(5, maxlen, 0)\n else:\n y = f(5)\n\n f = torch.LongTensor\n if isinstance(y[0], float):\n y = torch.Tensor(y)\n else:\n y = torch.LongTensor(y)\n return words, f(x), is_heads, tags, f(mask), y, seqlens, name\n" ]
[ [ "torch.LongTensor", "pandas.read_csv", "torch.Tensor", "pandas.read_json", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
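SnippextDataset.pad in this record right-pads every field to the longest sequence in the batch before stacking tensors. A self-contained sketch of that padding step (the token ids below are made up for illustration, not real tokenizer output):

import torch

batch = [([101, 7592, 102], 3), ([101, 2088, 2003, 2307, 102], 5)]  # (ids, seqlen)
maxlen = max(seqlen for _, seqlen in batch)

padded = [ids + [0] * (maxlen - len(ids)) for ids, _ in batch]  # 0 = <pad>
x = torch.LongTensor(padded)
print(x.shape)  # torch.Size([2, 5])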
Hrafnir/poptimizer
[ "16bc9e056a6daa452d48cdac0dea5901e4a3d4a1" ]
[ "poptimizer/portfolio/tests/test_metrics.py" ]
[ "from types import SimpleNamespace\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom poptimizer.portfolio import metrics, portfolio\nfrom poptimizer.portfolio.portfolio import CASH, PORTFOLIO\n\n\[email protected](scope=\"module\", name=\"single\")\ndef make_metrics():\n positions = dict(BSPB=4890, FESH=1300, KZOS=5080)\n port = portfolio.Portfolio(\"2020-05-14\", 84449, positions)\n mean = pd.Series([0.09, 0.06, 0.07], index=list(positions))\n cov = np.array([[0.04, 0.005, 0.01], [0.005, 0.0625, 0.00625], [0.01, 0.00625, 0.0625]])\n fake_forecast = SimpleNamespace()\n fake_forecast.mean = mean\n fake_forecast.cov = cov\n # noinspection PyTypeChecker\n yield metrics.MetricsSingle(port, fake_forecast)\n\n\nclass TestMetricsSingle:\n def test_mean(self, single):\n mean = single.mean\n assert isinstance(mean, pd.Series)\n assert mean.name == \"MEAN\"\n assert len(mean) == 5\n assert mean[\"BSPB\"] == 0.09\n assert mean[\"FESH\"] == 0.06\n assert mean[\"KZOS\"] == 0.07\n assert mean[CASH] == 0.0\n assert mean[PORTFOLIO] == pytest.approx(0.0671295513194378)\n\n def test_std(self, single):\n std = single.std\n assert isinstance(std, pd.Series)\n assert std.name == \"STD\"\n assert len(std) == 5\n assert std[\"BSPB\"] == 0.20\n assert std[\"FESH\"] == 0.25\n assert std[\"KZOS\"] == 0.25\n assert std[CASH] == 0.0\n assert std[PORTFOLIO] == pytest.approx(0.171832239704213)\n\n def test_beta(self, single):\n beta = single.beta\n assert isinstance(beta, pd.Series)\n assert beta.name == \"BETA\"\n assert len(beta) == 5\n assert beta[\"BSPB\"] == pytest.approx(0.564325931057505)\n assert beta[\"FESH\"] == pytest.approx(0.197707113104551)\n assert beta[\"KZOS\"] == pytest.approx(1.3876207989677)\n assert beta[CASH] == 0.0\n assert beta[PORTFOLIO] == 1.0\n\n def test_r_geom(self, single):\n r_geom = single.r_geom\n assert isinstance(r_geom, pd.Series)\n assert r_geom.name == \"R_GEOM\"\n assert len(r_geom) == 5\n assert r_geom[\"BSPB\"] == pytest.approx(0.0881006920652409)\n assert r_geom[\"FESH\"] == pytest.approx(0.0689255960895227)\n assert r_geom[\"KZOS\"] == pytest.approx(0.0437918254921255)\n assert r_geom[CASH] == pytest.approx(0.0147631593008831)\n assert r_geom[PORTFOLIO] == pytest.approx(0.0523663920185547)\n\n assert r_geom[PORTFOLIO] == (single._portfolio.weight * r_geom).iloc[:-1].sum()\n\n def test_gradient(self, single):\n gradient = single.gradient\n assert isinstance(gradient, pd.Series)\n assert gradient.name == \"GRAD\"\n assert len(gradient) == 5\n assert gradient[\"BSPB\"] == pytest.approx(0.0357343000466862)\n assert gradient[\"FESH\"] == pytest.approx(0.016559204070968)\n assert gradient[\"KZOS\"] == pytest.approx(-0.00857456652642924)\n assert gradient[CASH] == pytest.approx(-0.0376032327176716)\n assert gradient[PORTFOLIO] == 0.0\n\n assert gradient[PORTFOLIO] == pytest.approx(\n (single._portfolio.weight * gradient).iloc[:-1].sum()\n )\n\n def test_str(self, single):\n assert \"КЛЮЧЕВЫЕ МЕТРИКИ ПОРТФЕЛЯ\" in str(single)\n\n\[email protected](scope=\"module\", name=\"resample\")\ndef make_resample():\n positions = dict(BSPB=4890, FESH=1300)\n port = portfolio.Portfolio(\"2020-05-14\", 84449, positions)\n\n mean1 = pd.Series([0.09, 0.06], index=list(positions))\n cov1 = np.array([[0.04, 0.005], [0.005, 0.0625]])\n\n mean2 = pd.Series([0.05, 0.09], index=list(positions))\n cov12 = np.array([[0.0225, 0.0042], [0.0042, 0.0196]])\n\n def fake_get_forecasts(*_):\n data = [\n SimpleNamespace(mean=mean1, cov=cov1, history_days=1, cor=0.4, shrinkage=0.3,),\n 
SimpleNamespace(mean=mean2, cov=cov12, history_days=2, cor=0.5, shrinkage=0.2,),\n ]\n yield from data\n\n saved_get_forecast = metrics.evolve.get_forecasts\n metrics.evolve.get_forecasts = fake_get_forecasts\n\n yield metrics.MetricsResample(port)\n\n metrics.evolve.get_forecasts = saved_get_forecast\n\n\nclass TestMetricsResample:\n def test_count(self, resample):\n assert resample.count == 2\n\n def test_mean(self, resample):\n mean = resample.mean\n assert isinstance(mean, pd.Series)\n assert mean.name == \"MEAN\"\n assert len(mean) == 4\n assert mean[\"BSPB\"] == 0.07\n assert mean[\"FESH\"] == 0.075\n assert mean[CASH] == 0.0\n assert mean[PORTFOLIO] == pytest.approx(0.0495010842956967)\n\n def test_std(self, resample):\n std = resample.std\n assert isinstance(std, pd.Series)\n assert std.name == \"STD\"\n assert len(std) == 4\n assert std[\"BSPB\"] == 0.175\n assert std[\"FESH\"] == 0.195\n assert std[CASH] == 0.0\n assert std[PORTFOLIO] == pytest.approx(0.119237329326756)\n\n def test_beta(self, resample):\n beta = resample.beta\n assert isinstance(beta, pd.Series)\n assert beta.name == \"BETA\"\n assert len(beta) == 4\n assert beta[\"BSPB\"] == pytest.approx(1.46588406985897)\n assert beta[\"FESH\"] == pytest.approx(0.302533282078987)\n assert beta[CASH] == 0.0\n assert beta[PORTFOLIO] == 1.0\n\n def test_r_geom(self, resample):\n r_geom = resample.r_geom\n assert isinstance(r_geom, pd.Series)\n assert r_geom.name == \"R_GEOM\"\n assert len(r_geom) == 4\n assert r_geom[\"BSPB\"] == pytest.approx(0.0559870972984637)\n assert r_geom[\"FESH\"] == pytest.approx(0.0779560420919237)\n assert r_geom[CASH] == pytest.approx(0.00725189173625379)\n assert r_geom[PORTFOLIO] == pytest.approx(0.0422491925594429)\n\n assert r_geom[PORTFOLIO] == pytest.approx((resample._portfolio.weight * r_geom).iloc[:-1].sum())\n\n def test_gradient(self, resample):\n gradient = resample.gradient\n assert isinstance(gradient, pd.Series)\n assert gradient.name == \"GRAD\"\n assert len(gradient) == 4\n assert gradient[\"BSPB\"] == pytest.approx(0.0137379047390208)\n assert gradient[\"FESH\"] == pytest.approx(0.0357068495324808)\n assert gradient[CASH] == pytest.approx(-0.0349973008231891)\n assert gradient[PORTFOLIO] == 0.0\n\n assert gradient[PORTFOLIO] == pytest.approx(\n (resample._portfolio.weight * gradient).iloc[:-1].sum()\n )\n\n def test_str(self, resample):\n assert \"КЛЮЧЕВЫЕ МЕТРИКИ ПОРТФЕЛЯ\" in str(resample)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
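The portfolio-level assertions in these tests are consistent with standard mean-variance identities; under that (assumed) reading, the portfolio mean is the weight-dot-mean and each position's beta is cov(i, portfolio) / var(portfolio). A sketch using the single-forecast fixture's mean/cov with hypothetical weights (the real weights come from share counts and prices not shown here):

import numpy as np

w = np.array([0.3, 0.2, 0.5])  # hypothetical position weights
mean = np.array([0.09, 0.06, 0.07])
cov = np.array([[0.04, 0.005, 0.01],
                [0.005, 0.0625, 0.00625],
                [0.01, 0.00625, 0.0625]])

port_mean = w @ mean          # weighted mean return
port_var = w @ cov @ w        # portfolio variance
beta = cov @ w / port_var     # betas w.r.t. the portfolio

print(port_mean, np.sqrt(port_var))
print(beta, w @ beta)         # weighted betas sum to 1 by construction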
schreon/transformers
[ "9ac581e69fd5dd07a57afbac837a504713fd917a" ]
[ "examples/research_projects/seq2seq-distillation/utils.py" ]
[ "import itertools\nimport json\nimport linecache\nimport math\nimport os\nimport pickle\nimport socket\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Callable, Dict, Iterable, List, Tuple, Union\n\nimport git\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom rouge_score import rouge_scorer, scoring\nfrom sacrebleu import corpus_bleu\nfrom torch import nn\nfrom torch.utils.data import Dataset, Sampler\n\nfrom sentence_splitter import add_newline_to_end_of_each_sentence\nfrom transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer\nfrom transformers.file_utils import cached_property\nfrom transformers.models.bart.modeling_bart import shift_tokens_right\n\n\ntry:\n from fairseq.data.data_utils import batch_by_size\n\n FAIRSEQ_AVAILABLE = True\nexcept (ImportError, ModuleNotFoundError):\n FAIRSEQ_AVAILABLE = False\n\n\ndef label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):\n \"\"\"From fairseq\"\"\"\n if target.dim() == lprobs.dim() - 1:\n target = target.unsqueeze(-1)\n nll_loss = -lprobs.gather(dim=-1, index=target)\n smooth_loss = -lprobs.sum(dim=-1, keepdim=True)\n if ignore_index is not None:\n pad_mask = target.eq(ignore_index)\n nll_loss.masked_fill_(pad_mask, 0.0)\n smooth_loss.masked_fill_(pad_mask, 0.0)\n else:\n nll_loss = nll_loss.squeeze(-1)\n smooth_loss = smooth_loss.squeeze(-1)\n\n nll_loss = nll_loss.sum() # mean()? Scared to break other math.\n smooth_loss = smooth_loss.sum()\n eps_i = epsilon / lprobs.size(-1)\n loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss\n return loss, nll_loss\n\n\ndef lmap(f: Callable, x: Iterable) -> List:\n \"\"\"list(map(f, x))\"\"\"\n return list(map(f, x))\n\n\ndef calculate_bleu(output_lns, refs_lns, **kwargs) -> dict:\n \"\"\"Uses sacrebleu's corpus_bleu implementation.\"\"\"\n return {\"bleu\": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)}\n\n\ndef build_compute_metrics_fn(task_name: str, tokenizer: PreTrainedTokenizer) -> Callable[[EvalPrediction], Dict]:\n def non_pad_len(tokens: np.ndarray) -> int:\n return np.count_nonzero(tokens != tokenizer.pad_token_id)\n\n def decode_pred(pred: EvalPrediction) -> Tuple[List[str], List[str]]:\n pred_str = tokenizer.batch_decode(pred.predictions, skip_special_tokens=True)\n label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)\n pred_str = lmap(str.strip, pred_str)\n label_str = lmap(str.strip, label_str)\n return pred_str, label_str\n\n def summarization_metrics(pred: EvalPrediction) -> Dict:\n pred_str, label_str = decode_pred(pred)\n rouge: Dict = calculate_rouge(pred_str, label_str)\n summ_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1)\n rouge.update({\"gen_len\": summ_len})\n return rouge\n\n def translation_metrics(pred: EvalPrediction) -> Dict:\n pred_str, label_str = decode_pred(pred)\n bleu: Dict = calculate_bleu(pred_str, label_str)\n gen_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1)\n bleu.update({\"gen_len\": gen_len})\n return bleu\n\n compute_metrics_fn = summarization_metrics if \"summarization\" in task_name else translation_metrics\n return compute_metrics_fn\n\n\ndef trim_batch(\n input_ids,\n pad_token_id,\n attention_mask=None,\n):\n \"\"\"Remove columns that are populated exclusively by pad_token_id\"\"\"\n keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)\n if attention_mask is None:\n return input_ids[:, keep_column_mask]\n else:\n return (input_ids[:, keep_column_mask], 
attention_mask[:, keep_column_mask])\n\n\nclass AbstractSeq2SeqDataset(Dataset):\n def __init__(\n self,\n tokenizer,\n data_dir,\n max_source_length,\n max_target_length,\n type_path=\"train\",\n n_obs=None,\n prefix=\"\",\n **dataset_kwargs\n ):\n super().__init__()\n self.src_file = Path(data_dir).joinpath(type_path + \".source\")\n self.tgt_file = Path(data_dir).joinpath(type_path + \".target\")\n self.len_file = Path(data_dir).joinpath(type_path + \".len\")\n if os.path.exists(self.len_file):\n self.src_lens = pickle_load(self.len_file)\n self.used_char_len = False\n else:\n self.src_lens = self.get_char_lens(self.src_file)\n self.used_char_len = True\n self.max_source_length = max_source_length\n self.max_target_length = max_target_length\n assert min(self.src_lens) > 0, f\"found empty line in {self.src_file}\"\n self.tokenizer = tokenizer\n self.prefix = prefix if prefix is not None else \"\"\n\n if n_obs is not None:\n self.src_lens = self.src_lens[:n_obs]\n self.pad_token_id = self.tokenizer.pad_token_id\n self.dataset_kwargs = dataset_kwargs\n dataset_kwargs.update({\"add_prefix_space\": True} if isinstance(self.tokenizer, BartTokenizer) else {})\n\n def __len__(self):\n return len(self.src_lens)\n\n @staticmethod\n def get_char_lens(data_file):\n return [len(x) for x in Path(data_file).open().readlines()]\n\n @cached_property\n def tgt_lens(self):\n \"\"\"Length in characters of target documents\"\"\"\n return self.get_char_lens(self.tgt_file)\n\n def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):\n if distributed:\n return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs)\n else:\n return SortishSampler(self.src_lens, batch_size, shuffle=shuffle)\n\n def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):\n assert FAIRSEQ_AVAILABLE, \"Dynamic batch size requires `pip install fairseq`\"\n assert not self.used_char_len, \"You must call python make_len_file.py before calling make_dynamic_sampler\"\n sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False))\n\n def num_tokens_in_example(i):\n return min(self.src_lens[i], self.max_target_length)\n\n # call fairseq cython function\n batch_sampler: List[List[int]] = batch_by_size(\n sorted_indices,\n num_tokens_fn=num_tokens_in_example,\n max_tokens=max_tokens_per_batch,\n required_batch_size_multiple=64,\n )\n shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))]\n # move the largest batch to the front to OOM quickly (uses an approximation for padding)\n approximate_toks_per_batch = [max(self.src_lens[i] for i in batch) * len(batch) for batch in shuffled_batches]\n largest_batch_idx = np.argmax(approximate_toks_per_batch)\n shuffled_batches[0], shuffled_batches[largest_batch_idx] = (\n shuffled_batches[largest_batch_idx],\n shuffled_batches[0],\n )\n return shuffled_batches\n\n def __getitem__(self, item):\n raise NotImplementedError(\"You must implement this\")\n\n def collate_fn(self, batch):\n raise NotImplementedError(\"You must implement this\")\n\n\nclass LegacySeq2SeqDataset(AbstractSeq2SeqDataset):\n def __getitem__(self, index) -> Dict[str, torch.Tensor]:\n \"\"\"Call tokenizer on src and tgt_lines\"\"\"\n index = index + 1 # linecache starts at 1\n source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip(\"\\n\")\n tgt_line = linecache.getline(str(self.tgt_file), index).rstrip(\"\\n\")\n assert source_line, f\"empty source line for index {index}\"\n assert tgt_line, f\"empty tgt 
line for index {index}\"\n source_inputs = self.encode_line(self.tokenizer, source_line, self.max_source_length)\n target_inputs = self.encode_line(self.tokenizer, tgt_line, self.max_target_length)\n\n source_ids = source_inputs[\"input_ids\"].squeeze()\n target_ids = target_inputs[\"input_ids\"].squeeze()\n src_mask = source_inputs[\"attention_mask\"].squeeze()\n return {\n \"input_ids\": source_ids,\n \"attention_mask\": src_mask,\n \"labels\": target_ids,\n }\n\n def encode_line(self, tokenizer, line, max_length, pad_to_max_length=True, return_tensors=\"pt\"):\n \"\"\"Only used by LegacyDataset\"\"\"\n return tokenizer(\n [line],\n max_length=max_length,\n padding=\"max_length\" if pad_to_max_length else None,\n truncation=True,\n return_tensors=return_tensors,\n **self.dataset_kwargs,\n )\n\n def collate_fn(self, batch) -> Dict[str, torch.Tensor]:\n input_ids = torch.stack([x[\"input_ids\"] for x in batch])\n masks = torch.stack([x[\"attention_mask\"] for x in batch])\n target_ids = torch.stack([x[\"labels\"] for x in batch])\n pad_token_id = self.pad_token_id\n y = trim_batch(target_ids, pad_token_id)\n source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks)\n batch = {\n \"input_ids\": source_ids,\n \"attention_mask\": source_mask,\n \"labels\": y,\n }\n return batch\n\n\nclass Seq2SeqDataset(AbstractSeq2SeqDataset):\n \"\"\"A dataset that calls prepare_seq2seq_batch.\"\"\"\n\n def __getitem__(self, index) -> Dict[str, str]:\n index = index + 1 # linecache starts at 1\n source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip(\"\\n\")\n tgt_line = linecache.getline(str(self.tgt_file), index).rstrip(\"\\n\")\n assert source_line, f\"empty source line for index {index}\"\n assert tgt_line, f\"empty tgt line for index {index}\"\n return {\"tgt_texts\": tgt_line, \"src_texts\": source_line, \"id\": index - 1}\n\n def collate_fn(self, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Call prepare_seq2seq_batch.\"\"\"\n batch_encoding: Dict[str, torch.Tensor] = self.tokenizer.prepare_seq2seq_batch(\n [x[\"src_texts\"] for x in batch],\n tgt_texts=[x[\"tgt_texts\"] for x in batch],\n max_length=self.max_source_length,\n max_target_length=self.max_target_length,\n return_tensors=\"pt\",\n **self.dataset_kwargs,\n ).data\n batch_encoding[\"ids\"] = torch.tensor([x[\"id\"] for x in batch])\n return batch_encoding\n\n\nclass Seq2SeqDataCollator:\n def __init__(self, tokenizer, data_args, tpu_num_cores=None):\n self.tokenizer = tokenizer\n self.pad_token_id = tokenizer.pad_token_id\n assert (\n self.pad_token_id is not None\n ), f\"pad_token_id is not defined for ({self.tokenizer.__class__.__name__}), it must be defined.\"\n self.data_args = data_args\n self.tpu_num_cores = tpu_num_cores\n self.dataset_kwargs = {\"add_prefix_space\": True} if isinstance(tokenizer, BartTokenizer) else {}\n if data_args.src_lang is not None:\n self.dataset_kwargs[\"src_lang\"] = data_args.src_lang\n if data_args.tgt_lang is not None:\n self.dataset_kwargs[\"tgt_lang\"] = data_args.tgt_lang\n\n def __call__(self, batch) -> Dict[str, torch.Tensor]:\n if hasattr(self.tokenizer, \"prepare_seq2seq_batch\"):\n batch = self._encode(batch)\n input_ids, attention_mask, labels = (\n batch[\"input_ids\"],\n batch[\"attention_mask\"],\n batch[\"labels\"],\n )\n else:\n input_ids = torch.stack([x[\"input_ids\"] for x in batch])\n attention_mask = torch.stack([x[\"attention_mask\"] for x in batch])\n labels = torch.stack([x[\"labels\"] for x in batch])\n\n labels = trim_batch(labels, 
self.pad_token_id)\n input_ids, attention_mask = trim_batch(input_ids, self.pad_token_id, attention_mask=attention_mask)\n\n if isinstance(self.tokenizer, T5Tokenizer):\n decoder_input_ids = self._shift_right_t5(labels)\n else:\n decoder_input_ids = shift_tokens_right(labels, self.pad_token_id)\n\n batch = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"decoder_input_ids\": decoder_input_ids,\n \"labels\": labels,\n }\n return batch\n\n def _shift_right_t5(self, input_ids):\n # shift inputs to the right\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()\n shifted_input_ids[..., 0] = self.pad_token_id\n return shifted_input_ids\n\n def _encode(self, batch) -> Dict[str, torch.Tensor]:\n batch_encoding = self.tokenizer.prepare_seq2seq_batch(\n [x[\"src_texts\"] for x in batch],\n tgt_texts=[x[\"tgt_texts\"] for x in batch],\n max_length=self.data_args.max_source_length,\n max_target_length=self.data_args.max_target_length,\n padding=\"max_length\" if self.tpu_num_cores is not None else \"longest\", # TPU hack\n return_tensors=\"pt\",\n **self.dataset_kwargs,\n )\n return batch_encoding.data\n\n\nclass SortishSampler(Sampler):\n \"Go through the text data by order of src length with a bit of randomness. From fastai repo.\"\n\n def __init__(self, data, batch_size, shuffle=True):\n self.data, self.bs, self.shuffle = data, batch_size, shuffle\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __iter__(self):\n return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle))\n\n\ndef sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array:\n \"Go through the text data by order of src length with a bit of randomness. From fastai repo.\"\n if not shuffle:\n return np.argsort(np.array(data) * -1)\n\n def key_fn(i):\n return data[i]\n\n idxs = np.random.permutation(len(data))\n sz = bs * 50\n ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)]\n sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx])\n sz = bs\n ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)]\n max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,\n ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first.\n sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int)\n sort_idx = np.concatenate((ck_idx[0], sort_idx))\n return sort_idx\n\n\nclass DistributedSortishSampler(Sampler):\n \"\"\"Copied from torch DistributedSampler\"\"\"\n\n def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True):\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n if add_extra_examples:\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n else:\n self.total_size = len(dataset)\n self.num_samples = len(self.available_indices)\n self.batch_size = batch_size\n self.add_extra_examples = add_extra_examples\n self.shuffle = shuffle\n\n def __iter__(self) -> Iterable:\n g = 
torch.Generator()\n g.manual_seed(self.epoch)\n\n sortish_data = [self.dataset.src_lens[i] for i in self.available_indices]\n sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle)\n indices = [self.available_indices[i] for i in sortish_indices]\n assert len(indices) == self.num_samples\n return iter(indices)\n\n @cached_property\n def available_indices(self) -> np.array:\n indices = list(range(len(self.dataset)))\n # add extra samples to make it evenly divisible\n indices += indices[: (self.total_size - len(indices))]\n assert len(indices) == self.total_size\n # subsample\n available_indices = indices[self.rank : self.total_size : self.num_replicas]\n return available_indices\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n\nlogger = getLogger(__name__)\n\n\ndef use_task_specific_params(model, task):\n \"\"\"Update config with summarization specific params.\"\"\"\n task_specific_params = model.config.task_specific_params\n\n if task_specific_params is not None:\n pars = task_specific_params.get(task, {})\n logger.info(f\"using task specific params for {task}: {pars}\")\n model.config.update(pars)\n\n\ndef pickle_load(path):\n \"\"\"pickle.load(path)\"\"\"\n with open(path, \"rb\") as f:\n return pickle.load(f)\n\n\ndef pickle_save(obj, path):\n \"\"\"pickle.dump(obj, path)\"\"\"\n with open(path, \"wb\") as f:\n return pickle.dump(obj, f)\n\n\ndef flatten_list(summary_ids: List[List]):\n return [x for x in itertools.chain.from_iterable(summary_ids)]\n\n\ndef save_git_info(folder_path: str) -> None:\n \"\"\"Save git information to output_dir/git_log.json\"\"\"\n repo_infos = get_git_info()\n save_json(repo_infos, os.path.join(folder_path, \"git_log.json\"))\n\n\ndef save_json(content, path, indent=4, **json_dump_kwargs):\n with open(path, \"w\") as f:\n json.dump(content, f, indent=indent, **json_dump_kwargs)\n\n\ndef load_json(path):\n with open(path) as f:\n return json.load(f)\n\n\ndef get_git_info():\n try:\n repo = git.Repo(search_parent_directories=True)\n repo_infos = {\n \"repo_id\": str(repo),\n \"repo_sha\": str(repo.head.object.hexsha),\n \"repo_branch\": str(repo.active_branch),\n \"hostname\": str(socket.gethostname()),\n }\n return repo_infos\n except TypeError:\n return {\n \"repo_id\": None,\n \"repo_sha\": None,\n \"repo_branch\": None,\n \"hostname\": None,\n }\n\n\nROUGE_KEYS = [\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"]\n\n\ndef extract_rouge_mid_statistics(dct):\n new_dict = {}\n for k1, v1 in dct.items():\n mid = v1.mid\n new_dict[k1] = {stat: round(getattr(mid, stat), 4) for stat in [\"precision\", \"recall\", \"fmeasure\"]}\n return new_dict\n\n\ndef calculate_rouge(\n pred_lns: List[str],\n tgt_lns: List[str],\n use_stemmer=True,\n rouge_keys=ROUGE_KEYS,\n return_precision_and_recall=False,\n bootstrap_aggregation=True,\n newline_sep=True,\n) -> Dict:\n \"\"\"Calculate rouge using rouge_scorer package.\n\n Args:\n pred_lns: list of summaries generated by model\n tgt_lns: list of groundtruth summaries (e.g. contents of val.target)\n use_stemmer: Bool indicating whether Porter stemmer should be used to\n strip word suffixes to improve matching.\n rouge_keys: which metrics to compute, defaults to rouge1, rouge2, rougeL, rougeLsum\n return_precision_and_recall: (False) whether to also return precision and recall.\n bootstrap_aggregation: whether to do the typical bootstrap resampling of scores. 
Defaults to True, if False\n this function returns a collections.defaultdict[metric: list of values for each observation for each subscore]``\n newline_sep:(default=True) whether to add newline between sentences. This is essential for calculation rougeL\n on multi sentence summaries (CNN/DM dataset).\n\n Returns:\n Dict[score: value] if aggregate else defaultdict(list) keyed by rouge_keys\n\n \"\"\"\n scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=use_stemmer)\n aggregator = scoring.BootstrapAggregator()\n for pred, tgt in zip(tgt_lns, pred_lns):\n # rougeLsum expects \"\\n\" separated sentences within a summary\n if newline_sep:\n pred = add_newline_to_end_of_each_sentence(pred)\n tgt = add_newline_to_end_of_each_sentence(tgt)\n scores = scorer.score(pred, tgt)\n aggregator.add_scores(scores)\n\n if bootstrap_aggregation:\n result = aggregator.aggregate()\n if return_precision_and_recall:\n return extract_rouge_mid_statistics(result) # here we return dict\n else:\n return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()}\n\n else:\n return aggregator._scores # here we return defaultdict(list)\n\n\n# Utilities for freezing parameters and checking whether they are frozen\n\n\ndef freeze_params(model: nn.Module):\n \"\"\"Set requires_grad=False for each of model.parameters()\"\"\"\n for par in model.parameters():\n par.requires_grad = False\n\n\ndef freeze_embeds(model):\n \"\"\"Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.\"\"\"\n model_type = model.config.model_type\n\n if model_type == \"t5\":\n freeze_params(model.shared)\n for d in [model.encoder, model.decoder]:\n freeze_params(d.embed_tokens)\n elif model_type == \"fsmt\":\n for d in [model.model.encoder, model.model.decoder]:\n freeze_params(d.embed_positions)\n freeze_params(d.embed_tokens)\n else:\n freeze_params(model.model.shared)\n for d in [model.model.encoder, model.model.decoder]:\n freeze_params(d.embed_positions)\n freeze_params(d.embed_tokens)\n\n\ndef grad_status(model: nn.Module) -> Iterable:\n return (par.requires_grad for par in model.parameters())\n\n\ndef any_requires_grad(model: nn.Module) -> bool:\n return any(grad_status(model))\n\n\ndef assert_all_frozen(model):\n model_grads: List[bool] = list(grad_status(model))\n n_require_grad = sum(lmap(int, model_grads))\n npars = len(model_grads)\n assert not any(model_grads), f\"{n_require_grad/npars:.1%} of {npars} weights require grad\"\n\n\ndef assert_not_all_frozen(model):\n model_grads: List[bool] = list(grad_status(model))\n npars = len(model_grads)\n assert any(model_grads), f\"none of {npars} weights require grad\"\n\n\ndef parse_numeric_n_bool_cl_kwargs(unparsed_args: List[str]) -> Dict[str, Union[int, float, bool]]:\n \"\"\"\n Parse an argv list of unspecified command line args to a dict.\n Assumes all values are either numeric or boolean in the form of true/false.\n \"\"\"\n result = {}\n assert len(unparsed_args) % 2 == 0, f\"got odd number of unparsed args: {unparsed_args}\"\n num_pairs = len(unparsed_args) // 2\n for pair_num in range(num_pairs):\n i = 2 * pair_num\n assert unparsed_args[i].startswith(\"--\")\n if unparsed_args[i + 1].lower() == \"true\":\n value = True\n elif unparsed_args[i + 1].lower() == \"false\":\n value = False\n else:\n try:\n value = int(unparsed_args[i + 1])\n except ValueError:\n value = float(unparsed_args[i + 1]) # this can raise another informative ValueError\n\n result[unparsed_args[i][2:]] = value\n return result\n\n\ndef write_txt_file(ordered_tgt, path):\n 
f = Path(path).open(\"w\")\n for ln in ordered_tgt:\n f.write(ln + \"\\n\")\n f.flush()\n\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\ndef check_output_dir(args, expected_items=0):\n \"\"\"\n Checks whether to bail out if output_dir already exists and has more than expected_items in it\n\n `args`: needs to have the following attributes of `args`:\n - output_dir\n - do_train\n - overwrite_output_dir\n\n `expected_items`: normally 0 (default) - i.e. empty dir, but in some cases a few files are expected (e.g. recovery from OOM)\n \"\"\"\n if (\n os.path.exists(args.output_dir)\n and len(os.listdir(args.output_dir)) > expected_items\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({args.output_dir}) already exists and \"\n f\"has {len(os.listdir(args.output_dir))} items in it (expected {expected_items} items). \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n" ]
[ [ "torch.Generator", "torch.tensor", "numpy.concatenate", "numpy.argmax", "numpy.random.permutation", "torch.distributed.is_available", "numpy.count_nonzero", "torch.stack", "torch.distributed.get_rank", "torch.distributed.get_world_size", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
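trim_batch in this record drops any column populated entirely by padding. A tiny self-contained check of that behavior (the ids are arbitrary):

import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 0, 0],
                          [7, 8, 9, 0]])
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)  # column has any non-pad token?
print(input_ids[:, keep_column_mask])  # the all-pad last column is removed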
Harshit-Vavaiya/Edith-A
[ "e36a21ef3537108d87f5596eacacbfb827e0de7d" ]
[ "chat.py" ]
[ "import random\nimport json\nimport requests\nimport torch\nfrom bs4 import BeautifulSoup\nfrom model import NeuralNet\nfrom nltk_utils import bag_of_words, tokenize\nfrom search import search\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n\ndef getResponse(q='hi'):\n with open('intents.json', 'r') as json_data:\n intents = json.load(json_data)\n\n FILE = \"data.pth\"\n data = torch.load(FILE)\n\n input_size = data[\"input_size\"]\n hidden_size = data[\"hidden_size\"]\n output_size = data[\"output_size\"]\n all_words = data['all_words']\n tags = data['tags']\n model_state = data[\"model_state\"]\n\n model = NeuralNet(input_size, hidden_size, output_size).to(device)\n model.load_state_dict(model_state)\n model.eval()\n\n bot_name = \"Edith\"\n print(\"Let's chat! (type 'quit' to exit)\")\n \n sentence = tokenize(q)\n X = bag_of_words(sentence, all_words)\n X = X.reshape(1, X.shape[0])\n X = torch.from_numpy(X).to(device)\n\n output = model(X)\n _, predicted = torch.max(output, dim=1)\n\n tag = tags[predicted.item()]\n\n probs = torch.softmax(output, dim=1)\n prob = probs[0][predicted.item()]\n if prob.item() > 0.75:\n if tag == 'search':\n return search(sentence) \n \n else:\n for intent in intents['intents']:\n if tag == intent[\"tag\"]:\n return {'response': random.choice(intent['responses']) }\n else:\n return { 'response' : 'I do not understand...'}" ]
[ [ "torch.softmax", "torch.max", "torch.load", "torch.from_numpy", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
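getResponse in this record gates its answers on softmax confidence: a predicted tag is only trusted above probability 0.75. A standalone sketch of that gate (the logits are invented for illustration):

import torch

output = torch.tensor([[2.5, 0.3, -1.0]])  # one row of model logits
probs = torch.softmax(output, dim=1)
prob, predicted = torch.max(probs, dim=1)
if prob.item() > 0.75:
    print("confident tag index:", predicted.item())
else:
    print("I do not understand...")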
brunnokick/arquitetura-dados
[ "ed6e3c2ccfa0122ba996fa3f880fce96abc857aa" ]
[ "weight_lifting.py" ]
[ "import csv\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import (\n classification_report,\n confusion_matrix,\n accuracy_score,\n f1_score,\n precision_score,\n recall_score,\n plot_confusion_matrix,\n)\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom numpy.random import RandomState\nimport warnings\nfrom typing import List\nimport random\n\nfrom utils import to_latex\n\nwarnings.filterwarnings(\"ignore\")\n\nrandom.seed(42)\nRANDOM_NUM = 42\nnp.random.seed(42)\nRANDOM_STATE = RandomState(42)\n\n\nclass WeightLifting:\n @staticmethod\n def load_df() -> pd.DataFrame:\n df = pd.read_csv(\"weight_lifting.csv\", header=1)\n df.to_csv(r\"outputs/original_database.csv\", quoting=csv.QUOTE_NONNUMERIC)\n return df\n\n @staticmethod\n def transform(df: pd.DataFrame) -> pd.DataFrame:\n df.drop(\n columns=[\n \"user_name\",\n \"cvtd_timestamp\",\n \"num_window\",\n \"raw_timestamp_part_2\",\n \"raw_timestamp_part_1\",\n ],\n inplace=True,\n )\n\n # Convertendo coluna \"new_window\" para booleano\n df[\"new_window\"] = np.where(df[\"new_window\"].str.lower() == \"yes\", 1, 0)\n df[\"new_window\"] = df[\"new_window\"].astype(int)\n\n for col in df.columns[:-1]:\n # Corrigindo campos com \"#DIV/0!\"\n if df[col].dtype == object:\n df[col] = df[col].str.replace(\"#DIV/0!\", \"0\")\n df[col] = df[col].astype(float)\n\n # Corrigindo valores N/A com a média ou \"0\"\n if df[col].dtype in (int, float):\n df[col] = df[col].replace(np.nan, df[col].mean())\n else:\n df[col] = df[col].replace(np.nan, \"0\")\n\n df.to_csv(r\"outputs/cleaned_database.csv\", quoting=csv.QUOTE_NONNUMERIC)\n return df\n\n @staticmethod\n def create_train_test(\n df: pd.DataFrame,\n features: List[str] = None,\n target: str = None,\n test_size: float = 0.25,\n ) -> List:\n if features is None:\n X = df.iloc[:, 0:-1]\n else:\n X = df[features]\n\n if target is None:\n y = df.iloc[:, -1:]\n else:\n y = df[target]\n\n return train_test_split(X, y, test_size=test_size, random_state=RANDOM_NUM)\n\n @staticmethod\n def fit_and_predict(\n X_train: pd.DataFrame,\n X_test: pd.DataFrame,\n y_train: pd.DataFrame,\n y_test: pd.DataFrame,\n list_model: List = None,\n state: str = \"INICIAL\",\n ) -> List:\n if list_model is None:\n list_model = [\n (\n \"LR\",\n LogisticRegression(\n **{\n \"C\": 0.1,\n \"fit_intercept\": True,\n \"multi_class\": \"ovr\",\n \"penalty\": \"l2\",\n \"solver\": \"newton-cg\",\n }\n ),\n ),\n (\n \"SVM\",\n SVC(\n **{\n \"C\": 10,\n \"gamma\": 1e-05,\n \"kernel\": \"rbf\",\n \"probability\": True,\n }\n ),\n ),\n (\n \"MLP\",\n MLPClassifier(\n **{\n \"alpha\": 0.0001,\n \"hidden_layer_sizes\": (5, 2),\n \"solver\": \"sgd\",\n }\n ),\n ),\n (\"DTC\", DecisionTreeClassifier()),\n ]\n models_base_predict = []\n for result in list_model:\n name, model = result\n model.fit(X_train, y_train)\n predict = model.predict(X_test)\n accuracy = round(accuracy_score(y_test, predict), 4)\n f1 = round(f1_score(y_test, predict, average=\"macro\"), 4)\n precision = round(precision_score(y_test, predict, average=\"macro\"), 4)\n recall = round(recall_score(y_test, predict, average=\"macro\"), 4)\n models_base_predict.append(\n {\n \"name\": name,\n \"state\": state,\n \"model\": model,\n \"predict\": predict,\n 
\"accuracy\": accuracy,\n \"f1\": f1,\n \"precision\": precision,\n \"recall\": recall,\n \"state_name\": f\"{state}_{name}\",\n }\n )\n\n return models_base_predict\n\n @staticmethod\n def plot_results(list_predict, X_test, y_test, export_files=True):\n for result in list_predict:\n print(f\"Model: {result['name']}\")\n metrics = {\n \"Accuracy\": [result[\"accuracy\"]],\n \"F1\": [result[\"f1\"]],\n \"Precision\": [result[\"precision\"]],\n \"Recall\": [result[\"recall\"]],\n }\n\n metrics_df = pd.DataFrame.from_dict(\n metrics, orient=\"index\", columns=[\"Valor\"],\n )\n print(metrics_df)\n print()\n print(confusion_matrix(y_test, result[\"predict\"]))\n print()\n report = classification_report(y_test, result[\"predict\"], output_dict=True)\n report_df = pd.DataFrame(report).transpose()\n print(report_df)\n plot_confusion_matrix(result[\"model\"], X_test, y_test)\n print()\n if export_files:\n # to_latex(\n # metrics_df,\n # f\"outputs/tex/table_metrics_{result['state_name'].lower()}.tex\",\n # float_format=\"%.2f\",\n # )\n to_latex(\n report_df, # report_df.iloc[:-3, :-1],\n f\"outputs/tex/table_{result['state_name'].lower()}.tex\",\n float_format=\"%.2f\",\n )\n plt.savefig(f\"outputs/img/matrix_{result['state_name'].lower()}.png\")\n plt.show()\n print(\"--------------------------------------------\")\n\n @staticmethod\n def plot_correlation_matrix(df: pd.DataFrame):\n corr = df.corr()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(corr, cmap=\"coolwarm\", vmin=-1, vmax=1)\n fig.colorbar(cax)\n ticks = np.arange(0, len(df.columns), 1)\n ax.set_xticks(ticks)\n plt.xticks(rotation=90)\n ax.set_yticks(ticks)\n ax.set_xticklabels(df.columns)\n ax.set_yticklabels(df.columns)\n plt.show()\n\n @staticmethod\n def remove_outliers(df: pd.DataFrame) -> pd.DataFrame:\n # identify and remove outliers from dataframe\n iso = IsolationForest(contamination=0.05, random_state=RANDOM_STATE)\n predict = iso.fit_predict(df.iloc[:, 0:-1])\n\n mask = predict != -1\n return df.iloc[mask]\n\n def plot_final_results(self, df: pd.DataFrame):\n # print(df)\n\n def get_approach(row: pd.Series):\n return {\n \"INICIAL\": \"Inicial\",\n \"ISO\": \"Floresta de Isolamento\",\n \"SFS\": \"Sequential Feature Selector\",\n \"ISO_SFS\": \"SFS + Floresta de Isolamento\",\n }.get(row[\"state\"])\n\n def get_approach_order(row: pd.Series):\n return {\"INICIAL\": 0, \"ISO\": 1, \"SFS\": 2, \"ISO_SFS\": 3,}.get(row[\"state\"])\n\n def get_classifier_order(row: pd.Series):\n return {\"LR\": 0, \"SVM\": 1, \"MLP\": 2, \"DTC\": 3}.get(row[\"name\"])\n\n def get_classifier(row: pd.Series):\n return {\n \"LR\": \"Regressão Logística\",\n \"SVM\": \"Máquina de Vetores de Suporte\",\n \"MLP\": \"Perceptron Multicamadas\",\n \"DTC\": \"Árvore de Decisão\",\n }.get(row[\"name\"])\n\n df[\"tecnica\"] = df.apply(lambda x: get_approach(x), axis=1)\n df[\"classificador\"] = df.apply(lambda x: get_classifier(x), axis=1)\n df[\"ordem_tecnica\"] = df.apply(lambda x: get_approach_order(x), axis=1)\n df[\"ordem_classificador\"] = df.apply(lambda x: get_classifier_order(x), axis=1)\n df = df.sort_values(by=[\"ordem_classificador\", \"ordem_tecnica\"])\n resultados_df = df[\n [\"classificador\", \"tecnica\", \"accuracy\", \"f1\", \"precision\", \"recall\"]\n ]\n to_latex(resultados_df, \"outputs/tex/table_resultado_final.tex\", index=False)\n print(resultados_df)\n\n sns.set()\n columns = [\n \"name\",\n \"state\",\n \"state_name\",\n \"accuracy\",\n \"f1\",\n \"precision\",\n \"recall\",\n ]\n resultados = df[columns]\n 
resultados.columns = [col.lower() for col in columns]\n resultados.to_csv(\"outputs/resultados.csv\")\n pivot = resultados.pivot(\"name\", \"state\", \"accuracy\")[\n [\"INICIAL\", \"ISO\", \"SFS\", \"ISO_SFS\"]\n ]\n sns_plot = sns.heatmap(pivot, annot=True, linewidths=0.5)\n sns_plot.figure.savefig(\"outputs/img/results_heatmap.png\")\n" ]
[ [ "sklearn.neural_network.MLPClassifier", "sklearn.metrics.plot_confusion_matrix", "sklearn.metrics.confusion_matrix", "pandas.DataFrame", "sklearn.tree.DecisionTreeClassifier", "sklearn.metrics.f1_score", "sklearn.metrics.classification_report", "pandas.read_csv", "matplotlib.pyplot.figure", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "sklearn.svm.SVC", "pandas.DataFrame.from_dict", "numpy.random.RandomState", "sklearn.metrics.recall_score", "sklearn.ensemble.IsolationForest", "matplotlib.pyplot.show", "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "matplotlib.pyplot.xticks", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
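WeightLifting.remove_outliers in this record fits an IsolationForest with contamination=0.05 and keeps only rows not flagged as -1. A runnable sketch on synthetic data (the feature values are made up; the real pipeline feeds the weight-lifting columns):

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(42)
X = np.vstack([rng.normal(0, 1, size=(95, 3)),
               rng.normal(8, 1, size=(5, 3))])  # 5 obvious outliers appended

iso = IsolationForest(contamination=0.05, random_state=rng)
mask = iso.fit_predict(X) != -1                 # -1 marks predicted outliers
print(X.shape, "->", X[mask].shape)             # typically (100, 3) -> (95, 3)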
JuliusHarald/silx
[ "3f9bcda88c074438fdb30cde29fec314d26f471c" ]
[ "silx/gui/plot/items/core.py" ]
[ "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module provides the base class for items of the :class:`Plot`.\n\"\"\"\n\n__authors__ = [\"T. Vincent\"]\n__license__ = \"MIT\"\n__date__ = \"29/01/2019\"\n\nimport collections\ntry:\n from collections import abc\nexcept ImportError: # Python2 support\n import collections as abc\nfrom copy import deepcopy\nimport logging\nimport enum\nimport warnings\nimport weakref\n\nimport numpy\nimport six\n\nfrom ....utils.deprecation import deprecated\nfrom ....utils.enum import Enum as _Enum\nfrom ... import qt\nfrom ... 
import colors\nfrom ...colors import Colormap\nfrom ._pick import PickingResult\n\nfrom silx import config\n\n_logger = logging.getLogger(__name__)\n\n\[email protected]\nclass ItemChangedType(enum.Enum):\n \"\"\"Type of modification provided by :attr:`Item.sigItemChanged` signal.\"\"\"\n # Private setters and setInfo are not emitting sigItemChanged signal.\n # Signals to consider:\n # COLORMAP_SET emitted when setColormap is called but not forward colormap object signal\n # CURRENT_COLOR_CHANGED emitted current color changed because highlight changed,\n # highlighted color changed or color changed depending on hightlight state.\n\n VISIBLE = 'visibleChanged'\n \"\"\"Item's visibility changed flag.\"\"\"\n\n ZVALUE = 'zValueChanged'\n \"\"\"Item's Z value changed flag.\"\"\"\n\n COLORMAP = 'colormapChanged' # Emitted when set + forward events from the colormap object\n \"\"\"Item's colormap changed flag.\n\n This is emitted both when setting a new colormap and\n when the current colormap object is updated.\n \"\"\"\n\n SYMBOL = 'symbolChanged'\n \"\"\"Item's symbol changed flag.\"\"\"\n\n SYMBOL_SIZE = 'symbolSizeChanged'\n \"\"\"Item's symbol size changed flag.\"\"\"\n\n LINE_WIDTH = 'lineWidthChanged'\n \"\"\"Item's line width changed flag.\"\"\"\n\n LINE_STYLE = 'lineStyleChanged'\n \"\"\"Item's line style changed flag.\"\"\"\n\n COLOR = 'colorChanged'\n \"\"\"Item's color changed flag.\"\"\"\n\n LINE_BG_COLOR = 'lineBgColorChanged'\n \"\"\"Item's line background color changed flag.\"\"\"\n\n YAXIS = 'yAxisChanged'\n \"\"\"Item's Y axis binding changed flag.\"\"\"\n\n FILL = 'fillChanged'\n \"\"\"Item's fill changed flag.\"\"\"\n\n ALPHA = 'alphaChanged'\n \"\"\"Item's transparency alpha changed flag.\"\"\"\n\n DATA = 'dataChanged'\n \"\"\"Item's data changed flag\"\"\"\n\n HIGHLIGHTED = 'highlightedChanged'\n \"\"\"Item's highlight state changed flag.\"\"\"\n\n HIGHLIGHTED_COLOR = 'highlightedColorChanged'\n \"\"\"Deprecated, use HIGHLIGHTED_STYLE instead.\"\"\"\n\n HIGHLIGHTED_STYLE = 'highlightedStyleChanged'\n \"\"\"Item's highlighted style changed flag.\"\"\"\n\n SCALE = 'scaleChanged'\n \"\"\"Item's scale changed flag.\"\"\"\n\n TEXT = 'textChanged'\n \"\"\"Item's text changed flag.\"\"\"\n\n POSITION = 'positionChanged'\n \"\"\"Item's position changed flag.\n\n This is emitted when a marker position changed and\n when an image origin changed.\n \"\"\"\n\n OVERLAY = 'overlayChanged'\n \"\"\"Item's overlay state changed flag.\"\"\"\n\n VISUALIZATION_MODE = 'visualizationModeChanged'\n \"\"\"Item's visualization mode changed flag.\"\"\"\n\n COMPLEX_MODE = 'complexModeChanged'\n \"\"\"Item's complex data visualization mode changed flag.\"\"\"\n\n NAME = 'nameChanged'\n \"\"\"Item's name changed flag.\"\"\"\n\n EDITABLE = 'editableChanged'\n \"\"\"Item's editable state changed flags.\"\"\"\n\n\nclass Item(qt.QObject):\n \"\"\"Description of an item of the plot\"\"\"\n\n _DEFAULT_Z_LAYER = 0\n \"\"\"Default layer for overlay rendering\"\"\"\n\n _DEFAULT_SELECTABLE = False\n \"\"\"Default selectable state of items\"\"\"\n\n sigItemChanged = qt.Signal(object)\n \"\"\"Signal emitted when the item has changed.\n\n It provides a flag describing which property of the item has changed.\n See :class:`ItemChangedType` for flags description.\n \"\"\"\n\n def __init__(self):\n qt.QObject.__init__(self)\n self._dirty = True\n self._plotRef = None\n self._visible = True\n self._selectable = self._DEFAULT_SELECTABLE\n self._z = self._DEFAULT_Z_LAYER\n self._info = None\n self._xlabel = None\n 
self._ylabel = None\n self.__name = ''\n\n self._backendRenderer = None\n\n def getPlot(self):\n \"\"\"Returns the ~silx.gui.plot.PlotWidget this item belongs to.\n\n :rtype: Union[~silx.gui.plot.PlotWidget,None]\n \"\"\"\n return None if self._plotRef is None else self._plotRef()\n\n def _setPlot(self, plot):\n \"\"\"Set the plot this item belongs to.\n\n WARNING: This should only be called from the Plot.\n\n :param Union[~silx.gui.plot.PlotWidget,None] plot: The Plot instance.\n \"\"\"\n if plot is not None and self._plotRef is not None:\n raise RuntimeError('Trying to add a node at two places.')\n self._plotRef = None if plot is None else weakref.ref(plot)\n self._updated()\n\n def getBounds(self): # TODO return a Bounds object rather than a tuple\n \"\"\"Returns the bounding box of this item in data coordinates\n\n :returns: (xmin, xmax, ymin, ymax) or None\n :rtype: 4-tuple of float or None\n \"\"\"\n return self._getBounds()\n\n def _getBounds(self):\n \"\"\":meth:`getBounds` implementation to override by sub-class\"\"\"\n return None\n\n def isVisible(self):\n \"\"\"True if item is visible, False otherwise\n\n :rtype: bool\n \"\"\"\n return self._visible\n\n def setVisible(self, visible):\n \"\"\"Set visibility of item.\n\n :param bool visible: True to display it, False otherwise\n \"\"\"\n visible = bool(visible)\n if visible != self._visible:\n self._visible = visible\n # When visibility has changed, always mark as dirty\n self._updated(ItemChangedType.VISIBLE,\n checkVisibility=False)\n\n def isOverlay(self):\n \"\"\"Return true if item is drawn as an overlay.\n\n :rtype: bool\n \"\"\"\n return False\n\n def getName(self):\n \"\"\"Returns the name of the item which is used as legend.\n\n :rtype: str\n \"\"\"\n return self.__name\n\n def setName(self, name):\n \"\"\"Set the name of the item which is used as legend.\n\n :param str name: New name of the item\n :raises RuntimeError: If item belongs to a PlotWidget.\n \"\"\"\n name = str(name)\n if self.__name != name:\n if self.getPlot() is not None:\n raise RuntimeError(\n \"Cannot change name while item is in a PlotWidget\")\n\n self.__name = name\n self._updated(ItemChangedType.NAME)\n\n def getLegend(self): # Replaced by getName for API consistency\n return self.getName()\n\n @deprecated(replacement='setName', since_version='0.13')\n def _setLegend(self, legend):\n legend = str(legend) if legend is not None else ''\n self.setName(legend)\n\n def isSelectable(self):\n \"\"\"Returns true if item is selectable (bool)\"\"\"\n return self._selectable\n\n def _setSelectable(self, selectable): # TODO support update\n \"\"\"Set whether item is selectable or not.\n\n This is private for now as change is not handled.\n\n :param bool selectable: True to make item selectable\n \"\"\"\n self._selectable = bool(selectable)\n\n def getZValue(self):\n \"\"\"Returns the layer on which to draw this item (int)\"\"\"\n return self._z\n\n def setZValue(self, z):\n z = int(z) if z is not None else self._DEFAULT_Z_LAYER\n if z != self._z:\n self._z = z\n self._updated(ItemChangedType.ZVALUE)\n\n def getInfo(self, copy=True):\n \"\"\"Returns the info associated to this item\n\n :param bool copy: True to get a deepcopy, False otherwise.\n \"\"\"\n return deepcopy(self._info) if copy else self._info\n\n def setInfo(self, info, copy=True):\n if copy:\n info = deepcopy(info)\n self._info = info\n\n def _updated(self, event=None, checkVisibility=True):\n \"\"\"Mark the item as dirty (i.e., needing update).\n\n This also triggers Plot.replot.\n\n :param 
event: The event to send to :attr:`sigItemChanged` signal.\n :param bool checkVisibility: True to only mark as dirty if visible,\n False to always mark as dirty.\n \"\"\"\n if not checkVisibility or self.isVisible():\n if not self._dirty:\n self._dirty = True\n # TODO: send event instead of explicit call\n plot = self.getPlot()\n if plot is not None:\n plot._itemRequiresUpdate(self)\n if event is not None:\n self.sigItemChanged.emit(event)\n\n def _update(self, backend):\n \"\"\"Called by Plot to update the backend for this item.\n\n This is meant to be called asynchronously from _updated.\n This optimizes the number of call to _update.\n\n :param backend: The backend to update\n \"\"\"\n if self._dirty:\n # Remove previous renderer from backend if any\n self._removeBackendRenderer(backend)\n\n # If not visible, do not add renderer to backend\n if self.isVisible():\n self._backendRenderer = self._addBackendRenderer(backend)\n\n self._dirty = False\n\n def _addBackendRenderer(self, backend):\n \"\"\"Override in subclass to add specific backend renderer.\n\n :param BackendBase backend: The backend to update\n :return: The renderer handle to store or None if no renderer in backend\n \"\"\"\n return None\n\n def _removeBackendRenderer(self, backend):\n \"\"\"Override in subclass to remove specific backend renderer.\n\n :param BackendBase backend: The backend to update\n \"\"\"\n if self._backendRenderer is not None:\n backend.remove(self._backendRenderer)\n self._backendRenderer = None\n\n def pick(self, x, y):\n \"\"\"Run picking test on this item\n\n :param float x: The x pixel coord where to pick.\n :param float y: The y pixel coord where to pick.\n :return: None if not picked, else the picked position information\n :rtype: Union[None,PickingResult]\n \"\"\"\n if not self.isVisible() or self._backendRenderer is None:\n return None\n plot = self.getPlot()\n if plot is None:\n return None\n\n indices = plot._backend.pickItem(x, y, self._backendRenderer)\n if indices is None:\n return None\n else:\n return PickingResult(self, indices)\n\n\n# Mix-in classes ##############################################################\n\nclass ItemMixInBase(qt.QObject):\n \"\"\"Base class for Item mix-in\"\"\"\n\n def _updated(self, event=None, checkVisibility=True):\n \"\"\"This is implemented in :class:`Item`.\n\n Mark the item as dirty (i.e., needing update).\n This also triggers Plot.replot.\n\n :param event: The event to send to :attr:`sigItemChanged` signal.\n :param bool checkVisibility: True to only mark as dirty if visible,\n False to always mark as dirty.\n \"\"\"\n raise RuntimeError(\n \"Issue with Mix-In class inheritance order\")\n\n\nclass LabelsMixIn(ItemMixInBase):\n \"\"\"Mix-in class for items with x and y labels\n\n Setters are private, otherwise it needs to check the plot\n current active curve and access the internal current labels.\n \"\"\"\n\n def __init__(self):\n self._xlabel = None\n self._ylabel = None\n\n def getXLabel(self):\n \"\"\"Return the X axis label associated to this curve\n\n :rtype: str or None\n \"\"\"\n return self._xlabel\n\n def _setXLabel(self, label):\n \"\"\"Set the X axis label associated with this curve\n\n :param str label: The X axis label\n \"\"\"\n self._xlabel = str(label)\n\n def getYLabel(self):\n \"\"\"Return the Y axis label associated to this curve\n\n :rtype: str or None\n \"\"\"\n return self._ylabel\n\n def _setYLabel(self, label):\n \"\"\"Set the Y axis label associated with this curve\n\n :param str label: The Y axis label\n \"\"\"\n self._ylabel 
= str(label)\n\n\nclass DraggableMixIn(ItemMixInBase):\n \"\"\"Mix-in class for draggable items\"\"\"\n\n def __init__(self):\n self._draggable = False\n\n def isDraggable(self):\n \"\"\"Returns true if image is draggable\n\n :rtype: bool\n \"\"\"\n return self._draggable\n\n def _setDraggable(self, draggable): # TODO support update\n \"\"\"Set if image is draggable or not.\n\n This is private for not as it does not support update.\n\n :param bool draggable:\n \"\"\"\n self._draggable = bool(draggable)\n\n def drag(self, from_, to):\n \"\"\"Perform a drag of the item.\n\n :param List[float] from_: (x, y) previous position in data coordinates\n :param List[float] to: (x, y) current position in data coordinates\n \"\"\"\n raise NotImplementedError(\"Must be implemented in subclass\")\n\n\nclass ColormapMixIn(ItemMixInBase):\n \"\"\"Mix-in class for items with colormap\"\"\"\n\n def __init__(self):\n self._colormap = Colormap()\n self._colormap.sigChanged.connect(self._colormapChanged)\n self.__data = None\n self.__cacheColormapRange = {} # Store {normalization: range}\n\n def getColormap(self):\n \"\"\"Return the used colormap\"\"\"\n return self._colormap\n\n def setColormap(self, colormap):\n \"\"\"Set the colormap of this item\n\n :param silx.gui.colors.Colormap colormap: colormap description\n \"\"\"\n if self._colormap is colormap:\n return\n if isinstance(colormap, dict):\n colormap = Colormap._fromDict(colormap)\n\n if self._colormap is not None:\n self._colormap.sigChanged.disconnect(self._colormapChanged)\n self._colormap = colormap\n if self._colormap is not None:\n self._colormap.sigChanged.connect(self._colormapChanged)\n self._colormapChanged()\n\n def _colormapChanged(self):\n \"\"\"Handle updates of the colormap\"\"\"\n self._updated(ItemChangedType.COLORMAP)\n\n def _setColormappedData(self, data, copy=True,\n min_=None, minPositive=None, max_=None):\n \"\"\"Set the data used to compute the colormapped display.\n\n It also resets the cache of data ranges.\n\n This method MUST be called by inheriting classes when data is updated.\n\n :param Union[None,numpy.ndarray] data:\n :param Union[None,float] min_: Minimum value of the data\n :param Union[None,float] minPositive:\n Minimum of strictly positive values of the data\n :param Union[None,float] max_: Maximum value of the data\n \"\"\"\n self.__data = None if data is None else numpy.array(data, copy=copy)\n self.__cacheColormapRange = {} # Reset cache\n\n # Fill-up colormap range cache if values are provided\n if max_ is not None and numpy.isfinite(max_):\n if min_ is not None and numpy.isfinite(min_):\n self.__cacheColormapRange[Colormap.LINEAR, Colormap.MINMAX] = min_, max_\n if minPositive is not None and numpy.isfinite(minPositive):\n self.__cacheColormapRange[Colormap.LOGARITHM, Colormap.MINMAX] = minPositive, max_\n\n colormap = self.getColormap()\n if None in (colormap.getVMin(), colormap.getVMax()):\n self._colormapChanged()\n\n def getColormappedData(self, copy=True):\n \"\"\"Returns the data used to compute the displayed colors\n\n :param bool copy: True to get a copy,\n False to get internal data (do not modify!).\n :rtype: Union[None,numpy.ndarray]\n \"\"\"\n if self.__data is None:\n return None\n else:\n return numpy.array(self.__data, copy=copy)\n\n def _getColormapAutoscaleRange(self, colormap=None):\n \"\"\"Returns the autoscale range for current data and colormap.\n\n :param Union[None,~silx.gui.colors.Colormap] colormap:\n The colormap for which to compute the autoscale range.\n If None, the default, the 
colormap of the item is used\n :return: (vmin, vmax) range (vmin and /or vmax might be `None`)\n \"\"\"\n if colormap is None:\n colormap = self.getColormap()\n\n data = self.getColormappedData(copy=False)\n if colormap is None or data is None:\n return None, None\n\n normalization = colormap.getNormalization()\n autoscaleMode = colormap.getAutoscaleMode()\n key = normalization, autoscaleMode\n vRange = self.__cacheColormapRange.get(key, None)\n if vRange is None:\n vRange = colormap._computeAutoscaleRange(data)\n self.__cacheColormapRange[key] = vRange\n return vRange\n\n\nclass SymbolMixIn(ItemMixInBase):\n \"\"\"Mix-in class for items with symbol type\"\"\"\n\n _DEFAULT_SYMBOL = None\n \"\"\"Default marker of the item\"\"\"\n\n _DEFAULT_SYMBOL_SIZE = config.DEFAULT_PLOT_SYMBOL_SIZE\n \"\"\"Default marker size of the item\"\"\"\n\n _SUPPORTED_SYMBOLS = collections.OrderedDict((\n ('o', 'Circle'),\n ('d', 'Diamond'),\n ('s', 'Square'),\n ('+', 'Plus'),\n ('x', 'Cross'),\n ('.', 'Point'),\n (',', 'Pixel'),\n ('|', 'Vertical line'),\n ('_', 'Horizontal line'),\n ('tickleft', 'Tick left'),\n ('tickright', 'Tick right'),\n ('tickup', 'Tick up'),\n ('tickdown', 'Tick down'),\n ('caretleft', 'Caret left'),\n ('caretright', 'Caret right'),\n ('caretup', 'Caret up'),\n ('caretdown', 'Caret down'),\n (u'\\u2665', 'Heart'),\n ('', 'None')))\n \"\"\"Dict of supported symbols\"\"\"\n\n def __init__(self):\n if self._DEFAULT_SYMBOL is None: # Use default from config\n self._symbol = config.DEFAULT_PLOT_SYMBOL\n else:\n self._symbol = self._DEFAULT_SYMBOL\n\n if self._DEFAULT_SYMBOL_SIZE is None: # Use default from config\n self._symbol_size = config.DEFAULT_PLOT_SYMBOL_SIZE\n else:\n self._symbol_size = self._DEFAULT_SYMBOL_SIZE\n\n @classmethod\n def getSupportedSymbols(cls):\n \"\"\"Returns the list of supported symbol names.\n\n :rtype: tuple of str\n \"\"\"\n return tuple(cls._SUPPORTED_SYMBOLS.keys())\n\n @classmethod\n def getSupportedSymbolNames(cls):\n \"\"\"Returns the list of supported symbol human-readable names.\n\n :rtype: tuple of str\n \"\"\"\n return tuple(cls._SUPPORTED_SYMBOLS.values())\n\n def getSymbolName(self, symbol=None):\n \"\"\"Returns human-readable name for a symbol.\n\n :param str symbol: The symbol from which to get the name.\n Default: current symbol.\n :rtype: str\n :raise KeyError: if symbol is not in :meth:`getSupportedSymbols`.\n \"\"\"\n if symbol is None:\n symbol = self.getSymbol()\n return self._SUPPORTED_SYMBOLS[symbol]\n\n def getSymbol(self):\n \"\"\"Return the point marker type.\n\n Marker type::\n\n - 'o' circle\n - '.' 
point\n - ',' pixel\n - '+' cross\n - 'x' x-cross\n - 'd' diamond\n - 's' square\n\n :rtype: str\n \"\"\"\n return self._symbol\n\n def setSymbol(self, symbol):\n \"\"\"Set the marker type\n\n See :meth:`getSymbol`.\n\n :param str symbol: Marker type or marker name\n \"\"\"\n if symbol is None:\n symbol = self._DEFAULT_SYMBOL\n\n elif symbol not in self.getSupportedSymbols():\n for symbolCode, name in self._SUPPORTED_SYMBOLS.items():\n if name.lower() == symbol.lower():\n symbol = symbolCode\n break\n else:\n raise ValueError('Unsupported symbol %s' % str(symbol))\n\n if symbol != self._symbol:\n self._symbol = symbol\n self._updated(ItemChangedType.SYMBOL)\n\n def getSymbolSize(self):\n \"\"\"Return the point marker size in points.\n\n :rtype: float\n \"\"\"\n return self._symbol_size\n\n def setSymbolSize(self, size):\n \"\"\"Set the point marker size in points.\n\n See :meth:`getSymbolSize`.\n\n :param str symbol: Marker type\n \"\"\"\n if size is None:\n size = self._DEFAULT_SYMBOL_SIZE\n if size != self._symbol_size:\n self._symbol_size = size\n self._updated(ItemChangedType.SYMBOL_SIZE)\n\n\nclass LineMixIn(ItemMixInBase):\n \"\"\"Mix-in class for item with line\"\"\"\n\n _DEFAULT_LINEWIDTH = 1.\n \"\"\"Default line width\"\"\"\n\n _DEFAULT_LINESTYLE = '-'\n \"\"\"Default line style\"\"\"\n\n _SUPPORTED_LINESTYLE = '', ' ', '-', '--', '-.', ':', None\n \"\"\"Supported line styles\"\"\"\n\n def __init__(self):\n self._linewidth = self._DEFAULT_LINEWIDTH\n self._linestyle = self._DEFAULT_LINESTYLE\n\n @classmethod\n def getSupportedLineStyles(cls):\n \"\"\"Returns list of supported line styles.\n\n :rtype: List[str,None]\n \"\"\"\n return cls._SUPPORTED_LINESTYLE\n\n def getLineWidth(self):\n \"\"\"Return the curve line width in pixels\n\n :rtype: float\n \"\"\"\n return self._linewidth\n\n def setLineWidth(self, width):\n \"\"\"Set the width in pixel of the curve line\n\n See :meth:`getLineWidth`.\n\n :param float width: Width in pixels\n \"\"\"\n width = float(width)\n if width != self._linewidth:\n self._linewidth = width\n self._updated(ItemChangedType.LINE_WIDTH)\n\n def getLineStyle(self):\n \"\"\"Return the type of the line\n\n Type of line::\n\n - ' ' no line\n - '-' solid line\n - '--' dashed line\n - '-.' 
dash-dot line\n - ':' dotted line\n\n :rtype: str\n \"\"\"\n return self._linestyle\n\n def setLineStyle(self, style):\n \"\"\"Set the style of the curve line.\n\n See :meth:`getLineStyle`.\n\n :param str style: Line style\n \"\"\"\n style = str(style)\n assert style in self.getSupportedLineStyles()\n if style is None:\n style = self._DEFAULT_LINESTYLE\n if style != self._linestyle:\n self._linestyle = style\n self._updated(ItemChangedType.LINE_STYLE)\n\n\nclass ColorMixIn(ItemMixInBase):\n \"\"\"Mix-in class for item with color\"\"\"\n\n _DEFAULT_COLOR = (0., 0., 0., 1.)\n \"\"\"Default color of the item\"\"\"\n\n def __init__(self):\n self._color = self._DEFAULT_COLOR\n\n def getColor(self):\n \"\"\"Returns the RGBA color of the item\n\n :rtype: 4-tuple of float in [0, 1] or array of colors\n \"\"\"\n return self._color\n\n def setColor(self, color, copy=True):\n \"\"\"Set item color\n\n :param color: color(s) to be used\n :type color: str (\"#RRGGBB\") or (npoints, 4) unsigned byte array or\n one of the predefined color names defined in colors.py\n :param bool copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n \"\"\"\n if isinstance(color, six.string_types):\n color = colors.rgba(color)\n else:\n color = numpy.array(color, copy=copy)\n # TODO more checks + improve color array support\n if color.ndim == 1: # Single RGBA color\n color = colors.rgba(color)\n else: # Array of colors\n assert color.ndim == 2\n\n self._color = color\n self._updated(ItemChangedType.COLOR)\n\n\nclass YAxisMixIn(ItemMixInBase):\n \"\"\"Mix-in class for item with yaxis\"\"\"\n\n _DEFAULT_YAXIS = 'left'\n \"\"\"Default Y axis the item belongs to\"\"\"\n\n def __init__(self):\n self._yaxis = self._DEFAULT_YAXIS\n\n def getYAxis(self):\n \"\"\"Returns the Y axis this curve belongs to.\n\n Either 'left' or 'right'.\n\n :rtype: str\n \"\"\"\n return self._yaxis\n\n def setYAxis(self, yaxis):\n \"\"\"Set the Y axis this curve belongs to.\n\n :param str yaxis: 'left' or 'right'\n \"\"\"\n yaxis = str(yaxis)\n assert yaxis in ('left', 'right')\n if yaxis != self._yaxis:\n self._yaxis = yaxis\n self._updated(ItemChangedType.YAXIS)\n\n\nclass FillMixIn(ItemMixInBase):\n \"\"\"Mix-in class for item with fill\"\"\"\n\n def __init__(self):\n self._fill = False\n\n def isFill(self):\n \"\"\"Returns whether the item is filled or not.\n\n :rtype: bool\n \"\"\"\n return self._fill\n\n def setFill(self, fill):\n \"\"\"Set whether to fill the item or not.\n\n :param bool fill:\n \"\"\"\n fill = bool(fill)\n if fill != self._fill:\n self._fill = fill\n self._updated(ItemChangedType.FILL)\n\n\nclass AlphaMixIn(ItemMixInBase):\n \"\"\"Mix-in class for item with opacity\"\"\"\n\n def __init__(self):\n self._alpha = 1.\n\n def getAlpha(self):\n \"\"\"Returns the opacity of the item\n\n :rtype: float in [0, 1.]\n \"\"\"\n return self._alpha\n\n def setAlpha(self, alpha):\n \"\"\"Set the opacity of the item\n\n .. note::\n\n If the colormap already has some transparency, this alpha\n adds additional transparency. The alpha channel of the colormap\n is multiplied by this value.\n\n :param alpha: Opacity of the item, between 0 (full transparency)\n and 1. (full opacity)\n :type alpha: float\n \"\"\"\n alpha = float(alpha)\n alpha = max(0., min(alpha, 1.)) # Clip alpha to [0., 1.] 
range\n if alpha != self._alpha:\n self._alpha = alpha\n self._updated(ItemChangedType.ALPHA)\n\n\nclass ComplexMixIn(ItemMixInBase):\n \"\"\"Mix-in class for complex data mode\"\"\"\n\n _SUPPORTED_COMPLEX_MODES = None\n \"\"\"Override to only support a subset of all ComplexMode\"\"\"\n\n class ComplexMode(_Enum):\n \"\"\"Identify available display mode for complex\"\"\"\n NONE = 'none'\n ABSOLUTE = 'amplitude'\n PHASE = 'phase'\n REAL = 'real'\n IMAGINARY = 'imaginary'\n AMPLITUDE_PHASE = 'amplitude_phase'\n LOG10_AMPLITUDE_PHASE = 'log10_amplitude_phase'\n SQUARE_AMPLITUDE = 'square_amplitude'\n\n def __init__(self):\n self.__complex_mode = self.ComplexMode.ABSOLUTE\n\n def getComplexMode(self):\n \"\"\"Returns the current complex visualization mode.\n\n :rtype: ComplexMode\n \"\"\"\n return self.__complex_mode\n\n def setComplexMode(self, mode):\n \"\"\"Set the complex visualization mode.\n\n :param ComplexMode mode: The visualization mode in:\n 'real', 'imaginary', 'phase', 'amplitude'\n :return: True if value was set, False if is was already set\n :rtype: bool\n \"\"\"\n mode = self.ComplexMode.from_value(mode)\n assert mode in self.supportedComplexModes()\n\n if mode != self.__complex_mode:\n self.__complex_mode = mode\n self._updated(ItemChangedType.COMPLEX_MODE)\n return True\n else:\n return False\n\n def _convertComplexData(self, data, mode=None):\n \"\"\"Convert complex data to the specific mode.\n\n :param Union[ComplexMode,None] mode:\n The kind of value to compute.\n If None (the default), the current complex mode is used.\n :return: The converted dataset\n :rtype: Union[numpy.ndarray[float],None]\n \"\"\"\n if data is None:\n return None\n\n if mode is None:\n mode = self.getComplexMode()\n\n if mode is self.ComplexMode.REAL:\n return numpy.real(data)\n elif mode is self.ComplexMode.IMAGINARY:\n return numpy.imag(data)\n elif mode is self.ComplexMode.ABSOLUTE:\n return numpy.absolute(data)\n elif mode is self.ComplexMode.PHASE:\n return numpy.angle(data)\n elif mode is self.ComplexMode.SQUARE_AMPLITUDE:\n return numpy.absolute(data) ** 2\n else:\n raise ValueError('Unsupported conversion mode: %s', str(mode))\n\n @classmethod\n def supportedComplexModes(cls):\n \"\"\"Returns the list of supported complex visualization modes.\n\n See :class:`ComplexMode` and :meth:`setComplexMode`.\n\n :rtype: List[ComplexMode]\n \"\"\"\n if cls._SUPPORTED_COMPLEX_MODES is None:\n return cls.ComplexMode.members()\n else:\n return cls._SUPPORTED_COMPLEX_MODES\n\n\nclass ScatterVisualizationMixIn(ItemMixInBase):\n \"\"\"Mix-in class for scatter plot visualization modes\"\"\"\n\n _SUPPORTED_SCATTER_VISUALIZATION = None\n \"\"\"Allows to override supported Visualizations\"\"\"\n\n @enum.unique\n class Visualization(_Enum):\n \"\"\"Different modes of scatter plot visualizations\"\"\"\n\n POINTS = 'points'\n \"\"\"Display scatter plot as a point cloud\"\"\"\n\n LINES = 'lines'\n \"\"\"Display scatter plot as a wireframe.\n\n This is based on Delaunay triangulation\n \"\"\"\n\n SOLID = 'solid'\n \"\"\"Display scatter plot as a set of filled triangles.\n\n This is based on Delaunay triangulation\n \"\"\"\n\n REGULAR_GRID = 'regular_grid'\n \"\"\"Display scatter plot as an image.\n\n It expects the points to be the intersection of a regular grid,\n and the order of points following that of an image.\n First line, then second one, and always in the same direction\n (either all lines from left to right or all from right to left).\n \"\"\"\n\n IRREGULAR_GRID = 'irregular_grid'\n \"\"\"Display scatter 
plot as contiguous quadrilaterals.\n\n It expects the points to be the intersection of an irregular grid,\n and the order of points following that of an image.\n First line, then second one, and always in the same direction\n (either all lines from left to right or all from right to left).\n \"\"\"\n\n @enum.unique\n class VisualizationParameter(_Enum):\n \"\"\"Different parameter names for scatter plot visualizations\"\"\"\n\n GRID_MAJOR_ORDER = 'grid_major_order'\n \"\"\"The major order of points in the regular grid.\n\n Either 'row' (row-major, fast X) or 'column' (column-major, fast Y).\n \"\"\"\n\n GRID_BOUNDS = 'grid_bounds'\n \"\"\"The expected range in data coordinates of the regular grid.\n\n A 2-tuple of 2-tuple: (begin (x, y), end (x, y)).\n This provides the data coordinates of the first point and the expected\n last on.\n As for `GRID_SHAPE`, this can be wider than the current data.\n \"\"\"\n\n GRID_SHAPE = 'grid_shape'\n \"\"\"The expected size of the regular grid (height, width).\n\n The given shape can be wider than the number of points,\n in which case the grid is not fully filled.\n \"\"\"\n\n def __init__(self):\n self.__visualization = self.Visualization.POINTS\n self.__parameters = dict( # Init parameters to None\n (parameter, None) for parameter in self.VisualizationParameter)\n\n @classmethod\n def supportedVisualizations(cls):\n \"\"\"Returns the list of supported scatter visualization modes.\n\n See :meth:`setVisualization`\n\n :rtype: List[Visualization]\n \"\"\"\n if cls._SUPPORTED_SCATTER_VISUALIZATION is None:\n return cls.Visualization.members()\n else:\n return cls._SUPPORTED_SCATTER_VISUALIZATION\n\n def setVisualization(self, mode):\n \"\"\"Set the scatter plot visualization mode to use.\n\n See :class:`Visualization` for all possible values,\n and :meth:`supportedVisualizations` for supported ones.\n\n :param Union[str,Visualization] mode:\n The visualization mode to use.\n :return: True if value was set, False if is was already set\n :rtype: bool\n \"\"\"\n mode = self.Visualization.from_value(mode)\n assert mode in self.supportedVisualizations()\n\n if mode != self.__visualization:\n self.__visualization = mode\n\n self._updated(ItemChangedType.VISUALIZATION_MODE)\n return True\n else:\n return False\n\n def getVisualization(self):\n \"\"\"Returns the scatter plot visualization mode in use.\n\n :rtype: Visualization\n \"\"\"\n return self.__visualization\n\n def setVisualizationParameter(self, parameter, value=None):\n \"\"\"Set the given visualization parameter.\n\n :param Union[str,VisualizationParameter] parameter:\n The name of the parameter to set\n :param value: The value to use for this parameter\n Set to None to automatically set the parameter\n :raises ValueError: If parameter is not supported\n :return: True if parameter was set, False if is was already set\n :rtype: bool\n \"\"\"\n parameter = self.VisualizationParameter.from_value(parameter)\n\n if self.__parameters[parameter] != value:\n self.__parameters[parameter] = value\n self._updated(ItemChangedType.VISUALIZATION_MODE)\n return True\n return False\n\n def getVisualizationParameter(self, parameter):\n \"\"\"Returns the value of the given visualization parameter.\n\n This method returns the parameter as set by\n :meth:`setVisualizationParameter`.\n\n :param parameter: The name of the parameter to retrieve\n :returns: The value previously set or None if automatically set\n :raises ValueError: If parameter is not supported\n \"\"\"\n if parameter not in self.VisualizationParameter:\n 
raise ValueError(\"parameter not supported: %s\", parameter)\n\n return self.__parameters[parameter]\n\n def getCurrentVisualizationParameter(self, parameter):\n \"\"\"Returns the current value of the given visualization parameter.\n\n If the parameter was set by :meth:`setVisualizationParameter` to\n a value that is not None, this value is returned;\n else the current value that is automatically computed is returned.\n\n :param parameter: The name of the parameter to retrieve\n :returns: The current value (either set or automatically computed)\n :raises ValueError: If parameter is not supported\n \"\"\"\n # Override in subclass to provide automatically computed parameters\n return self.getVisualizationParameter(parameter)\n\n\nclass PointsBase(Item, SymbolMixIn, AlphaMixIn):\n \"\"\"Base class for :class:`Curve` and :class:`Scatter`\"\"\"\n # note: _logFilterData must be overloaded if you overload\n # getData to change its signature\n\n _DEFAULT_Z_LAYER = 1\n \"\"\"Default overlay layer for points,\n on top of images.\"\"\"\n\n def __init__(self):\n Item.__init__(self)\n SymbolMixIn.__init__(self)\n AlphaMixIn.__init__(self)\n self._x = ()\n self._y = ()\n self._xerror = None\n self._yerror = None\n\n # Store filtered data for x > 0 and/or y > 0\n self._filteredCache = {}\n self._clippedCache = {}\n\n # Store bounds depending on axes filtering >0:\n # key is (isXPositiveFilter, isYPositiveFilter)\n self._boundsCache = {}\n\n @staticmethod\n def _logFilterError(value, error):\n \"\"\"Filter/convert error values if they go <= 0.\n\n Replace error leading to negative values by nan\n\n :param numpy.ndarray value: 1D array of values\n :param numpy.ndarray error:\n Array of errors: scalar, N, Nx1 or 2xN or None.\n :return: Filtered error so error bars are never negative\n \"\"\"\n if error is not None:\n # Convert Nx1 to N\n if error.ndim == 2 and error.shape[1] == 1 and len(value) != 1:\n error = numpy.ravel(error)\n\n # Supports error being scalar, N or 2xN array\n valueMinusError = value - numpy.atleast_2d(error)[0]\n errorClipped = numpy.isnan(valueMinusError)\n mask = numpy.logical_not(errorClipped)\n errorClipped[mask] = valueMinusError[mask] <= 0\n\n if numpy.any(errorClipped): # Need filtering\n\n # expand errorbars to 2xN\n if error.size == 1: # Scalar\n error = numpy.full(\n (2, len(value)), error, dtype=numpy.float)\n\n elif error.ndim == 1: # N array\n newError = numpy.empty((2, len(value)),\n dtype=numpy.float)\n newError[0, :] = error\n newError[1, :] = error\n error = newError\n\n elif error.size == 2 * len(value): # 2xN array\n error = numpy.array(\n error, copy=True, dtype=numpy.float)\n\n else:\n _logger.error(\"Unhandled error array\")\n return error\n\n error[0, errorClipped] = numpy.nan\n\n return error\n\n def _getClippingBoolArray(self, xPositive, yPositive):\n \"\"\"Compute a boolean array to filter out points with negative\n coordinates on log axes.\n\n :param bool xPositive: True to filter arrays according to X coords.\n :param bool yPositive: True to filter arrays according to Y coords.\n :rtype: boolean numpy.ndarray\n \"\"\"\n assert xPositive or yPositive\n if (xPositive, yPositive) not in self._clippedCache:\n xclipped, yclipped = False, False\n\n if xPositive:\n x = self.getXData(copy=False)\n with numpy.errstate(invalid='ignore'): # Ignore NaN warnings\n xclipped = x <= 0\n\n if yPositive:\n y = self.getYData(copy=False)\n with numpy.errstate(invalid='ignore'): # Ignore NaN warnings\n yclipped = y <= 0\n\n self._clippedCache[(xPositive, yPositive)] = \\\n 
numpy.logical_or(xclipped, yclipped)\n return self._clippedCache[(xPositive, yPositive)]\n\n def _logFilterData(self, xPositive, yPositive):\n \"\"\"Filter out values with x or y <= 0 on log axes\n\n :param bool xPositive: True to filter arrays according to X coords.\n :param bool yPositive: True to filter arrays according to Y coords.\n :return: The filter arrays or unchanged object if filtering not needed\n :rtype: (x, y, xerror, yerror)\n \"\"\"\n x = self.getXData(copy=False)\n y = self.getYData(copy=False)\n xerror = self.getXErrorData(copy=False)\n yerror = self.getYErrorData(copy=False)\n\n if xPositive or yPositive:\n clipped = self._getClippingBoolArray(xPositive, yPositive)\n\n if numpy.any(clipped):\n # copy to keep original array and convert to float\n x = numpy.array(x, copy=True, dtype=numpy.float)\n x[clipped] = numpy.nan\n y = numpy.array(y, copy=True, dtype=numpy.float)\n y[clipped] = numpy.nan\n\n if xPositive and xerror is not None:\n xerror = self._logFilterError(x, xerror)\n\n if yPositive and yerror is not None:\n yerror = self._logFilterError(y, yerror)\n\n return x, y, xerror, yerror\n\n def _getBounds(self):\n if self.getXData(copy=False).size == 0: # Empty data\n return None\n\n plot = self.getPlot()\n if plot is not None:\n xPositive = plot.getXAxis()._isLogarithmic()\n yPositive = plot.getYAxis()._isLogarithmic()\n else:\n xPositive = False\n yPositive = False\n\n # TODO bounds do not take error bars into account\n if (xPositive, yPositive) not in self._boundsCache:\n # use the getData class method because instance method can be\n # overloaded to return additional arrays\n data = PointsBase.getData(self, copy=False, displayed=True)\n if len(data) == 5:\n # hack to avoid duplicating caching mechanism in Scatter\n # (happens when cached data is used, caching done using\n # Scatter._logFilterData)\n x, y, _xerror, _yerror = data[0], data[1], data[3], data[4]\n else:\n x, y, _xerror, _yerror = data\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=RuntimeWarning)\n # Ignore All-NaN slice encountered\n self._boundsCache[(xPositive, yPositive)] = (\n numpy.nanmin(x),\n numpy.nanmax(x),\n numpy.nanmin(y),\n numpy.nanmax(y)\n )\n return self._boundsCache[(xPositive, yPositive)]\n\n def _getCachedData(self):\n \"\"\"Return cached filtered data if applicable,\n i.e. if any axis is in log scale.\n Return None if caching is not applicable.\"\"\"\n plot = self.getPlot()\n if plot is not None:\n xPositive = plot.getXAxis()._isLogarithmic()\n yPositive = plot.getYAxis()._isLogarithmic()\n if xPositive or yPositive:\n # At least one axis has log scale, filter data\n if (xPositive, yPositive) not in self._filteredCache:\n self._filteredCache[(xPositive, yPositive)] = \\\n self._logFilterData(xPositive, yPositive)\n return self._filteredCache[(xPositive, yPositive)]\n return None\n\n def getData(self, copy=True, displayed=False):\n \"\"\"Returns the x, y values of the curve points and xerror, yerror\n\n :param bool copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :param bool displayed: True to only get curve points that are displayed\n in the plot. 
Default: False\n Note: If plot has log scale, negative points\n are not displayed.\n :returns: (x, y, xerror, yerror)\n :rtype: 4-tuple of numpy.ndarray\n \"\"\"\n if displayed: # filter data according to plot state\n cached_data = self._getCachedData()\n if cached_data is not None:\n return cached_data\n\n return (self.getXData(copy),\n self.getYData(copy),\n self.getXErrorData(copy),\n self.getYErrorData(copy))\n\n def getXData(self, copy=True):\n \"\"\"Returns the x coordinates of the data points\n\n :param copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :rtype: numpy.ndarray\n \"\"\"\n return numpy.array(self._x, copy=copy)\n\n def getYData(self, copy=True):\n \"\"\"Returns the y coordinates of the data points\n\n :param copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :rtype: numpy.ndarray\n \"\"\"\n return numpy.array(self._y, copy=copy)\n\n def getXErrorData(self, copy=True):\n \"\"\"Returns the x error of the points\n\n :param copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :rtype: numpy.ndarray, float or None\n \"\"\"\n if isinstance(self._xerror, numpy.ndarray):\n return numpy.array(self._xerror, copy=copy)\n else:\n return self._xerror # float or None\n\n def getYErrorData(self, copy=True):\n \"\"\"Returns the y error of the points\n\n :param copy: True (Default) to get a copy,\n False to use internal representation (do not modify!)\n :rtype: numpy.ndarray, float or None\n \"\"\"\n if isinstance(self._yerror, numpy.ndarray):\n return numpy.array(self._yerror, copy=copy)\n else:\n return self._yerror # float or None\n\n def setData(self, x, y, xerror=None, yerror=None, copy=True):\n \"\"\"Set the data of the curve.\n\n :param numpy.ndarray x: The data corresponding to the x coordinates.\n :param numpy.ndarray y: The data corresponding to the y coordinates.\n :param xerror: Values with the uncertainties on the x values\n :type xerror: A float, or a numpy.ndarray of float32.\n If it is an array, it can either be a 1D array of\n same length as the data or a 2D array with 2 rows\n of same length as the data: row 0 for positive errors,\n row 1 for negative errors.\n :param yerror: Values with the uncertainties on the y values.\n :type yerror: A float, or a numpy.ndarray of float32. 
See xerror.\n :param bool copy: True make a copy of the data (default),\n False to use provided arrays.\n \"\"\"\n x = numpy.array(x, copy=copy)\n y = numpy.array(y, copy=copy)\n assert len(x) == len(y)\n assert x.ndim == y.ndim == 1\n\n if xerror is not None:\n if isinstance(xerror, abc.Iterable):\n xerror = numpy.array(xerror, copy=copy)\n else:\n xerror = float(xerror)\n if yerror is not None:\n if isinstance(yerror, abc.Iterable):\n yerror = numpy.array(yerror, copy=copy)\n else:\n yerror = float(yerror)\n # TODO checks on xerror, yerror\n self._x, self._y = x, y\n self._xerror, self._yerror = xerror, yerror\n\n self._boundsCache = {} # Reset cached bounds\n self._filteredCache = {} # Reset cached filtered data\n self._clippedCache = {} # Reset cached clipped bool array\n\n # TODO hackish data range implementation\n if self.isVisible():\n plot = self.getPlot()\n if plot is not None:\n plot._invalidateDataRange()\n self._updated(ItemChangedType.DATA)\n\n\nclass BaselineMixIn(object):\n \"\"\"Base class for Baseline mix-in\"\"\"\n def __init__(self, baseline=None):\n self._baseline = baseline\n\n def _setBaseline(self, baseline):\n \"\"\"\n Set baseline value\n\n :param baseline: baseline value(s)\n :type: Union[None,float,numpy.ndarray]\n \"\"\"\n if (isinstance(baseline, abc.Iterable)):\n baseline = numpy.array(baseline)\n self._baseline = baseline\n\n def getBaseline(self, copy=True):\n \"\"\"\n\n :param bool copy:\n :return: histogram baseline\n :rtype: Union[None,float,numpy.ndarray]\n \"\"\"\n if isinstance(self._baseline, numpy.ndarray):\n return numpy.array(self._baseline, copy=True)\n else:\n return self._baseline\n" ]
[ [ "numpy.nanmax", "numpy.logical_not", "numpy.imag", "numpy.absolute", "numpy.isfinite", "numpy.isnan", "numpy.nanmin", "numpy.logical_or", "numpy.atleast_2d", "numpy.real", "numpy.any", "numpy.errstate", "numpy.ravel", "numpy.angle", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JoeshpCheung/trans_models
[ "00f3f640bc065fc4a69fe29839ff1b405f3d707c" ]
[ "cnn_tf_keras.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2021 jasoncheung <jasoncheung@iZwz95ffbqqbe9pkek5f3tZ>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\n\n\"\"\"\nimport re\nimport json\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow.keras.backend as K\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nimport logging\n\n\n# 文本预处理规则\ndef extract(s):\n result = re.sub('回复(.*?):', '', s)\n return result\n\n\ndef corpus2label(datas):\n punc = r'~`!#$%^&*()_+-=|\\\\\\';\":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'\n datas = [extract(i) for i in datas]\n datas = [i.replace(' ', '').replace('\\t', '').strip() for i in datas]\n datas = [re.sub(r\"[%s]+\" % punc, \"\", i) for i in datas]\n datas = [[char2idx[i] if i in char2idx else char2idx['<UNK>'] for i in j] for j in datas]\n datas = tf.keras.preprocessing.sequence.pad_sequences(datas, maxlen=MAX_LEN, padding='post', truncating='post')\n print('datas.shape', datas.shape)\n\n return datas\n\n\ndef Cnn_softmax(lens):# CNN best for ToB\n inputs = tf.keras.Input(shape=(lens, ), )\n embed = tf.keras.layers.Embedding(input_dim=len(char2idx), output_dim=EMBEDDING_DIM)(inputs)\n cnn_layer = tf.keras.layers.Conv1D(256, 5, padding='same', strides=1, activation='relu')(embed)\n pooling_layer = tf.keras.layers.MaxPool1D(pool_size=46)(cnn_layer)\n flatten_layer = tf.keras.layers.Flatten()(pooling_layer)\n dropout = tf.keras.layers.Dropout(0.3)(flatten_layer)\n y = tf.keras.layers.Dense(1, activation='softmax')(dropout)\n model = tf.keras.Model(inputs=inputs, outputs=[y])\n print(model.summary())\n return model \n\n\ndef binary_focal_loss(gamma=2, alpha=0.25):\n \"\"\"\n Binary form of focal loss.\n 适用于二分类问题的focal loss\n\n focal_loss(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t)\n where p = sigmoid(x), p_t = p or 1 - p depending on if the label is 1 or 0, respectively.\n References:\n https://arxiv.org/pdf/1708.02002.pdf\n Usage:\n model.compile(loss=[binary_focal_loss(alpha=.25, gamma=2)], metrics=[\"accuracy\"], optimizer=adam)\n \"\"\"\n alpha = tf.constant(alpha, dtype=tf.float32)\n gamma = tf.constant(gamma, dtype=tf.float32)\n\n def binary_focal_loss_fixed(y_true, y_pred):\n \"\"\"\n y_true shape need be (None,1)\n y_pred need be compute after sigmoid\n \"\"\"\n y_true = tf.cast(y_true, tf.float32)\n alpha_t = y_true * alpha + (K.ones_like(y_true) - y_true) * (1 - alpha)\n\n p_t = y_true * y_pred + (K.ones_like(y_true) - y_true) * (K.ones_like(y_true) - y_pred) + K.epsilon()\n focal_loss = - alpha_t * K.pow((K.ones_like(y_true) - p_t), gamma) * K.log(p_t)\n return K.mean(focal_loss)\n\n return binary_focal_loss_fixed\n\n\nclass JasonTools(object):\n def __init__(self, x_train, y_train, x_test, y_test):\n self.x_train = x_train\n self.y_train = y_train\n self.x_test = x_test\n self.y_test = y_test\n\n def train_model(self, model, model_path, original_model):\n # model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n model.compile(optimizer='adam', loss=binary_focal_loss(gamma=2, alpha=0.25), metrics=['accuracy'])\n checkpoint = tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss', mode='min', verbose=1,\n save_best_only=True, save_weights_only=1, period=1)\n earlystop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=8, verbose=0, mode='min',\n restore_best_weights=True)\n model_history = 
model.fit(self.x_train, self.y_train, shuffle=True, epochs=EPOCHS, validation_split=0.1,\n callbacks=[checkpoint, earlystop])\n\n model = original_model\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n model.load_weights(model_path)\n test_loss, test_acc = model.evaluate(self.x_test, self.y_test, verbose=0)\n\n logging.info('best_model_path: %s' % model_path)\n logging.info('test_loss: %.3f - test_acc: %.3f' % (test_loss, test_acc))\n self.cal_pr(model, self.x_test, self.y_test)\n\n return model_history\n\n def finetune_model(self, model, model_path, x_datas, y_datas, class_weight=None):\n model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.0005), loss='binary_crossentropy', metrics=['accuracy'])\n checkpoint = tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss',\n mode='min', verbose=1,\n save_best_only=True, save_weights_only=1, period=1)\n earlystop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=8, verbose=0, mode='min',\n restore_best_weights=True)\n if class_weight:\n model.fit(x_datas, y_datas, shuffle=True, epochs=EPOCHS,\n validation_split=0.1,\n callbacks=[checkpoint, earlystop], class_weight=class_weight, batch_size=8)\n else:\n model.fit(x_datas, y_datas, shuffle=True, epochs=EPOCHS,\n validation_split=0.1,\n callbacks=[checkpoint, earlystop], batch_size=8)\n\n model.load_weights(model_path)\n test_loss, test_acc = model.evaluate(self.x_test, self.y_test, verbose=0)\n logging.info('best_finetune_model_path: %s' % model_path)\n logging.info('best_model_path: %s' % model_path)\n logging.info('test_loss: %.3f - test_acc: %.3f' % (test_loss, test_acc))\n self.cal_pr(model, self.x_test, self.y_test)\n\n @staticmethod\n def cal_pr(model, x_test, y_test):\n pred = model.predict(x_test)\n pred = [i[0] for i in pred]\n pred = [1 if i >= 0.5 else 0 for i in pred]\n true = y_test\n # pred = list(np.argmax(pred, axis=1))\n # true = list(np.argmax(self.y_test, axis=1))\n report = classification_report(true, pred)\n logging.info('classification_report: \\n')\n logging.info(report)\n logging.info('confusion_matrix: \\n')\n logging.info(confusion_matrix(true, pred))\n tn, fp, fn, tp = confusion_matrix(true, pred).ravel()\n logging.info('tn: %d, fp: %d, fn: %d, tp: %d' % (tn, fp, fn, tp))\n return report\n @staticmethod\n def plot_history(histories, path='model/acc_char.png', key='accuracy'):\n plt.figure(figsize=(16, 10))\n\n for name, history in histories:\n val = plt.plot(history.epoch, history.history['val_' + key], '--', label=name.title() + ' Val')\n plt.plot(history.epoch, history.history[key], color=val[0].get_color(), label=name.title() + ' Train')\n plt.xlabel('Epochs')\n plt.ylabel(key.replace('_', ' ').title())\n plt.legend()\n\n plt.xlim([0, max(history.epoch)])\n plt.savefig(path)\n\n\n# if __name__ == '__main__':\n# load datas\nMAX_LEN = 100\nEMBEDDING_DIM = 100\nEPOCHS = 20\n\npath_datas = '/home/jasoncheung/project/trans/trans_datas/weibo_senti_100k.csv'\ndf = pd.read_csv(path_datas)\ndatas = df.review.tolist()\nlabels = df.label.tolist()\nlabels = np.array(labels)\n\ndir_path = '/home/jasoncheung/project/trans/trans_models/'\nidx2char = json.load(open(dir_path+'idx2char.json', 'r'))\nchar2idx = json.load(open(dir_path+'char2idx.json', 'r'))\n\n\nprint('before: ', len(datas), datas[0])\ndatas = corpus2label(datas)\nprint('after: ', len(datas), datas[0])\n\n\ntrain_datas, test_datas, train_labels, test_labels = train_test_split(datas, labels, test_size=0.1)\n\n# load model\ntrainer = JasonTools(train_datas, 
train_labels, test_datas, test_labels)\nmodel = Cnn_softmax(MAX_LEN)\n\nmodel_path = './model/cnn_char/cnn_char.h5'\nrecord = []\n\nrecord.append(('cnn_char', trainer.train_model(model, model_path, Cnn_softmax(MAX_LEN))))\n\ntrainer.plot_history(record, path='model/acc_char_emotion_positive.png')\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.cast", "sklearn.metrics.confusion_matrix", "tensorflow.keras.backend.ones_like", "tensorflow.keras.backend.log", "sklearn.metrics.classification_report", "tensorflow.keras.layers.Dropout", "pandas.read_csv", "tensorflow.keras.Input", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.layers.Flatten", "matplotlib.pyplot.figure", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.layers.Dense", "sklearn.model_selection.train_test_split", "tensorflow.keras.Model", "matplotlib.pyplot.savefig", "numpy.array", "tensorflow.constant", "tensorflow.keras.layers.MaxPool1D", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.backend.mean", "matplotlib.pyplot.xlabel", "tensorflow.keras.backend.epsilon", "tensorflow.keras.preprocessing.sequence.pad_sequences" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
Sudeepdharam/WEATHER_PREDICTION
[ "2df27c5faa386710c9937daa8da386175019692b" ]
[ "train.py" ]
[ "from keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.utils import np_utils\nimport numpy as np\n\nseed = 7\nnp.random.seed(seed)\ndata = np.loadtxt(\"data.txt\")\nX_train = data[:4646,:12]\nY_train = data[:4646,12:13]\nX_test = data[4646:6936,:12]\nY_test = data[4646:6936,12:13]\nlen_y_train = len(Y_train)\nlen_y_test = len(Y_test)\n\nprint(len(Y_train))\nprint(len(Y_test))\n\nfor i in range(0,len_y_train):\n if(Y_train[i]==1000):\n Y_train[i] = 3\n Y_train[i] = int(Y_train[i])\n elif(Y_train[i]==100):\n Y_train[i] = 2\n Y_train[i] = int(Y_train[i])\n elif(Y_train[i]==10):\n Y_train[i] = 1\n Y_train[i] = int(Y_train[i])\n elif(Y_train[i]==1):\n Y_train[i] = 0\n Y_train[i] = int(Y_train[i])\n\nfor i in range(0,len_y_test):\n if(Y_test[i]==1000):\n Y_test[i] = 3\n Y_test[i] = int(Y_test[i])\n elif(Y_test[i]==100):\n Y_test[i] = 2\n Y_test[i] = int(Y_test[i])\n elif(Y_test[i]==10):\n Y_test[i] = 1\n Y_test[i] = int(Y_test[i])\n elif(Y_test[i]==1):\n Y_test[i] = 0\n Y_test[i] = int(Y_test[i])\n\nprint(Y_train)\nprint(Y_test)\n\nY_train = Y_train.astype('int32')\nY_train = np_utils.to_categorical(Y_train,4)\nY_test = Y_test.astype('int32')\nY_test = np_utils.to_categorical(Y_test,4)\nmodel = Sequential()\nmodel.add(Dense(100, input_dim=12, init='uniform', activation='relu'))\nmodel.add(Dense(80, init='uniform', activation='relu'))\nmodel.add(Dense(60, init='uniform', activation='relu'))\nmodel.add(Dense(60, init='uniform', activation='relu'))\nmodel.add(Dense(4))\nmodel.add(Activation('softmax'))\nmodel.summary()\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.fit(X_train, Y_train, nb_epoch=150, batch_size=10, verbose=2, validation_data=(X_test,Y_test))\nscores = model.evaluate(X_test, Y_test, verbose=0)\n\nprint(\"\\n\")\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\njson_string = model.to_json()\nopen('model_architecture.json', 'w').write(json_string)\nmodel.save_weights('weights.h5',overwrite=True)\n" ]
[ [ "numpy.loadtxt", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mathanraj-Sharma/OpenCV_Sample_Codes
[ "a20710fa05d7817b9c4c78acc64b852b0cde7583" ]
[ "05_translation_transformation.py" ]
[ "import cv2\nimport numpy as np\nimport argparse\n\n\nap = argparse.ArgumentParser()\nap.add_argument('-i','-image', required = True, help = 'Enter the path for the image')\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args['i'])\n\n\ndef translate(image, tx, ty):\n\t\"\"\"\n\tThis function will create a translation matrix using tx, ty and return a translated image\n\tthink interms of matrix multiplication for translation\n\t-- -- -- --\n\t| 1 0 tx | | x |\n\t| 0 1 ty | * | y |\n\t| 0 0 1 | | 1 |\n\t-- -- -- --\n\t\"\"\"\n\tM = np.float32([[1,0,tx], [0,1,ty]])\n\ttranslated_image = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))\n\treturn translated_image\n\n\nif __name__ == '__main__':\n\tcv2.imshow('Original Image', image)\n\tcv2.waitKey(0)\n\n\ttest_translation = translate(image, 50, 100)\n\tcv2.imshow('Translated Image', test_translation)\n\tcv2.waitKey(0)\n\n\t\"\"\" \n\t\tSince the origin of image is on the left top corner\n\t\t\n\t\tNegative value for ty will move the image up\n\t\tPositive value for ty will move the image down\n\n\t\tNegative value for tx will move the image left\n\t\tPositive value for tx will move the image right\n\t\"\"\"\n" ]
[ [ "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shaochenze/RSI-NAT
[ "dcc5ae2a4fbfc5d9ba8f3bf51dc6aacd284c74e5" ]
[ "model.py" ]
[ "import numpy as np\nimport ipdb\nimport torch\nfrom torch import nn\nimport torch.nn.init as init\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable, Function\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\nfrom collections import Counter\n\nimport math\nimport random\n\nfrom utils import computeGLEU, masked_sort, unsorted, make_decoder_masks, query_trg_len_dic,my_sentence_gleu\nfrom nltk.translate.gleu_score import sentence_gleu, corpus_gleu\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n\nINF = 1e10\nTINY = 1e-9\ndef shape(targets, target_lens):\n\n list_targets = []\n begin = 0\n end = 0\n for length in target_lens:\n end += length\n list_targets.append([str(index) for index in targets[begin:end]])\n begin += length\n\n return list_targets\ndef parallel_gleu( inputs):\n (sample_idx, list_samples, list_targets, count,target_lens) = inputs\n l_samples = shape(sample_idx,target_lens)\n gleus = []\n for j in range(count):\n for k in range(len(l_samples[j])):\n t = l_samples[j][k]\n l_samples[j][k] = list_samples[j][k]\n gleu = my_sentence_gleu([l_samples[j]], list_targets[j])\n l_samples[j][k] = t\n gleus.append(gleu)\n return gleus\n\nclass GradReverse(Function):\n @staticmethod\n def forward(ctx, x):\n return x.view_as(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output.neg()\n\ndef grad_reverse(x):\n return GradReverse.apply(x)\n\ndef positional_encodings_like(x, t=None): # hope to be differentiable\n if t is None:\n positions = torch.arange(0, x.size(-2)) # .expand(*x.size()[:2])\n if x.is_cuda:\n positions = positions.cuda(x.get_device())\n positions = Variable(positions.float())\n else:\n positions = t\n # channels\n channels = torch.arange(0, x.size(-1), 2) / x.size(-1) # 0 2 4 6 ... (256)\n if x.is_cuda:\n channels = channels.cuda(x.get_device())\n channels = 1 / (10000 ** Variable(channels))\n # get the positional encoding: batch x target_len\n encodings = positions.unsqueeze(-1) @ channels.unsqueeze(0) # batch x target_len x 256\n encodings = torch.cat([torch.sin(encodings).unsqueeze(-1), torch.cos(encodings).unsqueeze(-1)], -1)\n encodings = encodings.contiguous().view(*encodings.size()[:-2], -1) # batch x target_len x 512\n\n if encodings.ndimension() == 2:\n encodings = encodings.unsqueeze(0).expand_as(x)\n\n return encodings\n\nclass Linear(nn.Linear):\n def __init__(self, d_in, d_out, bias=True, out_norm=False):\n super().__init__(d_in, d_out, bias)\n self.out_norm = out_norm\n stdv = 1. 
/ math.sqrt(self.weight.size(1))\n init.uniform(self.weight, -stdv, stdv)\n if bias:\n self.bias.data.zero_()\n\n def forward(self, x):\n size = x.size()\n if self.out_norm:\n weight = self.weight / (1e-6 + torch.sqrt((self.weight ** 2).sum(0, keepdim=True)))\n x_ = x / (1e-6 + torch.sqrt((x ** 2).sum(-1, keepdim=True)))\n logit_ = torch.mm(x_.contiguous().view(-1, size[-1]), weight.t()).view(*size[:-1], -1)\n if self.bias:\n logit_ = logit_ + self.bias\n return logit_\n return super().forward(\n x.contiguous().view(-1, size[-1])).view(*size[:-1], -1)\n\ndef demask(inputs, the_mask):\n # inputs: 1-D sequences\n # the_mask: batch x max-len\n outputs = Variable((the_mask == 0).long().view(-1)) # 1-D\n indices = torch.arange(0, outputs.size(0))\n if inputs.is_cuda:\n indices = indices.cuda(inputs.get_device())\n indices = indices.view(*the_mask.size()).long()\n indices = indices[the_mask]\n outputs[indices] = inputs\n return outputs.view(*the_mask.size())\n\n# F.softmax has strange default behavior, normalizing over dim 0 for 3D inputs\ndef softmax(x, T=1):\n return F.softmax(x/T, dim=-1)\n \"\"\"\n if x.dim() == 3:\n return F.softmax(x.transpose(0, 2)).transpose(0, 2)\n return F.softmax(x)\n \"\"\"\n\ndef log_softmax(x):\n if x.dim() == 3:\n return F.log_softmax(x.transpose(0, 2)).transpose(0, 2)\n return F.log_softmax(x)\n\ndef logsumexp(x, dim=-1):\n x_max = x.max(dim, keepdim=True)[0]\n return torch.log(torch.exp(x - x_max.expand_as(x)).sum(dim, keepdim=True) + TINY) + x_max\n\ndef gumbel_softmax(input, beta=0.5, tau=1.0):\n noise = input.data.new(*input.size()).uniform_()\n noise.add_(TINY).log_().neg_().add_(TINY).log_().neg_()\n return softmax((input + beta * Variable(noise)) / tau)\n\n# (4, 3, 2) @ (4, 2) -> (4, 3)\n# (4, 3) @ (4, 3, 2) -> (4, 3)\n# (4, 3, 2) @ (4, 2, 4) -> (4, 3, 4)\ndef matmul(x, y):\n if x.dim() == y.dim():\n return x @ y\n if x.dim() == y.dim() - 1:\n return (x.unsqueeze(-2) @ y).squeeze(-2)\n return (x @ y.unsqueeze(-1)).squeeze(-1)\n\ndef pad_to_match(x, y):\n x_len, y_len = x.size(1), y.size(1)\n if x_len == y_len:\n return x, y\n add_to = x if x_len < y_len else y\n fill = 1 if add_to.dim() == 2 else 0\n extra = add_to.data.new(\n x.size(0), abs(y_len - x_len), *add_to.size()[2:]).fill_(fill)\n if x_len < y_len:\n return torch.cat((x, extra), 1), y\n return x, torch.cat((y, extra), 1)\n\n# --- Top K search with PQ\ndef topK_search(logits, mask_src, N=100):\n # prepare data\n nlogP = -log_softmax(logits).data\n maxL = nlogP.size(-1)\n overmask = torch.cat([mask_src[:, :, None],\n (1 - mask_src[:, :, None]).expand(*mask_src.size(), maxL-1) * INF\n + mask_src[:, :, None]], 2)\n nlogP = nlogP * overmask\n\n batch_size, src_len, L = logits.size()\n _, R = nlogP.sort(-1)\n\n def get_score(data, index):\n # avoid all zero\n # zero_mask = (index.sum(-2) == 0).float() * INF\n return data.gather(-1, index).sum(-2)\n\n heap_scores = torch.ones(batch_size, N) * INF\n heap_inx = torch.zeros(batch_size, src_len, N).long()\n heap_scores[:, :1] = get_score(nlogP, R[:, :, :1])\n if nlogP.is_cuda:\n heap_scores = heap_scores.cuda(nlogP.get_device())\n heap_inx = heap_inx.cuda(nlogP.get_device())\n\n def span(ins):\n inds = torch.eye(ins.size(1)).long()\n if ins.is_cuda:\n inds = inds.cuda(ins.get_device())\n return ins[:, :, None].expand(ins.size(0), ins.size(1), ins.size(1)) + inds[None, :, :]\n\n # iteration starts\n for k in range(1, N):\n cur_inx = heap_inx[:, :, k-1]\n I_t = span(cur_inx).clamp(0, L-1) # B x N x N\n S_t = get_score(nlogP, R.gather(-1, I_t))\n S_t, _inx = 
torch.cat([heap_scores[:, k:], S_t], 1).sort(1)\n S_t[:, 1:] += ((S_t[:, 1:] - S_t[:, :-1]) == 0).float() * INF # remove duplicates\n S_t, _inx2 = S_t.sort(1)\n I_t = torch.cat([heap_inx[:, :, k:], I_t], 2).gather(\n 2, _inx.gather(1, _inx2)[:, None, :].expand(batch_size, src_len, _inx.size(-1)))\n heap_scores[:, k:] = S_t[:, :N-k]\n heap_inx[:, :, k:] = I_t[:, :, :N-k]\n\n # get the searched\n output = R.gather(-1, heap_inx)\n output = output.transpose(2, 1).contiguous().view(batch_size * N, src_len) # (B x N) x Ts\n output = Variable(output)\n mask_src = mask_src[:, None, :].expand(batch_size, N, src_len).contiguous().view(batch_size * N, src_len)\n\n return output, mask_src\n\nclass LayerNorm(nn.Module):\n\n def __init__(self, d_model, eps=1e-6):\n super().__init__()\n self.gamma = nn.Parameter(torch.ones(d_model))\n self.beta = nn.Parameter(torch.zeros(d_model))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.gamma * (x - mean) / (std + self.eps) + self.beta\n\nclass Attention(nn.Module):\n\n def __init__(self, d_key, drop_ratio, causal, diag=False):\n super().__init__()\n self.scale = math.sqrt(d_key)\n self.dropout = nn.Dropout(drop_ratio)\n self.causal = causal\n self.diag = diag\n\n def forward(self, query, key, value=None, mask=None,\n feedback=None, beta=0, tau=1, weights=None):\n dot_products = matmul(query, key.transpose(1, 2)) # batch x trg_len x trg_len\n\n if weights is not None:\n dot_products = dot_products + weights # additive bias\n\n if query.dim() == 3 and self.causal and (query.size(1) == key.size(1)):\n tri = key.data.new(key.size(1), key.size(1)).fill_(1).triu(1) * INF\n dot_products.data.sub_(tri.unsqueeze(0))\n\n if self.diag:\n inds = torch.arange(0, key.size(1)).long().view(1, 1, -1)\n if key.is_cuda:\n inds = inds.cuda(key.get_device())\n dot_products.data.scatter_(1, inds.expand(dot_products.size(0), 1, inds.size(-1)), -INF)\n # eye = key.data.new(key.size(1), key.size(1)).fill_(1).eye() * INF\n # dot_products.data.sub_(eye.unsqueeze(0))\n\n if mask is not None:\n if dot_products.dim() == 2:\n dot_products.data -= ((1 - mask) * INF)\n else:\n dot_products.data -= ((1 - mask[:, None, :]) * INF)\n\n if value is None:\n return dot_products\n\n logits = dot_products / self.scale\n probs = softmax(logits)\n\n if feedback is not None:\n feedback.append(probs.contiguous())\n\n return matmul(self.dropout(probs), value)\n\nclass MultiHead2(nn.Module):\n\n def __init__(self, d_key, d_value, n_heads, drop_ratio,\n causal=False, diag=False, use_wo=True):\n super().__init__()\n self.attention = Attention(d_key, drop_ratio, causal=causal, diag=diag)\n self.wq = Linear(d_key, d_key, bias=use_wo)\n self.wk = Linear(d_key, d_key, bias=use_wo)\n self.wv = Linear(d_value, d_value, bias=use_wo)\n if use_wo:\n self.wo = Linear(d_value, d_key, bias=use_wo)\n self.use_wo = use_wo\n self.n_heads = n_heads\n\n def forward(self, query, key, value, mask=None, feedback=None, weights=None, beta=0, tau=1):\n # query : B x T1 x D\n # key : B x T2 x D\n # value : B x T2 x D\n query, key, value = self.wq(query), self.wk(key), self.wv(value) # B x T x D\n B, Tq, D = query.size()\n _, Tk, _ = key.size()\n N = self.n_heads\n probs = []\n\n query, key, value = (x.contiguous().view(B, -1, N, D//N).transpose(2, 1).contiguous().view(B*N, -1, D//N)\n for x in (query, key, value))\n if mask is not None:\n mask = mask[:, None, :].expand(B, N, Tk).contiguous().view(B*N, -1)\n outputs = self.attention(query, key, value, mask, 
probs, beta, tau, weights) # (B x N) x T x (D/N)\n outputs = outputs.contiguous().view(B, N, -1, D//N).transpose(2, 1).contiguous().view(B, -1, D)\n\n if feedback is not None:\n feedback.append(probs[0].view(B, N, Tq, Tk))\n\n if self.use_wo:\n return self.wo(outputs)\n return outputs\n\nclass NonresidualBlock(nn.Module):\n\n def __init__(self, layer, d_model, d_hidden, drop_ratio, pos=0):\n super().__init__()\n self.layer = layer\n self.dropout = nn.Dropout(drop_ratio)\n self.layernorm = LayerNorm(d_model)\n self.pos = pos\n\n def forward(self, *x):\n return self.layernorm(self.dropout(self.layer(*x)))\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, layer, d_model, d_hidden, drop_ratio, pos=0):\n super().__init__()\n self.layer = layer\n self.dropout = nn.Dropout(drop_ratio)\n self.layernorm = LayerNorm(d_model)\n self.pos = pos\n\n def forward(self, *x):\n return self.layernorm(x[self.pos] + self.dropout(self.layer(*x)))\n\nclass HighwayBlock(nn.Module):\n\n def __init__(self, layer, d_model, d_hidden, drop_ratio, pos=0):\n super().__init__()\n self.layer = layer\n self.gate = FeedForward(d_model, d_hidden)\n self.dropout = nn.Dropout(drop_ratio)\n self.layernorm = LayerNorm(d_model)\n self.pos = pos\n\n def forward(self, *x):\n g = F.sigmoid(self.gate(x[self.pos]))\n return self.layernorm(x[self.pos] * g + self.dropout(self.layer(*x)) * (1 - g))\n\nclass FeedForward(nn.Module):\n\n def __init__(self, d_model, d_hidden):\n super().__init__()\n self.linear1 = Linear(d_model, d_hidden)\n self.linear2 = Linear(d_hidden, d_model)\n\n def forward(self, x):\n return self.linear2(F.relu(self.linear1(x)))\n\nclass EncoderLayer(nn.Module):\n\n def __init__(self, args):\n super().__init__()\n self.selfattn = ResidualBlock(\n MultiHead2(args.d_model, args.d_model, args.n_heads,\n args.drop_ratio, use_wo=args.use_wo),\n args.d_model, args.d_hidden, args.drop_ratio)\n self.feedforward = args.block_cls(\n FeedForward(args.d_model, args.d_hidden),\n args.d_model, args.d_hidden, args.drop_ratio )\n\n def forward(self, x, mask=None):\n x = self.selfattn(x, x, x, mask)\n x = self.feedforward(x)\n return x\n\nclass DecoderLayer(nn.Module):\n\n def __init__(self, args, causal=True, diag=False,\n positional=False):\n super().__init__()\n self.positional = positional\n self.selfattn = ResidualBlock(\n MultiHead2(args.d_model, args.d_model, args.n_heads,\n args.drop_ratio, causal=causal, diag=diag,\n use_wo=args.use_wo),\n args.d_model, args.d_hidden, args.drop_ratio)\n\n self.attention = ResidualBlock(\n MultiHead2(args.d_model, args.d_model, args.n_heads,\n args.drop_ratio, use_wo=args.use_wo),\n args.d_model, args.d_hidden, args.drop_ratio)\n\n if positional:\n self.pos_selfattn = ResidualBlock(\n MultiHead2(args.d_model, args.d_model, args.n_heads,\n args.drop_ratio, causal=causal, diag=diag,\n use_wo=args.use_wo),\n args.d_model, args.d_hidden, args.drop_ratio, pos=2)\n\n self.feedforward = args.block_cls(\n FeedForward(args.d_model, args.d_hidden),\n args.d_model, args.d_hidden, args.drop_ratio )\n\n def forward(self, x, encoding, p=None, mask_src=None, mask_trg=None, feedback=None):\n\n feedback_src = []\n feedback_trg = []\n\n x = self.selfattn(x, x, x, mask_trg, feedback_trg) #\n\n if self.positional:\n pos_encoding, weights = positional_encodings_like(x), None\n x = self.pos_selfattn(pos_encoding, pos_encoding, x, mask_trg, None, weights) # positional attention\n\n x = self.attention(x, encoding, encoding, mask_src, feedback_src)\n\n x = self.feedforward(x)\n\n if feedback is not None:\n 
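# merge this layer's attention maps into the shared feedback dict:\n            # 'source' collects decoder-encoder cross-attention, 'target' collects decoder self-attention\n            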
if 'source' not in feedback:\n feedback['source'] = feedback_src\n else:\n feedback['source'] += feedback_src\n\n if 'target' not in feedback:\n feedback['target'] = feedback_trg\n else:\n feedback['target'] += feedback_trg\n return x\n\nclass Encoder(nn.Module):\n\n def __init__(self, field, args):\n super().__init__()\n\n if args.dataset != \"mscoco\":\n if args.share_embed:\n self.out = Linear(args.d_model, len(field.vocab), bias=False)\n else:\n self.embed = nn.Embedding(len(field.vocab), args.d_model)\n self.layers = nn.ModuleList(\n [EncoderLayer(args) for i in range(args.n_layers)])\n self.dropout = nn.Dropout(args.input_drop_ratio)\n if args.dataset != \"mscoco\":\n self.field = field\n self.d_model = args.d_model\n self.share_embed = args.share_embed\n self.dataset = args.dataset\n\n def forward(self, x, mask=None):\n if self.dataset != \"mscoco\":\n if self.share_embed:\n x = F.embedding(x, self.out.weight * math.sqrt(self.d_model))\n else:\n x = self.embed(x)\n x += positional_encodings_like(x)\n encoding = [x]\n\n x = self.dropout(x)\n for layer in self.layers:\n x = layer(x, mask)\n encoding.append(x)\n return encoding\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass EncoderCNN(nn.Module):\n def __init__(self, args):\n super(EncoderCNN, self).__init__()\n\n self.d_encoder = 512 # hardcoded because of resnet 512 hidden size\n self.d_model = args.d_model\n if self.d_encoder != self.d_model:\n self.conv = conv3x3(self.d_encoder, self.d_model, stride=1)\n self.bn = nn.BatchNorm2d(self.d_model)\n\n def forward(self, features):\n if self.d_encoder != self.d_model:\n return self.bn(self.conv(features))\n else:\n return features\n\nclass Decoder(nn.Module):\n\n def __init__(self, field, args, causal=True, positional=False, diag=False, out=None):\n\n super().__init__()\n\n self.layers = nn.ModuleList(\n [DecoderLayer(args, causal, diag, positional)\n for i in range(args.n_layers)])\n\n if out is None:\n self.out = Linear(args.d_model, len(field.vocab), bias=False, out_norm=args.out_norm)\n else:\n self.out = out\n\n self.dropout = nn.Dropout(args.input_drop_ratio)\n self.out_norm = args.out_norm\n self.d_model = args.d_model\n self.field = field\n self.length_ratio = args.length_ratio\n self.positional = positional\n self.enc_last = args.enc_last\n self.dataset = args.dataset\n self.length_dec = args.length_dec\n\n def forward(self, x, encoding, source_masks=None, decoder_masks=None,\n input_embeddings=False, positions=None, feedback=None):\n # x : decoder_inputs\n\n if self.out_norm:\n out_weight = self.out.weight / (1e-6 + torch.sqrt((self.out.weight ** 2).sum(0, keepdim=True)))\n else:\n out_weight = self.out.weight\n\n if not input_embeddings: # NOTE only for Transformer\n if x.ndimension() == 2:\n x = F.embedding(x, out_weight * math.sqrt(self.d_model))\n elif x.ndimension() == 3: # softmax relaxiation\n x = x @ out_weight * math.sqrt(self.d_model) # batch x len x embed_size\n\n x += positional_encodings_like(x)\n x = self.dropout(x)\n\n if self.enc_last:\n for l, layer in enumerate(self.layers):\n x = layer(x, encoding[-1], mask_src=source_masks, mask_trg=decoder_masks, feedback=feedback)\n else:\n for l, (layer, enc) in enumerate(zip(self.layers, encoding[1:])):\n x = layer(x, enc, mask_src=source_masks, mask_trg=decoder_masks, feedback=feedback)\n return x\n\n def greedy(self, encoding, mask_src=None, mask_trg=None, 
feedback=None):\n\n encoding = encoding[1:]\n B, T, C = encoding[0].size() # batch-size, decoding-length, size\n if self.dataset == \"mscoco\":\n T = self.length_dec\n else:\n T *= self.length_ratio\n\n outs = Variable(encoding[0].data.new(B, T + 1).long().fill_(\n self.field.vocab.stoi['<init>']))\n hiddens = [Variable(encoding[0].data.new(B, T, C).zero_())\n for l in range(len(self.layers) + 1)]\n embedW = self.out.weight * math.sqrt(self.d_model)\n hiddens[0] = hiddens[0] + positional_encodings_like(hiddens[0])\n\n eos_yet = encoding[0].data.new(B).byte().zero_()\n\n attentions = []\n\n for t in range(T):\n #torch.cuda.nvtx.mark(f'greedy:{t}')\n torch.cuda.nvtx.mark('greedy:{}'.format(t))\n hiddens[0][:, t] = self.dropout(\n hiddens[0][:, t] + F.embedding(outs[:, t], embedW))\n\n inter_attention = []\n for l in range(len(self.layers)):\n x = hiddens[l][:, :t+1]\n x = self.layers[l].selfattn(hiddens[l][:, t:t+1], x, x) # we need to make the dimension 3D\n hiddens[l + 1][:, t] = self.layers[l].feedforward(\n self.layers[l].attention(x, encoding[l], encoding[l], mask_src, inter_attention))[:, 0]\n\n inter_attention = torch.cat(inter_attention, 1)\n attentions.append(inter_attention)\n\n _, preds = self.out(hiddens[-1][:, t]).max(-1)\n preds[eos_yet] = self.field.vocab.stoi['<pad>']\n\n eos_yet = eos_yet | (preds.data == self.field.vocab.stoi['<eos>'])\n outs[:, t + 1] = preds\n if eos_yet.all():\n break\n\n if feedback is not None:\n feedback['source'] = torch.cat(attentions, 2)\n\n return outs[:, 1:t+2]\n\n def beam_search(self, encoding, mask_src=None, mask_trg=None, width=2, alpha=0.6): # width: beamsize, alpha: length-norm\n encoding = encoding[1:]\n W = width\n B, T, C = encoding[0].size()\n\n # expanding\n for i in range(len(encoding)):\n encoding[i] = encoding[i][:, None, :].expand(\n B, W, T, C).contiguous().view(B * W, T, C)\n mask_src = mask_src[:, None, :].expand(B, W, T).contiguous().view(B * W, T)\n\n T *= self.length_ratio\n outs = Variable(encoding[0].data.new(B, W, T + 1).long().fill_(\n self.field.vocab.stoi['<init>']))\n\n logps = Variable(encoding[0].data.new(B, W).float().fill_(0)) # scores\n hiddens = [Variable(encoding[0].data.new(B, W, T, C).zero_()) # decoder states: batch x beamsize x len x h\n for l in range(len(self.layers) + 1)]\n embedW = self.out.weight * math.sqrt(self.d_model)\n hiddens[0] = hiddens[0] + positional_encodings_like(hiddens[0])\n eos_yet = encoding[0].data.new(B, W).byte().zero_() # batch x beamsize, all the sentences are not finished yet.\n eos_mask = eos_yet.float().fill_(-INF)[:, :, None].expand(B, W, W)\n eos_mask[:, :, 0] = 0 # batch x beam x beam\n\n for t in range(T):\n hiddens[0][:, :, t] = self.dropout(\n hiddens[0][:, :, t] + F.embedding(outs[:, :, t], embedW))\n for l in range(len(self.layers)):\n x = hiddens[l][:, :, :t + 1].contiguous().view(B * W, -1, C)\n x = self.layers[l].selfattn(x[:, -1:, :], x, x)\n hiddens[l + 1][:, :, t] = self.layers[l].feedforward(\n self.layers[l].attention(x, encoding[l], encoding[l], mask_src)).view(\n B, W, C)\n\n # topk2_logps: scores, topk2_inds: top word index at each beam, batch x beam x beam\n topk2_logps, topk2_inds = log_softmax(\n self.out(hiddens[-1][:, :, t])).topk(W, dim=-1)\n\n # mask out the sentences which are finished\n topk2_logps = topk2_logps * Variable(eos_yet[:, :, None].float() * eos_mask + 1 - eos_yet[:, :, None].float())\n topk2_logps = topk2_logps + logps[:, :, None]\n\n if t == 0:\n logps, topk_inds = topk2_logps[:, 0].topk(W, dim=-1)\n else:\n logps, topk_inds = 
topk2_logps.view(B, W * W).topk(W, dim=-1)\n\n topk_beam_inds = topk_inds.div(W)\n topk_token_inds = topk2_inds.view(B, W * W).gather(1, topk_inds)\n eos_yet = eos_yet.gather(1, topk_beam_inds.data)\n\n logps = logps * (1 - Variable(eos_yet.float()) * 1 / (t + 2)).pow(alpha)\n outs = outs.gather(1, topk_beam_inds[:, :, None].expand_as(outs))\n outs[:, :, t + 1] = topk_token_inds\n topk_beam_inds = topk_beam_inds[:, :, None, None].expand_as(\n hiddens[0])\n for i in range(len(hiddens)):\n hiddens[i] = hiddens[i].gather(1, topk_beam_inds)\n eos_yet = eos_yet | (topk_token_inds.data == self.field.vocab.stoi['<eos>'])\n if eos_yet.all():\n return outs[:, 0, 1:]\n return outs[:, 0, 1:]\n\nclass Transformer(nn.Module):\n\n def __init__(self, src=None, trg=None, args=None):\n super().__init__()\n if args.dataset != \"mscoco\":\n self.is_mscoco = False\n # prepare regular translation encoder and decoder\n self.encoder = Encoder(src, args)\n self.decoder = Decoder(trg, args)\n self.field = trg\n self.share_embed = args.share_embed\n if args.share_embed:\n self.encoder.out.weight = self.decoder.out.weight\n else:\n # prepare image encoder and decoder\n self.is_mscoco = True\n mscoco_dataset = trg\n #self.encoder = EncoderCNN(args)\n self.encoder = Encoder(src, args)\n self.decoder = Decoder(mscoco_dataset, args)\n self.field = mscoco_dataset\n self.share_embed = False\n\n self.n_layers = args.n_layers\n self.d_model = args.d_model\n\n def denum(self, data, target=True):\n field = self.decoder.field if target else self.encoder.field\n return field.reverse(data.unsqueeze(0))[0]\n\n def apply_mask(self, inputs, mask, p=1):\n _mask = Variable(mask.long())\n #outputs = inputs * _mask + (1 - _mask) * p\n outputs = inputs * _mask + (torch.mul(_mask, -1) + 1 ) * p\n return outputs\n\n def apply_mask_cost(self, loss, mask, batched=False):\n loss.data *= mask\n cost = loss.sum() / (mask.sum() + TINY)\n\n if not batched:\n return cost\n\n loss = loss.sum(1, keepdim=True) / (TINY + Variable(mask).sum(1, keepdim=True))\n return cost, loss\n\n def output_decoding(self, outputs, unbpe=True):\n field, text = outputs\n if field is 'src':\n return self.encoder.field.reverse(text.data, unbpe)\n else:\n return self.decoder.field.reverse(text.data, unbpe)\n\n def prepare_sources(self, batch, masks=None):\n masks = self.prepare_masks(batch.src) if masks is None else masks\n return batch.src, masks\n\n def encoding(self, encoder_inputs, source_masks=None):\n if self.is_mscoco:\n return self.encoder(encoder_inputs)\n else:\n return self.encoder(encoder_inputs, source_masks)\n\n def prepare_targets(self, batch, targets=None, masks=None):\n if targets is None:\n targets = batch.trg[:, 1:].contiguous()\n masks = self.prepare_masks(targets) if masks is None else masks\n return targets, masks\n\n def prepare_decoder_inputs(self, trg_inputs, inputs=None, masks=None, bp=1.00):\n decoder_inputs = trg_inputs[:, :-1].contiguous()\n decoder_masks = self.prepare_masks(trg_inputs[:, 1:], bp=bp) if masks is None else masks\n # NOTE why [1:], not [:-1]?\n\n return decoder_inputs, decoder_masks\n\n def change_bp_masks(self, masks, bp):\n input_lengths = np.int32( masks.sum(1).cpu().numpy() )\n batch_size, seq_len = masks.size()\n add_pad = [ int( math.floor( each_len * ( (1 / bp) - 1.0 ) ) ) for each_len in input_lengths]\n if max(add_pad) > 0 :\n add_mask = torch.zeros((batch_size, max(add_pad))).float() # NOTE we add masks of ones at the front!\n if masks.is_cuda:\n add_mask = add_mask.cuda(masks.get_device())\n masks = 
torch.cat((masks, add_mask), dim=1)\n for bidx in range(input_lengths.shape[0]):\n if add_pad[bidx] > 0:\n masks[bidx, input_lengths[bidx]:input_lengths[bidx]+add_pad[bidx]] = 1\n return masks\n\n def prepare_masks(self, inputs, bp=1.0):\n if inputs.ndimension() == 2:\n masks = (inputs.data != self.field.vocab.stoi['<pad>']).float()\n else: # NOTE FALSE\n masks = (inputs.data[:, :, self.field.vocab.stoi['<pad>']] != 1).float()\n\n if bp < 1.0:\n masks = self.change_bp_masks(masks, bp)\n\n return masks\n\n def find_captions_length(self, all_captions):\n # find length of each caption\n all_captions_lengths = []\n # list of lists\n if type(all_captions[0]) == list:\n num_captions = len(all_captions[0])\n for i in range(num_captions):\n caption_length = 0\n for j in range(len(all_captions)):\n caption_length += len(all_captions[j][i].split(' '))\n caption_length = int(caption_length / len(all_captions))\n all_captions_lengths.append(caption_length)\n else:\n for cap in all_captions:\n all_captions_lengths.append(len(cap.split(' ')))\n\n return all_captions_lengths\n\n def quick_prepare_mscoco(self, batch, all_captions=None, fast=True, inputs_dec='pool', trg_len_option=None, max_len=20, trg_len_dic=None, decoder_inputs=None, targets=None, decoder_masks=None, target_masks=None, source_masks=None, bp=1.00, gpu=True):\n features_beforepool, captions = batch[0], batch[1]\n batch_size, d_model = features_beforepool.size(0), features_beforepool.size(1)\n\n # batch_size x 49 x 512\n features_beforepool = features_beforepool.view(batch_size, d_model, 49).transpose(1, 2)\n if gpu:\n encoding = self.encoding(Variable(features_beforepool, requires_grad=False).cuda(), source_masks) # batch of resnet features\n source_masks = torch.FloatTensor(batch_size, 49).fill_(1).cuda()\n targets = self.prepare_target_captions(captions, self.field.vocab.stoi).cuda()\n else:\n encoding = self.encoding(Variable(features_beforepool, requires_grad=False), source_masks) # batch of resnet features\n source_masks = torch.FloatTensor(batch_size, 49).fill_(1)\n targets = self.prepare_target_captions(captions, self.field.vocab.stoi)\n\n # list of batch_size\n all_captions_lengths = self.find_captions_length(all_captions)\n\n # predicted decoder lens\n if trg_len_option == \"predict\":\n # batch_size tensor\n if gpu:\n target_len = Variable(torch.from_numpy(np.clip(np.array(all_captions_lengths), 0, self.max_offset)).cuda(), requires_grad=False)\n else:\n target_len = Variable(torch.from_numpy(np.clip(np.array(all_captions_lengths), 0, self.max_offset)), requires_grad=False)\n\n # HARDCODED (4 layer model) !!!\n pred_target_len_logits = self.pred_len((encoding[0]+encoding[1]+encoding[2]+encoding[3]+encoding[4]).mean(1))\n pred_target_len_loss = F.cross_entropy(pred_target_len_logits, target_len.long())\n pred_target_len = pred_target_len_logits.max(-1)[1]\n\n if fast == False:\n decoder_inputs, decoder_masks = self.prepare_decoder_inputs(targets, decoder_inputs, decoder_masks) # prepare decoder-inputs\n else:\n if trg_len_option == \"fixed\":\n decoder_len = int(max_len)\n decoder_masks = torch.ones(batch_size, decoder_len)\n if gpu:\n decoder_masks = decoder_masks.cuda(encoding[0].get_device())\n\n # TODO ADD BP OPTION\n elif trg_len_option == \"reference\" or (trg_len_option == \"predict\" and self.use_predicted_trg_len == False):\n decoder_len = max(all_captions_lengths)\n decoder_masks = np.zeros((batch_size, decoder_len))\n for idx in range(decoder_masks.shape[0]):\n decoder_masks[idx][:all_captions_lengths[idx]] = 1\n 
decoder_masks = torch.from_numpy(decoder_masks).float()\n if gpu:\n decoder_masks = decoder_masks.cuda(encoding[0].get_device())\n\n if trg_len_option == \"predict\":\n if self.use_predicted_trg_len:\n pred_target_len = pred_target_len.data.cpu().numpy()\n decoder_len = np.max(pred_target_len)\n decoder_masks = np.zeros((batch_size, decoder_len))\n for idx in range(pred_target_len.shape[0]):\n decoder_masks[idx][:pred_target_len[idx]] = 1\n decoder_masks = torch.from_numpy(decoder_masks).float()\n if gpu:\n decoder_masks = decoder_masks.cuda(encoding[0].get_device())\n if bp < 1.0:\n decoder_masks = self.change_bp_masks(decoder_masks, bp)\n\n if not self.use_predicted_trg_len:\n pred_target_len = pred_target_len.data.cpu().numpy()\n\n target_len = target_len.data.cpu().numpy()\n\n # calculate error for predicted target length\n pred_target_len_correct = np.sum(pred_target_len == target_len)*100/batch_size\n pred_target_len_approx = np.sum(np.abs(pred_target_len - target_len) < 5)*100/batch_size\n average_target_len_correct = 0\n average_target_len_approx = 0\n\n rest = [pred_target_len_loss, pred_target_len_correct, pred_target_len_approx, average_target_len_correct, average_target_len_approx]\n\n if inputs_dec == 'pool':\n # batch_size x 1 x 512\n decoder_inputs = torch.mean(features_beforepool, 1, keepdim=True)\n decoder_inputs = decoder_inputs.repeat(1, int(decoder_len), 1)\n decoder_inputs = Variable(decoder_inputs, requires_grad=False)\n if gpu:\n decoder_inputs = decoder_inputs.cuda(encoding[0].get_device())\n elif inputs_dec == 'zeros':\n decoder_inputs = Variable(torch.zeros(batch_size, int(decoder_len), d_model), requires_grad=False)\n if gpu:\n decoder_inputs = decoder_inputs.cuda(encoding[0].get_device())\n\n # REMOVE THE FIRST <INIT> TAG FROM CAPTIONS\n targets = targets[:, 1:]\n if gpu:\n target_masks = (targets != 1).float().cuda().data\n else:\n target_masks = (targets != 1).float().data\n\n if trg_len_option != \"predict\":\n rest = []\n sources = None\n\n return decoder_inputs, decoder_masks, targets, target_masks, sources, source_masks, encoding, decoder_inputs.size(0), rest\n\n def prepare_target_captions(self, captions, vocab):\n # captions : batch_size X seq_len\n lst = []\n batch_size = len(captions)\n for bidx in range(batch_size):\n lst.append( [\"<init>\"] + captions[ bidx ].lower().split() + [\"<eos>\"] )\n #lst.append( [ vocab[idx] for idx in captions[ random.randint(0,4) ][ bidx ].lower().split() ] )\n lst = [[vocab[idx] if idx in vocab else 0 for idx in sentence] for sentence in lst]\n seq_len = max( [len(xx) for xx in lst] )\n captions = np.ones((batch_size, seq_len))\n for bidx in range(batch_size):\n min_len = min(seq_len, len(lst[bidx]))\n captions[bidx, :min_len] = np.array(lst[bidx][:min_len])\n captions = torch.from_numpy(captions).long()\n return Variable(captions, requires_grad=False)\n\n def quick_prepare(self, batch, fast=True, trg_len_option=None, trg_len_ratio=2.0, trg_len_dic=None, decoder_inputs=None, targets=None, decoder_masks=None, target_masks=None, source_masks=None, bp=1.00):\n sources, source_masks = self.prepare_sources(batch, source_masks)\n encoding = self.encoding(sources, source_masks)\n targets, target_masks = self.prepare_targets(batch, targets, decoder_masks) # prepare decoder-targets\n\n # predicted decoder masks\n if trg_len_option == \"predict\":\n target_offset = Variable((target_masks.sum(-1) - source_masks.sum(-1)).clamp_(-self.max_offset, self.max_offset), requires_grad=False) # batch_size tensor\n source_len = 
Variable(source_masks.sum(-1), requires_grad=False)\n\n pred_target_offset_logits = self.pred_len((encoding[0]+encoding[1]+encoding[2]+encoding[3]+encoding[4]+encoding[5]).mean(1))\n pred_target_offset_logits = self.pred_len_drop( pred_target_offset_logits )\n pred_target_len_loss = F.cross_entropy(pred_target_offset_logits, (target_offset + self.max_offset).long())\n pred_target_offset = pred_target_offset_logits.max(-1)[1] - self.max_offset\n pred_target_len = source_len.long() + pred_target_offset\n\n d_model = encoding[0].size(-1)\n batch_size, src_max_len = source_masks.size()\n rest = []\n\n if fast:\n # compute decoder_masks\n if trg_len_option == \"reference\":\n _, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks, bp=bp)\n\n elif trg_len_option == \"noisy_ref\":\n bp = np.random.uniform(bp, 1.0)\n _, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks, bp=bp)\n\n elif trg_len_option == \"average\":\n decoder_masks = make_decoder_masks(source_masks, trg_len_dic)\n # we use the average target lengths\n\n elif trg_len_option == \"predict\":\n # convert to numpy arrays first\n source_len = source_masks.sum(-1).cpu().numpy()\n target_len = target_masks.sum(-1).cpu().numpy()\n pred_target_len = pred_target_len.data.cpu().numpy()\n\n if not self.use_predicted_trg_len:\n _, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks, bp=bp)\n else:\n decoder_max_len = max(pred_target_len)\n decoder_masks = np.zeros((batch_size, decoder_max_len))\n for idx in range(pred_target_len.shape[0]):\n decoder_masks[idx][:pred_target_len[idx]] = 1\n decoder_masks = torch.from_numpy(decoder_masks).float()\n if source_masks.is_cuda:\n decoder_masks = decoder_masks.cuda(source_masks.get_device())\n if bp < 1.0:\n decoder_masks = self.change_bp_masks(decoder_masks, bp)\n\n # check the results of predicting target length\n pred_target_len_correct = np.sum(pred_target_len == target_len)*100/batch_size\n pred_target_len_approx = np.sum(np.abs(pred_target_len - target_len) < 5)*100/batch_size\n\n # results with average len\n average_target_len = [query_trg_len_dic(trg_len_dic, source) for source in source_len]\n average_target_len = np.array(average_target_len)\n average_target_len_correct = np.sum(average_target_len == target_len)*100/batch_size\n average_target_len_approx = np.sum(np.abs(average_target_len - target_len) < 5)*100/batch_size\n\n rest = [pred_target_len_loss, pred_target_len_correct, pred_target_len_approx, average_target_len_correct, average_target_len_approx]\n\n elif \"fixed\" in trg_len_option:\n trg_len = (batch.trg != 1).sum(-1).int().data.cpu().numpy().tolist()\n\n source_lens = source_masks.sum(-1).cpu().numpy()\n decoder_masks = torch.zeros(batch_size, int(round(trg_len_ratio * src_max_len)))\n dec_len = int(round(trg_len_ratio * src_max_len))\n\n for bi in range(batch_size):\n ss = source_lens[bi]\n decoder_masks[bi,:int(round(trg_len_ratio*ss))] = 1\n\n if encoding[0].is_cuda:\n decoder_masks = decoder_masks.cuda(encoding[0].get_device())\n decoder_inputs, decoder_masks = self.prepare_initial(encoding, sources, source_masks, decoder_masks)\n else:\n decoder_inputs, decoder_masks = self.prepare_decoder_inputs(batch.trg, decoder_inputs, decoder_masks) # prepare decoder-inputs\n\n return decoder_inputs, decoder_masks, targets, target_masks, sources, source_masks, encoding, decoder_inputs.size(0), rest\n\n def forward(self, encoding, source_masks, decoder_inputs, decoder_masks,\n 
decoding=False, beam=1, alpha=0.6, return_probs=False, positions=None, feedback=None):\n\n if (return_probs and decoding) or (not decoding):\n out = self.decoder(decoder_inputs, encoding, source_masks, decoder_masks)\n\n if decoding:\n if beam == 1: # greedy decoding\n output = self.decoder.greedy(encoding, source_masks, decoder_masks, feedback=feedback)\n else:\n output = self.decoder.beam_search(encoding, source_masks, decoder_masks, beam, alpha)\n\n if return_probs:\n return output, out, self.decoder.out(out) # NOTE don't do softmax for validation\n #return output, out, softmax(self.decoder.out(out))\n return output\n\n if return_probs:\n return out, softmax(self.decoder.out(out))\n return out\n\n def cost(self, decoder_targets, decoder_masks, out=None):\n # get loss in a sequence-format to save computational time.\n decoder_targets, out = prepare_cost(decoder_targets, out, decoder_masks.byte())\n logits = self.decoder.out(out)\n loss = F.cross_entropy(logits, decoder_targets)\n return loss\n\n def batched_cost(self, decoder_targets, decoder_masks, probs, batched=False):\n # get loss in a batch-mode\n\n if decoder_targets.ndimension() == 2: # batch x length\n loss = -torch.log(probs + TINY).gather(2, decoder_targets[:, :, None])[:, :, 0] # batch x length\n else:\n loss = -(torch.log(probs + TINY) * decoder_targets).sum(-1)\n return self.apply_mask_cost(loss, decoder_masks, batched)\n\nclass FastTransformer(Transformer):\n\n def __init__(self, src=None, trg=None, args=None):\n super(Transformer, self).__init__()\n self.is_mscoco = args.dataset == \"mscoco\"\n self.decoder_input_how = args.decoder_input_how\n self.encoder = Encoder(src, args)\n '''\n if self.is_mscoco == False:\n self.encoder = Encoder(src, args)\n else:\n self.encoder = EncoderCNN(args)\n '''\n self.decoder = nn.ModuleList()\n for ni in range(args.num_decs):\n self.decoder.append(Decoder(trg, args,\n causal=False,\n positional=args.positional,\n diag=args.diag,\n out=self.encoder.out if args.share_embed_enc_dec1 and ni == 0 else None)\n )\n self.field = trg\n if self.is_mscoco == False:\n self.share_embed = args.share_embed\n else:\n self.share_embed = False\n self.train_repeat_dec = args.train_repeat_dec\n self.num_decs = args.num_decs\n if args.trg_len_option == \"predict\":\n if args.dataset != \"mscoco\":\n self.pred_len = Linear(args.d_model, 2*args.max_offset + 1)\n else:\n self.pred_len = Linear(args.d_model, args.max_offset+1)\n self.pred_len_drop = nn.Dropout(args.drop_len_pred)\n self.max_offset = args.max_offset\n self.use_predicted_trg_len = args.use_predicted_trg_len\n self.n_layers = args.n_layers\n self.d_model = args.d_model\n self.softmax = nn.Softmax(dim = -1)\n\n def output_decoding(self, outputs, unbpe = True):\n field, text = outputs\n if field is 'src':\n return self.encoder.field.reverse(text.data, unbpe)\n else:\n return self.decoder[0].field.reverse(text.data, unbpe)\n\n # decoder_masks already decided\n # computes decoder_inputs\n def prepare_initial(self, encoding, source=None, source_masks=None, decoder_masks=None,\n N=1, tau=1):\n \n decoder_input_how = self.decoder_input_how\n d_model = encoding[0].size()[-1]\n attention = linear_attention(source_masks, decoder_masks, decoder_input_how)\n\n if decoder_input_how in [\"copy\", \"pad\", \"wrap\"]:\n attention = self.apply_mask(attention, decoder_masks, p=1) # p doesn't matter cos masked out\n attention = attention[:,:,None].expand(*attention.size(), d_model)\n decoder_inputs = torch.gather(encoding[0], dim=1, index=attention)\n\n elif 
decoder_input_how == \"interpolate\":\n decoder_inputs = matmul(attention, encoding[0]) # batch x max_trg x size\n\n return decoder_inputs, decoder_masks\n\n def forward(self, encoding, source_masks, decoder_inputs, decoder_masks,\n decoding=False, beam=1, alpha=0.6,\n return_probs=False, positions=None, feedback=None, iter_=0, T=1):\n\n thedecoder = self.decoder[iter_]\n out = thedecoder(decoder_inputs, encoding, source_masks, decoder_masks,\n input_embeddings=True, positions=positions, feedback=feedback)\n # out : output from the (-1)-th DecoderLayer\n\n if not decoding: # NOTE training\n if not return_probs:\n return out\n return out, softmax(thedecoder.out(out), T=T) # probs\n\n logits = thedecoder.out(out)\n\n if beam == 1:\n output = self.apply_mask(logits.max(-1)[1], decoder_masks) # NOTE given mask, set non-mask to 1\n else:\n output, decoder_masks = topK_search(logits, decoder_masks, N=beam)\n output = self.apply_mask(output, decoder_masks)\n\n if not return_probs:\n return output\n else:\n return output, out, logits # NOTE don't do softmax for validation\n #return output, out, softmax(logits, T=T)\n\n def cost(self, targets, target_mask, out=None, iter_=0, return_logits=False):\n # get loss in a sequence-format to save computational time.\n targets, out = prepare_cost(targets, out, target_mask.byte())\n logits = self.decoder[iter_].out(out)\n loss = F.cross_entropy(logits, targets)\n if return_logits:\n return loss, logits\n return loss\n\n def rf_cost(self, args, targets, target_mask, out=None, iter_=0, return_logits=False):\n # REINFORCE, Eq.(11)\n targets, out = prepare_cost(targets, out, target_mask.byte())\n logits = self.decoder[iter_].out(out)\n probs = self.softmax(logits)\n\n sample_index = torch.multinomial(probs,1)\n sample_prob = torch.gather(probs, -1, sample_index)\n target_lens = torch.sum(target_mask, dim = -1).long().tolist()\n targets = targets.data.tolist()\n sample_index =sample_index.data.view(-1).tolist()\n if args.sample_method == 'sentence':\n gleu = self.compute_sentence_gleu(args, sample_index, targets, target_lens)\n else:\n gleu = self.compute_stepwise_gleu(args.stepwise_sampletimes, args.workers, sample_index, probs, targets, target_lens)\n \n loss = torch.sum((-1 * torch.log(sample_prob).view(-1) * gleu),dim = 0).div(len(targets))\n return loss\n\n \n def nat_cost(self, args, targets, target_mask, out=None, iter_=0, return_logits=False):\n # RF-NAT, Eq.(12)\n targets, out = prepare_cost(targets, out, target_mask.byte())\n logits = self.decoder[iter_].out(out)\n probs = self.softmax(logits)\n target_lens = torch.sum(target_mask, dim = -1).long().tolist()\n targets = targets.data.tolist()\n \n top_probs, top_index = torch.topk(probs, args.topk, dim = -1)\n\n a,b = probs.size()\n copy_probs = Variable(torch.zeros(a,b ).cuda(probs.get_device()))\n copy_probs.data.copy_(probs.data)\n copy_probs.scatter_add_(1, top_index, -1 * top_probs)\n sample_index = torch.multinomial(copy_probs,1)\n sample_prob = torch.gather(probs, -1, sample_index)\n sample_index = sample_index.data.view(-1).tolist()\n\n top_index = top_index.t().data.tolist()\n weight = torch.sum(top_probs, dim = -1).detach()\n gleus = []\n\n for i in range(args.topk):\n top_idx = top_index[i]\n gleu = self.compute_stepwise_gleu(args.stepwise_sampletimes, args.workers, top_idx, probs, targets, target_lens)\n gleus.append(gleu)\n gleus = torch.stack(gleus, dim = 1)\n loss_traverse = -1 * torch.sum(top_probs * gleus)\n\n gleu = self.compute_stepwise_gleu(args.stepwise_sampletimes, args.workers, 
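# reward: sentence GLEU of Monte Carlo rollouts with the scored token substituted at each position, averaged over the rollouts\n            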
sample_index, probs, targets, target_lens)\n loss_sample = torch.sum((-1 * (1-weight) * torch.log(sample_prob).view(-1) * (gleu - ave_gleu)),dim = 0)\n\n loss = (loss_sample + loss_traverse).div(len(targets))\n return loss\n\n def compute_sentence_gleu(self, args, sample_index, targets, target_lens):\n\n #tokenizer = lambda x: x.replace('@@ ', '').split()\n list_targets = self.shape(targets,target_lens)\n list_samples = self.shape(sample_index,target_lens)\n gleus = computeGLEU(list_samples, list_targets)\n gleus = self.deshape(gleus,target_lens)\n\n return Variable(torch.Tensor(gleus).cuda(args.gpu))\n\n def compute_stepwise_gleu(self, sample_times, workers, sample_index, sample_prob, targets, target_lens):\n\n list_targets = self.shape(targets,target_lens)\n list_samples = self.shape(sample_index,target_lens)\n count = len(list_samples)\n gleus = []\n sample_idxs = [torch.multinomial(sample_prob,1).data.view(-1).tolist() for i in range(sample_times)]\n inputs = [(sample_idxs[i], list_samples, list_targets, count, target_lens) for i in range(sample_times)]\n pool = ProcessPoolExecutor(max_workers=workers)\n gleus = list(pool.map(parallel_gleu, inputs))\n gleus = Variable(torch.Tensor(gleus).cuda(sample_prob.get_device()))\n gleus = torch.mean(gleus,dim = 0)\n \n return gleus\n \n def shape(self, targets, target_lens):\n\n list_targets = []\n begin = 0\n end = 0\n for length in target_lens:\n end += length\n list_targets.append([str(index) for index in targets[begin:end]])\n begin += length\n \n return list_targets\n\n def deshape(self, prev_targets, target_lens):\n targets = []\n for i in range(len(target_lens)):\n targets += ([prev_targets[i]] * target_lens[i])\n return targets\n\n\n\ndef mask(targets, out, input_mask=None, return_mask=False):\n if input_mask is None:\n input_mask = (targets != 1)\n out_mask = input_mask.unsqueeze(-1).expand_as(out)\n\n if return_mask:\n return targets[input_mask], out[out_mask].view(-1, out.size(-1)), the_mask\n return targets[input_mask], out[out_mask].view(-1, out.size(-1))\n\ndef prepare_cost(targets, out, target_mask=None, return_mask=None):\n # targets : batch_size, seq_len\n # out : batch_size, seq_len, vocab_size\n # target_mask : batch_size, seq_len\n if target_mask is None:\n target_mask = (targets != 1)\n\n if targets.size(1) < out.size(1):\n out = out[:, :targets.size(1), :]\n elif targets.size(1) > out.size(1):\n targets = targets[:, :out.size(1)]\n target_mask = target_mask[:, :out.size(1)]\n\n out_mask = target_mask.unsqueeze(-1).expand_as(out)\n\n if return_mask:\n return targets[target_mask], out[out_mask].view(-1, out.size(-1)), out_mask\n else:\n return targets[target_mask], out[out_mask].view(-1, out.size(-1))\n\ndef linear_attention(source_masks, decoder_masks, decoder_input_how):\n if decoder_input_how == \"copy\":\n max_src_len = source_masks.size(1)\n max_trg_len = decoder_masks.size(1)\n\n src_lens = source_masks.sum(-1).float()-1 # batch_size\n trg_lens = decoder_masks.sum(-1).float()-1 # batch_size\n steps = src_lens / trg_lens # batch_size\n\n index_s = torch.arange(max_trg_len) # max_trg_len\n if decoder_masks.is_cuda:\n index_s = index_s.cuda(decoder_masks.get_device())\n\n index_s = steps[:,None] * index_s[None,:] # batch_size X max_trg_len\n index_s = Variable(torch.round(index_s), requires_grad=False).long()\n return index_s\n\n elif decoder_input_how == \"wrap\":\n batch_size, max_src_len = source_masks.size()\n max_trg_len = decoder_masks.size(1)\n\n src_lens = source_masks.sum(-1).int() # batch_size\n\n index_s = 
torch.arange(max_trg_len)[None,:] # max_trg_len\n index_s = index_s.repeat(batch_size, 1) # (batch_size, max_trg_len)\n\n for sin in range(batch_size):\n if src_lens[sin]+1 < max_trg_len:\n index_s[sin, src_lens[sin]:2*src_lens[sin]] = index_s[sin, :src_lens[sin]]\n\n if decoder_masks.is_cuda:\n index_s = index_s.cuda(decoder_masks.get_device())\n\n return Variable(index_s, requires_grad=False).long()\n\n elif decoder_input_how == \"pad\":\n batch_size, max_src_len = source_masks.size()\n max_trg_len = decoder_masks.size(1)\n\n src_lens = source_masks.sum(-1).int() - 1 # batch_size\n\n index_s = torch.arange(max_trg_len)[None,:] # max_trg_len\n index_s = index_s.repeat(batch_size, 1) # (batch_size, max_trg_len)\n\n for sin in range(batch_size):\n if src_lens[sin]+1 < max_trg_len:\n index_s[sin, src_lens[sin]+1:] = index_s[sin, src_lens[sin]]\n\n if decoder_masks.is_cuda:\n index_s = index_s.cuda(decoder_masks.get_device())\n\n return Variable(index_s, requires_grad=False).long()\n\n elif decoder_input_how == \"interpolate\":\n max_src_len = source_masks.size(1)\n max_trg_len = decoder_masks.size(1)\n src_lens = source_masks.sum(-1).float() # batchsize\n trg_lens = decoder_masks.sum(-1).float() # batchsize\n steps = src_lens / trg_lens # batchsize\n index_t = torch.arange(0, max_trg_len) # max_trg_len\n if decoder_masks.is_cuda:\n index_t = index_t.cuda(decoder_masks.get_device())\n index_t = steps[:, None] @ index_t[None, :] # batch x max_trg_len\n index_s = torch.arange(0, max_src_len) # max_src_len\n if decoder_masks.is_cuda:\n index_s = index_s.cuda(decoder_masks.get_device())\n indexxx_ = (index_s[None, None, :] - index_t[:, :, None]) ** 2 # batch x max_trg x max_src\n indexxx = softmax(Variable(-indexxx_.float() / 0.3 - INF * (1 - source_masks[:, None, :].float() ))) # batch x max_trg x max_src\n return indexxx\n" ]
[ [ "torch.nn.Softmax", "torch.nn.functional.softmax", "torch.mean", "torch.cat", "torch.zeros", "torch.sin", "torch.sum", "torch.multinomial", "numpy.max", "torch.FloatTensor", "torch.topk", "torch.autograd.Variable", "torch.nn.Dropout", "torch.ones", "torch.round", "torch.from_numpy", "torch.mul", "torch.arange", "numpy.zeros", "torch.cos", "torch.nn.init.uniform", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.log", "torch.nn.BatchNorm2d", "torch.stack", "numpy.array", "numpy.sum", "torch.nn.functional.embedding", "numpy.abs", "torch.nn.functional.log_softmax", "torch.Tensor", "torch.nn.functional.cross_entropy", "numpy.ones", "torch.gather", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
denniswon/hand-gesture-recognition
[ "f07f80370602bae12f8c4ca3697c14dc13530898" ]
[ "simple.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport csv\nimport copy\nimport argparse\nimport itertools\nfrom collections import Counter\nfrom collections import deque\n\nimport cv2 as cv\nimport numpy as np\nimport mediapipe as mp\n\nfrom utils import CvFpsCalc\nfrom model import KeyPointClassifier\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--device\", type=int, default=0)\n parser.add_argument(\"--width\", help='cap width', type=int, default=960)\n parser.add_argument(\"--height\", help='cap height', type=int, default=540)\n\n parser.add_argument('--use_static_image_mode', action='store_true')\n parser.add_argument(\"--min_detection_confidence\",\n help='min_detection_confidence',\n type=float,\n default=0.7)\n parser.add_argument(\"--min_tracking_confidence\",\n help='min_tracking_confidence',\n type=int,\n default=0.5)\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n # Argument parsing #################################################################\n args = get_args()\n\n cap_device = args.device\n cap_width = args.width\n cap_height = args.height\n\n use_static_image_mode = args.use_static_image_mode\n min_detection_confidence = args.min_detection_confidence\n min_tracking_confidence = args.min_tracking_confidence\n\n use_brect = True\n\n # Camera preparation ###############################################################\n cap = cv.VideoCapture(cap_device)\n cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)\n cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)\n\n # Model load #############################################################\n mp_hands = mp.solutions.hands\n hands = mp_hands.Hands(\n static_image_mode=use_static_image_mode,\n max_num_hands=1,\n min_detection_confidence=min_detection_confidence,\n min_tracking_confidence=min_tracking_confidence,\n )\n\n keypoint_classifier = KeyPointClassifier()\n\n # Read labels ###########################################################\n with open('model/keypoint_classifier/keypoint_classifier_label.csv',\n encoding='utf-8-sig') as f:\n keypoint_classifier_labels = csv.reader(f)\n keypoint_classifier_labels = [\n row[0] for row in keypoint_classifier_labels\n ]\n\n # FPS Measurement ########################################################\n cvFpsCalc = CvFpsCalc(buffer_len=10)\n\n # ########################################################################\n mode = 0\n\n while True:\n fps = cvFpsCalc.get()\n\n # Process Key (ESC: end) #################################################\n key = cv.waitKey(10)\n if key == 27: # ESC\n break\n\n # Camera capture #####################################################\n ret, image = cap.read()\n if not ret:\n break\n image = cv.flip(image, 1) # Mirror display\n debug_image = copy.deepcopy(image)\n\n # Detection implementation #############################################################\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n\n image.flags.writeable = False\n results = hands.process(image)\n image.flags.writeable = True\n\n # ####################################################################\n if results.multi_hand_landmarks is not None:\n for hand_landmarks, handedness in zip(results.multi_hand_landmarks,\n results.multi_handedness):\n # Bounding box calculation\n brect = calc_bounding_rect(debug_image, hand_landmarks)\n # Landmark calculation\n landmark_list = calc_landmark_list(debug_image, hand_landmarks)\n\n # Conversion to relative coordinates / normalized coordinates\n pre_processed_landmark_list = pre_process_landmark(\n 
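# wrist-relative coordinates, flattened and scaled by the max absolute value\n                    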
landmark_list)\n \n # Hand sign classification\n hand_sign_id = keypoint_classifier(pre_processed_landmark_list)\n\n # Drawing part\n debug_image = draw_bounding_rect(use_brect, debug_image, brect)\n debug_image = draw_landmarks(debug_image, landmark_list)\n debug_image = draw_info_text(\n debug_image,\n brect,\n handedness,\n keypoint_classifier_labels[hand_sign_id],\n )\n\n debug_image = draw_info(debug_image, fps)\n\n # Screen reflection #############################################################\n cv.imshow('Hand Gesture Recognition', debug_image)\n\n cap.release()\n cv.destroyAllWindows()\n\ndef calc_bounding_rect(image, landmarks):\n image_width, image_height = image.shape[1], image.shape[0]\n\n landmark_array = np.empty((0, 2), int)\n\n for _, landmark in enumerate(landmarks.landmark):\n landmark_x = min(int(landmark.x * image_width), image_width - 1)\n landmark_y = min(int(landmark.y * image_height), image_height - 1)\n\n landmark_point = [np.array((landmark_x, landmark_y))]\n\n landmark_array = np.append(landmark_array, landmark_point, axis=0)\n\n x, y, w, h = cv.boundingRect(landmark_array)\n\n return [x, y, x + w, y + h]\n\n\ndef calc_landmark_list(image, landmarks):\n image_width, image_height = image.shape[1], image.shape[0]\n\n landmark_point = []\n\n # Keypoint\n for _, landmark in enumerate(landmarks.landmark):\n landmark_x = min(int(landmark.x * image_width), image_width - 1)\n landmark_y = min(int(landmark.y * image_height), image_height - 1)\n # landmark_z = landmark.z\n\n landmark_point.append([landmark_x, landmark_y])\n\n return landmark_point\n\n\ndef pre_process_landmark(landmark_list):\n temp_landmark_list = copy.deepcopy(landmark_list)\n\n # Convert to relative coordinates\n base_x, base_y = 0, 0\n for index, landmark_point in enumerate(temp_landmark_list):\n if index == 0:\n base_x, base_y = landmark_point[0], landmark_point[1]\n\n temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x\n temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y\n\n # Convert to a one-dimensional list\n temp_landmark_list = list(\n itertools.chain.from_iterable(temp_landmark_list))\n\n # Normalization\n max_value = max(list(map(abs, temp_landmark_list)))\n\n def normalize_(n):\n return n / max_value\n\n temp_landmark_list = list(map(normalize_, temp_landmark_list))\n\n return temp_landmark_list\n\ndef draw_landmarks(image, landmark_point):\n if len(landmark_point) > 0:\n # Thumb\n cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),\n (255, 255, 255), 2)\n\n # Index finger\n cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),\n (255, 255, 255), 2)\n\n # Middle finger\n cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),\n (255, 255, 255), 
2)\n cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),\n (255, 255, 255), 2)\n\n # Ring finger\n cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),\n (255, 255, 255), 2)\n\n # Little finger\n cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),\n (255, 255, 255), 2)\n\n # Palm\n cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),\n (255, 255, 255), 2)\n cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),\n (0, 0, 0), 6)\n cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),\n (255, 255, 255), 2)\n\n # Key Points\n for index, landmark in enumerate(landmark_point):\n if index == 0:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 1:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 2:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 3:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 4:\n cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 
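# radius 8 marks a fingertip keypoint (indices 4, 8, 12, 16, 20)\n                      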
8, (0, 0, 0), 1)\n if index == 5:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 6:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 7:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 8:\n cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)\n if index == 9:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 10:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 11:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 12:\n cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)\n if index == 13:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 14:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 15:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 16:\n cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)\n if index == 17:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 18:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 19:\n cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)\n if index == 20:\n cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),\n -1)\n cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)\n\n return image\n\n\ndef draw_bounding_rect(use_brect, image, brect):\n if use_brect:\n # Outer rectangle\n cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),\n (0, 0, 0), 1)\n\n return image\n\n\ndef draw_info_text(image, brect, handedness, hand_sign_text):\n cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[1] - 22),\n (0, 0, 0), -1)\n\n info_text = handedness.classification[0].label[0:]\n if hand_sign_text != \"\":\n info_text = info_text + ':' + hand_sign_text\n cv.putText(image, info_text, (brect[0] + 5, brect[1] - 4),\n cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv.LINE_AA)\n\n return image\n\ndef draw_info(image, fps):\n cv.putText(image, \"FPS:\" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,\n 1.0, (0, 0, 0), 4, cv.LINE_AA)\n cv.putText(image, \"FPS:\" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,\n 1.0, (255, 255, 255), 2, cv.LINE_AA)\n return image\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.append", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brandondavid/scipy
[ "286037791681f4191f3bb741ae4a707671aa5ad0" ]
[ "scipy/spatial/distance.py" ]
[ "\"\"\"\nDistance computations (:mod:`scipy.spatial.distance`)\n=====================================================\n\n.. sectionauthor:: Damian Eads\n\nFunction reference\n------------------\n\nDistance matrix computation from a collection of raw observation vectors\nstored in a rectangular array.\n\n.. autosummary::\n :toctree: generated/\n\n pdist -- pairwise distances between observation vectors.\n cdist -- distances between two collections of observation vectors\n squareform -- convert distance matrix to a condensed one and vice versa\n directed_hausdorff -- directed Hausdorff distance between arrays\n\nPredicates for checking the validity of distance matrices, both\ncondensed and redundant. Also contained in this module are functions\nfor computing the number of observations in a distance matrix.\n\n.. autosummary::\n :toctree: generated/\n\n is_valid_dm -- checks for a valid distance matrix\n is_valid_y -- checks for a valid condensed distance matrix\n num_obs_dm -- # of observations in a distance matrix\n num_obs_y -- # of observations in a condensed distance matrix\n\nDistance functions between two numeric vectors ``u`` and ``v``. Computing\ndistances over a large collection of vectors is inefficient for these\nfunctions. Use ``pdist`` for this purpose.\n\n.. autosummary::\n :toctree: generated/\n\n braycurtis -- the Bray-Curtis distance.\n canberra -- the Canberra distance.\n chebyshev -- the Chebyshev distance.\n cityblock -- the Manhattan distance.\n correlation -- the Correlation distance.\n cosine -- the Cosine distance.\n euclidean -- the Euclidean distance.\n jensenshannon -- the Jensen-Shannon distance.\n mahalanobis -- the Mahalanobis distance.\n minkowski -- the Minkowski distance.\n seuclidean -- the normalized Euclidean distance.\n sqeuclidean -- the squared Euclidean distance.\n wminkowski -- (deprecated) alias of `minkowski`.\n\nDistance functions between two boolean vectors (representing sets) ``u`` and\n``v``. As in the case of numerical vectors, ``pdist`` is more efficient for\ncomputing the distances between all pairs.\n\n.. autosummary::\n :toctree: generated/\n\n dice -- the Dice dissimilarity.\n hamming -- the Hamming distance.\n jaccard -- the Jaccard distance.\n kulsinski -- the Kulsinski distance.\n rogerstanimoto -- the Rogers-Tanimoto dissimilarity.\n russellrao -- the Russell-Rao dissimilarity.\n sokalmichener -- the Sokal-Michener dissimilarity.\n sokalsneath -- the Sokal-Sneath dissimilarity.\n yule -- the Yule dissimilarity.\n\n:func:`hamming` also operates over discrete numerical vectors.\n\"\"\"\n\n# Copyright (C) Damian Eads, 2007-2008. New BSD License.\n\n__all__ = [\n 'braycurtis',\n 'canberra',\n 'cdist',\n 'chebyshev',\n 'cityblock',\n 'correlation',\n 'cosine',\n 'dice',\n 'directed_hausdorff',\n 'euclidean',\n 'hamming',\n 'is_valid_dm',\n 'is_valid_y',\n 'jaccard',\n 'jensenshannon',\n 'kulsinski',\n 'mahalanobis',\n 'matching',\n 'minkowski',\n 'num_obs_dm',\n 'num_obs_y',\n 'pdist',\n 'rogerstanimoto',\n 'russellrao',\n 'seuclidean',\n 'sokalmichener',\n 'sokalsneath',\n 'sqeuclidean',\n 'squareform',\n 'wminkowski',\n 'yule'\n]\n\n\nimport warnings\nimport numpy as np\nimport dataclasses\n\nfrom typing import List, Optional, Set, Callable\n\nfrom functools import partial\nfrom scipy._lib._util import _asarray_validated\nfrom scipy._lib.deprecation import _deprecated\n\nfrom . import _distance_wrap\nfrom . 
import _hausdorff\nfrom ..linalg import norm\nfrom ..special import rel_entr\n\n\ndef _copy_array_if_base_present(a):\n \"\"\"Copy the array if its base points to a parent array.\"\"\"\n if a.base is not None:\n return a.copy()\n return a\n\n\ndef _correlation_cdist_wrap(XA, XB, dm, **kwargs):\n XA = XA - XA.mean(axis=1, keepdims=True)\n XB = XB - XB.mean(axis=1, keepdims=True)\n _distance_wrap.cdist_cosine_double_wrap(XA, XB, dm, **kwargs)\n\n\ndef _correlation_pdist_wrap(X, dm, **kwargs):\n X2 = X - X.mean(axis=1, keepdims=True)\n _distance_wrap.pdist_cosine_double_wrap(X2, dm, **kwargs)\n\n\ndef _convert_to_type(X, out_type):\n return np.ascontiguousarray(X, dtype=out_type)\n\n\ndef _nbool_correspond_all(u, v, w=None):\n if u.dtype == v.dtype == bool and w is None:\n not_u = ~u\n not_v = ~v\n nff = (not_u & not_v).sum()\n nft = (not_u & v).sum()\n ntf = (u & not_v).sum()\n ntt = (u & v).sum()\n else:\n dtype = np.find_common_type([int], [u.dtype, v.dtype])\n u = u.astype(dtype)\n v = v.astype(dtype)\n not_u = 1.0 - u\n not_v = 1.0 - v\n if w is not None:\n not_u = w * not_u\n u = w * u\n nff = (not_u * not_v).sum()\n nft = (not_u * v).sum()\n ntf = (u * not_v).sum()\n ntt = (u * v).sum()\n return (nff, nft, ntf, ntt)\n\n\ndef _nbool_correspond_ft_tf(u, v, w=None):\n if u.dtype == v.dtype == bool and w is None:\n not_u = ~u\n not_v = ~v\n nft = (not_u & v).sum()\n ntf = (u & not_v).sum()\n else:\n dtype = np.find_common_type([int], [u.dtype, v.dtype])\n u = u.astype(dtype)\n v = v.astype(dtype)\n not_u = 1.0 - u\n not_v = 1.0 - v\n if w is not None:\n not_u = w * not_u\n u = w * u\n nft = (not_u * v).sum()\n ntf = (u * not_v).sum()\n return (nft, ntf)\n\n\ndef _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs):\n # get supported types\n types = metric_info.types\n # choose best type\n typ = types[types.index(XA.dtype)] if XA.dtype in types else types[0]\n # validate data\n XA = _convert_to_type(XA, out_type=typ)\n XB = _convert_to_type(XB, out_type=typ)\n\n # validate kwargs\n _validate_kwargs = metric_info.validator\n if _validate_kwargs:\n kwargs = _validate_kwargs((XA, XB), mA + mB, n, **kwargs)\n return XA, XB, typ, kwargs\n\n\ndef _validate_weight_with_size(X, m, n, **kwargs):\n w = kwargs.pop('w', None)\n if w is None:\n return kwargs\n\n if w.ndim != 1 or w.shape[0] != n:\n raise ValueError(\"Weights must have same size as input vector. \"\n f\"{w.shape[0]} vs. {n}\")\n\n kwargs['w'] = _validate_weights(w)\n return kwargs\n\n\ndef _validate_hamming_kwargs(X, m, n, **kwargs):\n w = kwargs.get('w', np.ones((n,), dtype='double'))\n\n if w.ndim != 1 or w.shape[0] != n:\n raise ValueError(\"Weights must have same size as input vector. %d vs. %d\" % (w.shape[0], n))\n\n kwargs['w'] = _validate_weights(w)\n return kwargs\n\n\ndef _validate_mahalanobis_kwargs(X, m, n, **kwargs):\n VI = kwargs.pop('VI', None)\n if VI is None:\n if m <= n:\n # There are fewer observations than the dimension of\n # the observations.\n raise ValueError(\"The number of observations (%d) is too \"\n \"small; the covariance matrix is \"\n \"singular. 
For observations with %d \"\n \"dimensions, at least %d observations \"\n \"are required.\" % (m, n, n + 1))\n if isinstance(X, tuple):\n X = np.vstack(X)\n CV = np.atleast_2d(np.cov(X.astype(np.double, copy=False).T))\n VI = np.linalg.inv(CV).T.copy()\n kwargs[\"VI\"] = _convert_to_double(VI)\n return kwargs\n\n\ndef _validate_minkowski_kwargs(X, m, n, **kwargs):\n kwargs = _validate_weight_with_size(X, m, n, **kwargs)\n if 'p' not in kwargs:\n kwargs['p'] = 2.\n else:\n if kwargs['p'] < 1:\n raise ValueError(\"p must be at least 1\")\n\n return kwargs\n\n\ndef _validate_pdist_input(X, m, n, metric_info, **kwargs):\n # get supported types\n types = metric_info.types\n # choose best type\n typ = types[types.index(X.dtype)] if X.dtype in types else types[0]\n # validate data\n X = _convert_to_type(X, out_type=typ)\n\n # validate kwargs\n _validate_kwargs = metric_info.validator\n if _validate_kwargs:\n kwargs = _validate_kwargs(X, m, n, **kwargs)\n return X, typ, kwargs\n\n\ndef _validate_seuclidean_kwargs(X, m, n, **kwargs):\n V = kwargs.pop('V', None)\n if V is None:\n if isinstance(X, tuple):\n X = np.vstack(X)\n V = np.var(X.astype(np.double, copy=False), axis=0, ddof=1)\n else:\n V = np.asarray(V, order='c')\n if len(V.shape) != 1:\n raise ValueError('Variance vector V must '\n 'be one-dimensional.')\n if V.shape[0] != n:\n raise ValueError('Variance vector V must be of the same '\n 'dimension as the vectors on which the distances '\n 'are computed.')\n kwargs['V'] = _convert_to_double(V)\n return kwargs\n\n\ndef _validate_vector(u, dtype=None):\n # XXX Is order='c' really necessary?\n u = np.asarray(u, dtype=dtype, order='c')\n if u.ndim == 1:\n return u\n\n # Ensure values such as u=1 and u=[1] still return 1-D arrays.\n u = np.atleast_1d(u.squeeze())\n if u.ndim > 1:\n raise ValueError(\"Input vector should be 1-D.\")\n warnings.warn(\n \"scipy.spatial.distance metrics ignoring length-1 dimensions is \"\n \"deprecated in SciPy 1.7 and will raise an error in SciPy 1.9.\",\n DeprecationWarning)\n return u\n\n\ndef _validate_weights(w, dtype=np.double):\n w = _validate_vector(w, dtype=dtype)\n if np.any(w < 0):\n raise ValueError(\"Input weights should be all non-negative\")\n return w\n\n\n@_deprecated(\n msg=\"'wminkowski' metric is deprecated and will be removed in\"\n \" SciPy 1.8.0, use 'minkowski' instead.\")\ndef _validate_wminkowski_kwargs(X, m, n, **kwargs):\n w = kwargs.pop('w', None)\n if w is None:\n raise ValueError('weighted minkowski requires a weight '\n 'vector `w` to be given.')\n kwargs['w'] = _validate_weights(w)\n if 'p' not in kwargs:\n kwargs['p'] = 2.\n return kwargs\n\n\ndef directed_hausdorff(u, v, seed=0):\n \"\"\"\n Compute the directed Hausdorff distance between two N-D arrays.\n\n Distances between pairs are calculated using a Euclidean metric.\n\n Parameters\n ----------\n u : (M,N) array_like\n Input array.\n v : (O,N) array_like\n Input array.\n seed : int or None\n Local `numpy.random.RandomState` seed. Default is 0, a random\n shuffling of u and v that guarantees reproducibility.\n\n Returns\n -------\n d : double\n The directed Hausdorff distance between arrays `u` and `v`,\n\n index_1 : int\n index of point contributing to Hausdorff pair in `u`\n\n index_2 : int\n index of point contributing to Hausdorff pair in `v`\n\n Raises\n ------\n ValueError\n An exception is thrown if `u` and `v` do not have\n the same number of columns.\n\n Notes\n -----\n Uses the early break technique and the random sampling approach\n described by [1]_. 
Although worst-case performance is ``O(m * o)``\n (as with the brute force algorithm), this is unlikely in practice\n as the input data would have to require the algorithm to explore\n every single point interaction, and after the algorithm shuffles\n the input points at that. The best case performance is O(m), which\n is satisfied by selecting an inner loop distance that is less than\n cmax and leads to an early break as often as possible. The authors\n have formally shown that the average runtime is closer to O(m).\n\n .. versionadded:: 0.19.0\n\n References\n ----------\n .. [1] A. A. Taha and A. Hanbury, \"An efficient algorithm for\n calculating the exact Hausdorff distance.\" IEEE Transactions On\n Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,\n 2015.\n\n See Also\n --------\n scipy.spatial.procrustes : Another similarity test for two data sets\n\n Examples\n --------\n Find the directed Hausdorff distance between two 2-D arrays of\n coordinates:\n\n >>> from scipy.spatial.distance import directed_hausdorff\n >>> u = np.array([(1.0, 0.0),\n ... (0.0, 1.0),\n ... (-1.0, 0.0),\n ... (0.0, -1.0)])\n >>> v = np.array([(2.0, 0.0),\n ... (0.0, 2.0),\n ... (-2.0, 0.0),\n ... (0.0, -4.0)])\n\n >>> directed_hausdorff(u, v)[0]\n 2.23606797749979\n >>> directed_hausdorff(v, u)[0]\n 3.0\n\n Find the general (symmetric) Hausdorff distance between two 2-D\n arrays of coordinates:\n\n >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])\n 3.0\n\n Find the indices of the points that generate the Hausdorff distance\n (the Hausdorff pair):\n\n >>> directed_hausdorff(v, u)[1:]\n (3, 3)\n\n \"\"\"\n u = np.asarray(u, dtype=np.float64, order='c')\n v = np.asarray(v, dtype=np.float64, order='c')\n if u.shape[1] != v.shape[1]:\n raise ValueError('u and v need to have the same '\n 'number of columns')\n result = _hausdorff.directed_hausdorff(u, v, seed)\n return result\n\n\ndef minkowski(u, v, p=2, w=None):\n \"\"\"\n Compute the Minkowski distance between two 1-D arrays.\n\n The Minkowski distance between 1-D arrays `u` and `v`,\n is defined as\n\n .. math::\n\n {||u-v||}_p = (\\\\sum{|u_i - v_i|^p})^{1/p}.\n\n\n \\\\left(\\\\sum{w_i(|(u_i - v_i)|^p)}\\\\right)^{1/p}.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n p : scalar\n The order of the norm of the difference :math:`{||u-v||}_p`.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. 
Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n minkowski : double\n The Minkowski distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)\n 2.0\n >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)\n 1.4142135623730951\n >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)\n 1.2599210498948732\n >>> distance.minkowski([1, 1, 0], [0, 1, 0], 1)\n 1.0\n >>> distance.minkowski([1, 1, 0], [0, 1, 0], 2)\n 1.0\n >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3)\n 1.0\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if p < 1:\n raise ValueError(\"p must be at least 1\")\n u_v = u - v\n if w is not None:\n w = _validate_weights(w)\n if p == 1:\n root_w = w\n elif p == 2:\n # better precision and speed\n root_w = np.sqrt(w)\n elif p == np.inf:\n root_w = (w != 0)\n else:\n root_w = np.power(w, 1/p)\n u_v = root_w * u_v\n dist = norm(u_v, ord=p)\n return dist\n\n\ndef wminkowski(u, v, p, w):\n \"\"\"\n Compute the weighted Minkowski distance between two 1-D arrays.\n\n The weighted Minkowski distance between `u` and `v`, defined as\n\n .. math::\n\n \\\\left(\\\\sum{(|w_i (u_i - v_i)|^p)}\\\\right)^{1/p}.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n p : scalar\n The order of the norm of the difference :math:`{||u-v||}_p`.\n w : (N,) array_like\n The weight vector.\n\n Returns\n -------\n wminkowski : double\n The weighted Minkowski distance between vectors `u` and `v`.\n\n Notes\n -----\n `wminkowski` is deprecated and will be removed in SciPy 1.8.0.\n Use `minkowski` with the ``w`` argument instead.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 1, np.ones(3))\n 2.0\n >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 2, np.ones(3))\n 1.4142135623730951\n >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 3, np.ones(3))\n 1.2599210498948732\n >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 1, np.ones(3))\n 1.0\n >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 2, np.ones(3))\n 1.0\n >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 3, np.ones(3))\n 1.0\n\n \"\"\"\n warnings.warn(\n message=\"scipy.distance.wminkowski is deprecated and will be removed \"\n \"in SciPy 1.8.0, use scipy.distance.minkowski instead.\",\n category=DeprecationWarning)\n w = _validate_weights(w)\n return minkowski(u, v, p=p, w=w**p)\n\n\ndef euclidean(u, v, w=None):\n \"\"\"\n Computes the Euclidean distance between two 1-D arrays.\n\n The Euclidean distance between 1-D arrays `u` and `v`, is defined as\n\n .. math::\n\n {||u-v||}_2\n\n \\\\left(\\\\sum{(w_i |(u_i - v_i)|^2)}\\\\right)^{1/2}\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n euclidean : double\n The Euclidean distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.euclidean([1, 0, 0], [0, 1, 0])\n 1.4142135623730951\n >>> distance.euclidean([1, 1, 0], [0, 1, 0])\n 1.0\n\n \"\"\"\n return minkowski(u, v, p=2, w=w)\n\n\ndef sqeuclidean(u, v, w=None):\n \"\"\"\n Compute the squared Euclidean distance between two 1-D arrays.\n\n The squared Euclidean distance between `u` and `v` is defined as\n\n .. 
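# --- Illustrative sketch (an editorial addition): the weighted Minkowski
# implementation above folds the weights into the difference vector as
# w**(1/p) * (u - v) before taking the p-norm. A quick consistency check
# against the definition (sum w_i |u_i - v_i|**p)**(1/p), with made-up
# example vectors:
import numpy as np
from scipy.spatial.distance import minkowski

u = np.array([1.0, 0.0, 2.0])
v = np.array([0.0, 1.0, 0.5])
w = np.array([0.5, 1.0, 2.0])
p = 3
by_definition = (w * np.abs(u - v) ** p).sum() ** (1.0 / p)
assert np.isclose(minkowski(u, v, p=p, w=w), by_definition)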
math::\n\n {||u-v||}_2^2\n\n \\\\left(\\\\sum{(w_i |(u_i - v_i)|^2)}\\\\right)\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n sqeuclidean : double\n The squared Euclidean distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])\n 2.0\n >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])\n 1.0\n\n \"\"\"\n # Preserve float dtypes, but convert everything else to np.float64\n # for stability.\n utype, vtype = None, None\n if not (hasattr(u, \"dtype\") and np.issubdtype(u.dtype, np.inexact)):\n utype = np.float64\n if not (hasattr(v, \"dtype\") and np.issubdtype(v.dtype, np.inexact)):\n vtype = np.float64\n\n u = _validate_vector(u, dtype=utype)\n v = _validate_vector(v, dtype=vtype)\n u_v = u - v\n u_v_w = u_v # only want weights applied once\n if w is not None:\n w = _validate_weights(w)\n u_v_w = w * u_v\n return np.dot(u_v, u_v_w)\n\n\ndef correlation(u, v, w=None, centered=True):\n \"\"\"\n Compute the correlation distance between two 1-D arrays.\n\n The correlation distance between `u` and `v`, is\n defined as\n\n .. math::\n\n 1 - \\\\frac{(u - \\\\bar{u}) \\\\cdot (v - \\\\bar{v})}\n {{||(u - \\\\bar{u})||}_2 {||(v - \\\\bar{v})||}_2}\n\n where :math:`\\\\bar{u}` is the mean of the elements of `u`\n and :math:`x \\\\cdot y` is the dot product of :math:`x` and :math:`y`.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n centered : bool, optional\n If True, `u` and `v` will be centered. Default is True.\n\n Returns\n -------\n correlation : double\n The correlation distance between 1-D array `u` and `v`.\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is not None:\n w = _validate_weights(w)\n if centered:\n umu = np.average(u, weights=w)\n vmu = np.average(v, weights=w)\n u = u - umu\n v = v - vmu\n uv = np.average(u * v, weights=w)\n uu = np.average(np.square(u), weights=w)\n vv = np.average(np.square(v), weights=w)\n dist = 1.0 - uv / np.sqrt(uu * vv)\n # Return absolute value to avoid small negative value due to rounding\n return np.abs(dist)\n\n\ndef cosine(u, v, w=None):\n \"\"\"\n Compute the Cosine distance between 1-D arrays.\n\n The Cosine distance between `u` and `v`, is defined as\n\n .. math::\n\n 1 - \\\\frac{u \\\\cdot v}\n {||u||_2 ||v||_2}.\n\n where :math:`u \\\\cdot v` is the dot product of :math:`u` and\n :math:`v`.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. 
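# --- Illustrative sketch (an editorial addition): the weighted squared
# Euclidean distance applies the weights exactly once, computing
# (u - v) . (w * (u - v)), so it agrees both with the plain formula
# sum(w * (u - v)**2) and with euclidean(...)**2:
import numpy as np
from scipy.spatial.distance import euclidean, sqeuclidean

u, v = np.array([1.0, 2.0, 0.0]), np.array([0.0, 1.0, 3.0])
w = np.array([1.0, 0.5, 2.0])
assert np.isclose(sqeuclidean(u, v, w), (w * (u - v) ** 2).sum())
assert np.isclose(sqeuclidean(u, v, w), euclidean(u, v, w) ** 2)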
Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n cosine : double\n The Cosine distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.cosine([1, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.cosine([100, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.cosine([1, 1, 0], [0, 1, 0])\n 0.29289321881345254\n\n \"\"\"\n # cosine distance is also referred to as 'uncentered correlation',\n # or 'reflective correlation'\n # clamp the result to 0-2\n return max(0, min(correlation(u, v, w=w, centered=False), 2.0))\n\n\ndef hamming(u, v, w=None):\n \"\"\"\n Compute the Hamming distance between two 1-D arrays.\n\n The Hamming distance between 1-D arrays `u` and `v`, is simply the\n proportion of disagreeing components in `u` and `v`. If `u` and `v` are\n boolean vectors, the Hamming distance is\n\n .. math::\n\n \\\\frac{c_{01} + c_{10}}{n}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n`.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n hamming : double\n The Hamming distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.hamming([1, 0, 0], [0, 1, 0])\n 0.66666666666666663\n >>> distance.hamming([1, 0, 0], [1, 1, 0])\n 0.33333333333333331\n >>> distance.hamming([1, 0, 0], [2, 0, 0])\n 0.33333333333333331\n >>> distance.hamming([1, 0, 0], [3, 0, 0])\n 0.33333333333333331\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if u.shape != v.shape:\n raise ValueError('The 1d arrays must have equal lengths.')\n u_ne_v = u != v\n if w is not None:\n w = _validate_weights(w)\n return np.average(u_ne_v, weights=w)\n\n\ndef jaccard(u, v, w=None):\n \"\"\"\n Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.\n\n The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,\n is defined as\n\n .. math::\n\n \\\\frac{c_{TF} + c_{FT}}\n {c_{TT} + c_{FT} + c_{TF}}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input array.\n v : (N,) array_like, bool\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n jaccard : double\n The Jaccard distance between vectors `u` and `v`.\n\n Notes\n -----\n When both `u` and `v` lead to a `0/0` division i.e. there is no overlap\n between the items in the vectors the returned distance is 0. See the\n Wikipedia page on the Jaccard index [1]_, and this paper [2]_.\n\n .. versionchanged:: 1.2.0\n Previously, when `u` and `v` lead to a `0/0` division, the function\n would return NaN. This was changed to return 0 instead.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Jaccard_index\n .. [2] S. 
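# --- Illustrative sketch (an editorial addition): hamming above reduces to
# a weighted average of the elementwise disagreement mask, so a plain
# NumPy equivalent is np.average(u != v, weights=w). Example values are
# hypothetical:
import numpy as np
from scipy.spatial.distance import hamming

u = np.array([1, 0, 0, 1])
v = np.array([0, 0, 1, 1])
w = np.array([2.0, 1.0, 1.0, 1.0])
assert np.isclose(hamming(u, v, w), np.average(u != v, weights=w))
assert np.isclose(hamming(u, v), 0.5)  # 2 of 4 positions differ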
Kosub, \"A note on the triangle inequality for the Jaccard\n distance\", 2016, :arxiv:`1612.02696`\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.jaccard([1, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.jaccard([1, 0, 0], [1, 1, 0])\n 0.5\n >>> distance.jaccard([1, 0, 0], [1, 2, 0])\n 0.5\n >>> distance.jaccard([1, 0, 0], [1, 1, 1])\n 0.66666666666666663\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n\n nonzero = np.bitwise_or(u != 0, v != 0)\n unequal_nonzero = np.bitwise_and((u != v), nonzero)\n if w is not None:\n w = _validate_weights(w)\n nonzero = w * nonzero\n unequal_nonzero = w * unequal_nonzero\n a = np.double(unequal_nonzero.sum())\n b = np.double(nonzero.sum())\n return (a / b) if b != 0 else 0\n\n\ndef kulsinski(u, v, w=None):\n \"\"\"\n Compute the Kulsinski dissimilarity between two boolean 1-D arrays.\n\n The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,\n is defined as\n\n .. math::\n\n \\\\frac{c_{TF} + c_{FT} - c_{TT} + n}\n {c_{FT} + c_{TF} + n}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input array.\n v : (N,) array_like, bool\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n kulsinski : double\n The Kulsinski distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.kulsinski([1, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.kulsinski([1, 0, 0], [1, 1, 0])\n 0.75\n >>> distance.kulsinski([1, 0, 0], [2, 1, 0])\n 0.33333333333333331\n >>> distance.kulsinski([1, 0, 0], [3, 1, 0])\n -0.5\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is None:\n n = float(len(u))\n else:\n w = _validate_weights(w)\n n = w.sum()\n (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)\n\n return (ntf + nft - ntt + n) / (ntf + nft + n)\n\n\ndef seuclidean(u, v, V):\n \"\"\"\n Return the standardized Euclidean distance between two 1-D arrays.\n\n The standardized Euclidean distance between `u` and `v`.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n V : (N,) array_like\n `V` is an 1-D array of component variances. It is usually computed\n among a larger collection vectors.\n\n Returns\n -------\n seuclidean : double\n The standardized Euclidean distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])\n 4.4721359549995796\n >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])\n 3.3166247903553998\n >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [10, 0.1, 0.1])\n 3.1780497164141406\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n V = _validate_vector(V, dtype=np.float64)\n if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:\n raise TypeError('V must be a 1-D array of the same dimension '\n 'as u and v.')\n return euclidean(u, v, w=1/V)\n\n\ndef cityblock(u, v, w=None):\n \"\"\"\n Compute the City Block (Manhattan) distance.\n\n Computes the Manhattan distance between two 1-D arrays `u` and `v`,\n which is defined as\n\n .. 
math::\n\n \\\\sum_i {\\\\left| u_i - v_i \\\\right|}.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n cityblock : double\n The City Block (Manhattan) distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.cityblock([1, 0, 0], [0, 1, 0])\n 2\n >>> distance.cityblock([1, 0, 0], [0, 2, 0])\n 3\n >>> distance.cityblock([1, 0, 0], [1, 1, 0])\n 1\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n l1_diff = abs(u - v)\n if w is not None:\n w = _validate_weights(w)\n l1_diff = w * l1_diff\n return l1_diff.sum()\n\n\ndef mahalanobis(u, v, VI):\n \"\"\"\n Compute the Mahalanobis distance between two 1-D arrays.\n\n The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as\n\n .. math::\n\n \\\\sqrt{ (u-v) V^{-1} (u-v)^T }\n\n where ``V`` is the covariance matrix. Note that the argument `VI`\n is the inverse of ``V``.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n VI : array_like\n The inverse of the covariance matrix.\n\n Returns\n -------\n mahalanobis : double\n The Mahalanobis distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]\n >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)\n 1.0\n >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv)\n 1.0\n >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)\n 1.7320508075688772\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n VI = np.atleast_2d(VI)\n delta = u - v\n m = np.dot(np.dot(delta, VI), delta)\n return np.sqrt(m)\n\n\ndef chebyshev(u, v, w=None):\n \"\"\"\n Compute the Chebyshev distance.\n\n Computes the Chebyshev distance between two 1-D arrays `u` and `v`,\n which is defined as\n\n .. math::\n\n \\\\max_i {|u_i-v_i|}.\n\n Parameters\n ----------\n u : (N,) array_like\n Input vector.\n v : (N,) array_like\n Input vector.\n w : (N,) array_like, optional\n Unused, as 'max' is a weightless operation. Here for API consistency.\n\n Returns\n -------\n chebyshev : double\n The Chebyshev distance between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.chebyshev([1, 0, 0], [0, 1, 0])\n 1\n >>> distance.chebyshev([1, 1, 0], [0, 1, 0])\n 1\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is not None:\n w = _validate_weights(w)\n has_weight = w > 0\n if has_weight.sum() < w.size:\n u = u[has_weight]\n v = v[has_weight]\n return max(abs(u - v))\n\n\ndef braycurtis(u, v, w=None):\n \"\"\"\n Compute the Bray-Curtis distance between two 1-D arrays.\n\n Bray-Curtis distance is defined as\n\n .. math::\n\n \\\\sum{|u_i-v_i|} / \\\\sum{|u_i+v_i|}\n\n The Bray-Curtis distance is in the range [0, 1] if all coordinates are\n positive, and is undefined if the inputs are of length zero.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. 
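# --- Illustrative sketch (an editorial addition): mahalanobis expects the
# *inverse* covariance VI; when pdist/cdist receive no VI they derive it
# from the data as inv(cov(X.T)), matching the validator earlier in this
# file. Sample data here is hypothetical:
import numpy as np
from scipy.spatial.distance import mahalanobis

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))            # 50 observations, 3 dimensions
VI = np.linalg.inv(np.cov(X.T))
delta = X[0] - X[1]
expected = np.sqrt(delta @ VI @ delta)  # sqrt((u-v) VI (u-v)^T)
assert np.isclose(mahalanobis(X[0], X[1], VI), expected)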
Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n braycurtis : double\n The Bray-Curtis distance between 1-D arrays `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.braycurtis([1, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.braycurtis([1, 1, 0], [0, 1, 0])\n 0.33333333333333331\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v, dtype=np.float64)\n l1_diff = abs(u - v)\n l1_sum = abs(u + v)\n if w is not None:\n w = _validate_weights(w)\n l1_diff = w * l1_diff\n l1_sum = w * l1_sum\n return l1_diff.sum() / l1_sum.sum()\n\n\ndef canberra(u, v, w=None):\n \"\"\"\n Compute the Canberra distance between two 1-D arrays.\n\n The Canberra distance is defined as\n\n .. math::\n\n d(u,v) = \\\\sum_i \\\\frac{|u_i-v_i|}\n {|u_i|+|v_i|}.\n\n Parameters\n ----------\n u : (N,) array_like\n Input array.\n v : (N,) array_like\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n canberra : double\n The Canberra distance between vectors `u` and `v`.\n\n Notes\n -----\n When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is\n used in the calculation.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.canberra([1, 0, 0], [0, 1, 0])\n 2.0\n >>> distance.canberra([1, 1, 0], [0, 1, 0])\n 1.0\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v, dtype=np.float64)\n if w is not None:\n w = _validate_weights(w)\n with np.errstate(invalid='ignore'):\n abs_uv = abs(u - v)\n abs_u = abs(u)\n abs_v = abs(v)\n d = abs_uv / (abs_u + abs_v)\n if w is not None:\n d = w * d\n d = np.nansum(d)\n return d\n\n\ndef jensenshannon(p, q, base=None):\n \"\"\"\n Compute the Jensen-Shannon distance (metric) between\n two 1-D probability arrays. This is the square root\n of the Jensen-Shannon divergence.\n\n The Jensen-Shannon distance between two probability\n vectors `p` and `q` is defined as,\n\n .. math::\n\n \\\\sqrt{\\\\frac{D(p \\\\parallel m) + D(q \\\\parallel m)}{2}}\n\n where :math:`m` is the pointwise mean of :math:`p` and :math:`q`\n and :math:`D` is the Kullback-Leibler divergence.\n\n This routine will normalize `p` and `q` if they don't sum to 1.0.\n\n Parameters\n ----------\n p : (N,) array_like\n left probability vector\n q : (N,) array_like\n right probability vector\n base : double, optional\n the base of the logarithm used to compute the output\n if not given, then the routine uses the default base of\n scipy.stats.entropy.\n\n Returns\n -------\n js : double\n The Jensen-Shannon distance between `p` and `q`\n\n .. versionadded:: 1.2.0\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0)\n 1.0\n >>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5])\n 0.46450140402245893\n >>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])\n 0.0\n\n \"\"\"\n p = np.asarray(p)\n q = np.asarray(q)\n p = p / np.sum(p, axis=0)\n q = q / np.sum(q, axis=0)\n m = (p + q) / 2.0\n left = rel_entr(p, m)\n right = rel_entr(q, m)\n js = np.sum(left, axis=0) + np.sum(right, axis=0)\n if base is not None:\n js /= np.log(base)\n return np.sqrt(js / 2.0)\n\n\ndef yule(u, v, w=None):\n \"\"\"\n Compute the Yule dissimilarity between two boolean 1-D arrays.\n\n The Yule dissimilarity is defined as\n\n .. 
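# --- Illustrative sketch (an editorial addition): jensenshannon above is
# the square root of half the sum of two KL divergences against the
# pointwise mean m = (p + q) / 2. A plain NumPy re-derivation of the
# docstring's second example:
import numpy as np
from scipy.spatial.distance import jensenshannon

def _kl(a, b):
    # Kullback-Leibler divergence with the 0 * log(0/x) = 0 convention
    mask = a > 0
    return np.sum(a[mask] * np.log(a[mask] / b[mask]))

p = np.array([1.0, 0.0])
q = np.array([0.5, 0.5])
m = (p + q) / 2
assert np.isclose(jensenshannon(p, q), np.sqrt((_kl(p, m) + _kl(q, m)) / 2))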
math::\n\n \\\\frac{R}{c_{TT} * c_{FF} + \\\\frac{R}{2}}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input array.\n v : (N,) array_like, bool\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n yule : double\n The Yule dissimilarity between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.yule([1, 0, 0], [0, 1, 0])\n 2.0\n >>> distance.yule([1, 1, 0], [0, 1, 0])\n 0.0\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is not None:\n w = _validate_weights(w)\n (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)\n half_R = ntf * nft\n if half_R == 0:\n return 0.0\n else:\n return float(2.0 * half_R / (ntt * nff + half_R))\n\n\[email protected](message=\"spatial.distance.matching is deprecated in scipy 1.0.0; \"\n \"use spatial.distance.hamming instead.\")\ndef matching(u, v, w=None):\n \"\"\"\n Compute the Hamming distance between two boolean 1-D arrays.\n\n This is a deprecated synonym for :func:`hamming`.\n \"\"\"\n return hamming(u, v, w=w)\n\n\ndef dice(u, v, w=None):\n \"\"\"\n Compute the Dice dissimilarity between two boolean 1-D arrays.\n\n The Dice dissimilarity between `u` and `v`, is\n\n .. math::\n\n \\\\frac{c_{TF} + c_{FT}}\n {2c_{TT} + c_{FT} + c_{TF}}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input 1-D array.\n v : (N,) array_like, bool\n Input 1-D array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n dice : double\n The Dice dissimilarity between 1-D arrays `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.dice([1, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.dice([1, 0, 0], [1, 1, 0])\n 0.3333333333333333\n >>> distance.dice([1, 0, 0], [2, 0, 0])\n -0.3333333333333333\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is not None:\n w = _validate_weights(w)\n if u.dtype == v.dtype == bool and w is None:\n ntt = (u & v).sum()\n else:\n dtype = np.find_common_type([int], [u.dtype, v.dtype])\n u = u.astype(dtype)\n v = v.astype(dtype)\n if w is None:\n ntt = (u * v).sum()\n else:\n ntt = (u * v * w).sum()\n (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)\n return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))\n\n\ndef rogerstanimoto(u, v, w=None):\n \"\"\"\n Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.\n\n The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays\n `u` and `v`, is defined as\n\n .. math::\n \\\\frac{R}\n {c_{TT} + c_{FF} + R}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input array.\n v : (N,) array_like, bool\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. 
Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n rogerstanimoto : double\n The Rogers-Tanimoto dissimilarity between vectors\n `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])\n 0.8\n >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])\n 0.5\n >>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0])\n -1.0\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is not None:\n w = _validate_weights(w)\n (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)\n return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))\n\n\ndef russellrao(u, v, w=None):\n \"\"\"\n Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.\n\n The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and\n `v`, is defined as\n\n .. math::\n\n \\\\frac{n - c_{TT}}\n {n}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input array.\n v : (N,) array_like, bool\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n russellrao : double\n The Russell-Rao dissimilarity between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.russellrao([1, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.russellrao([1, 0, 0], [1, 1, 0])\n 0.6666666666666666\n >>> distance.russellrao([1, 0, 0], [2, 0, 0])\n 0.3333333333333333\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if u.dtype == v.dtype == bool and w is None:\n ntt = (u & v).sum()\n n = float(len(u))\n elif w is None:\n ntt = (u * v).sum()\n n = float(len(u))\n else:\n w = _validate_weights(w)\n ntt = (u * v * w).sum()\n n = w.sum()\n return float(n - ntt) / n\n\n\ndef sokalmichener(u, v, w=None):\n \"\"\"\n Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.\n\n The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,\n is defined as\n\n .. math::\n\n \\\\frac{R}\n {S + R}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and\n :math:`S = c_{FF} + c_{TT}`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input array.\n v : (N,) array_like, bool\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n sokalmichener : double\n The Sokal-Michener dissimilarity between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.sokalmichener([1, 0, 0], [0, 1, 0])\n 0.8\n >>> distance.sokalmichener([1, 0, 0], [1, 1, 0])\n 0.5\n >>> distance.sokalmichener([1, 0, 0], [2, 0, 0])\n -1.0\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if w is not None:\n w = _validate_weights(w)\n nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)\n return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))\n\n\ndef sokalsneath(u, v, w=None):\n \"\"\"\n Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.\n\n The Sokal-Sneath dissimilarity between `u` and `v`,\n\n .. 
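# --- Illustrative sketch (an editorial addition): every boolean
# dissimilarity in this family is a function of the four agreement counts
# that _nbool_correspond_all returns. Recomputing them directly for two
# hypothetical boolean vectors and re-deriving two of the metrics:
import numpy as np
from scipy.spatial.distance import rogerstanimoto, russellrao

u = np.array([True, False, True, True])
v = np.array([False, False, True, False])
ntt = (u & v).sum()          # both True
nff = (~u & ~v).sum()        # both False
ntf = (u & ~v).sum()         # True in u, False in v
nft = (~u & v).sum()         # False in u, True in v
R = 2.0 * (ntf + nft)
assert np.isclose(rogerstanimoto(u, v), R / (ntt + nff + R))
assert np.isclose(russellrao(u, v), (len(u) - ntt) / len(u))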
math::\n\n \\\\frac{R}\n {c_{TT} + R}\n\n where :math:`c_{ij}` is the number of occurrences of\n :math:`\\\\mathtt{u[k]} = i` and :math:`\\\\mathtt{v[k]} = j` for\n :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.\n\n Parameters\n ----------\n u : (N,) array_like, bool\n Input array.\n v : (N,) array_like, bool\n Input array.\n w : (N,) array_like, optional\n The weights for each value in `u` and `v`. Default is None,\n which gives each value a weight of 1.0\n\n Returns\n -------\n sokalsneath : double\n The Sokal-Sneath dissimilarity between vectors `u` and `v`.\n\n Examples\n --------\n >>> from scipy.spatial import distance\n >>> distance.sokalsneath([1, 0, 0], [0, 1, 0])\n 1.0\n >>> distance.sokalsneath([1, 0, 0], [1, 1, 0])\n 0.66666666666666663\n >>> distance.sokalsneath([1, 0, 0], [2, 1, 0])\n 0.0\n >>> distance.sokalsneath([1, 0, 0], [3, 1, 0])\n -2.0\n\n \"\"\"\n u = _validate_vector(u)\n v = _validate_vector(v)\n if u.dtype == v.dtype == bool and w is None:\n ntt = (u & v).sum()\n elif w is None:\n ntt = (u * v).sum()\n else:\n w = _validate_weights(w)\n ntt = (u * v * w).sum()\n (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)\n denom = np.array(ntt + 2.0 * (ntf + nft))\n if not denom.any():\n raise ValueError('Sokal-Sneath dissimilarity is not defined for '\n 'vectors that are entirely false.')\n return float(2.0 * (ntf + nft)) / denom\n\n\n_convert_to_double = partial(_convert_to_type, out_type=np.double)\n_convert_to_bool = partial(_convert_to_type, out_type=bool)\n\n# adding python-only wrappers to _distance_wrap module\n_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap\n_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap\n\n\[email protected](frozen=True)\nclass CDistMetricWrapper:\n metric_name: str\n\n def __call__(self, XA, XB, *, out=None, **kwargs):\n XA = np.ascontiguousarray(XA)\n XB = np.ascontiguousarray(XB)\n mA, n = XA.shape\n mB, _ = XB.shape\n metric_name = self.metric_name\n metric_info = _METRICS[metric_name]\n XA, XB, typ, kwargs = _validate_cdist_input(\n XA, XB, mA, mB, n, metric_info, **kwargs)\n\n w = kwargs.pop('w', None)\n if w is not None:\n metric = metric_info.dist_func\n return _cdist_callable(\n XA, XB, metric=metric, out=out, w=w, **kwargs)\n\n dm = _prepare_out_argument(out, np.double, (mA, mB))\n # get cdist wrapper\n cdist_fn = getattr(_distance_wrap, f'cdist_{metric_name}_{typ}_wrap')\n cdist_fn(XA, XB, dm, **kwargs)\n return dm\n\n\[email protected](frozen=True)\nclass CDistWeightedMetricWrapper:\n metric_name: str\n weighted_metric: str\n\n def __call__(self, XA, XB, *, out=None, **kwargs):\n XA = np.ascontiguousarray(XA)\n XB = np.ascontiguousarray(XB)\n mA, n = XA.shape\n mB, _ = XB.shape\n metric_name = self.metric_name\n XA, XB, typ, kwargs = _validate_cdist_input(\n XA, XB, mA, mB, n, _METRICS[metric_name], **kwargs)\n dm = _prepare_out_argument(out, np.double, (mA, mB))\n\n w = kwargs.pop('w', None)\n if w is not None:\n metric_name = self.weighted_metric\n kwargs['w'] = w\n\n # get cdist wrapper\n cdist_fn = getattr(_distance_wrap, f'cdist_{metric_name}_{typ}_wrap')\n cdist_fn(XA, XB, dm, **kwargs)\n return dm\n\n\[email protected](frozen=True)\nclass PDistMetricWrapper:\n metric_name: str\n\n def __call__(self, X, *, out=None, **kwargs):\n X = np.ascontiguousarray(X)\n m, n = X.shape\n metric_name = self.metric_name\n metric_info = _METRICS[metric_name]\n X, typ, kwargs = _validate_pdist_input(\n X, m, n, metric_info, **kwargs)\n out_size = (m * (m - 1)) // 2\n w = kwargs.pop('w', None)\n if w 
is not None:\n metric = metric_info.dist_func\n return _pdist_callable(\n X, metric=metric, out=out, w=w, **kwargs)\n\n dm = _prepare_out_argument(out, np.double, (out_size,))\n # get pdist wrapper\n pdist_fn = getattr(_distance_wrap, f'pdist_{metric_name}_{typ}_wrap')\n pdist_fn(X, dm, **kwargs)\n return dm\n\n\[email protected](frozen=True)\nclass PDistWeightedMetricWrapper:\n metric_name: str\n weighted_metric: str\n\n def __call__(self, X, *, out=None, **kwargs):\n X = np.ascontiguousarray(X)\n m, n = X.shape\n metric_name = self.metric_name\n X, typ, kwargs = _validate_pdist_input(\n X, m, n, _METRICS[metric_name], **kwargs)\n out_size = (m * (m - 1)) // 2\n dm = _prepare_out_argument(out, np.double, (out_size,))\n\n w = kwargs.pop('w', None)\n if w is not None:\n metric_name = self.weighted_metric\n kwargs['w'] = w\n\n # get pdist wrapper\n pdist_fn = getattr(_distance_wrap, f'pdist_{metric_name}_{typ}_wrap')\n pdist_fn(X, dm, **kwargs)\n return dm\n\n\[email protected](frozen=True)\nclass MetricInfo:\n # Name of python distance function\n canonical_name: str\n # All aliases, including canonical_name\n aka: Set[str]\n # unvectorized distance function\n dist_func: Callable\n # Optimized cdist function\n cdist_func: Callable\n # Optimized pdist function\n pdist_func: Callable\n # function that checks kwargs and computes default values:\n # f(X, m, n, **kwargs)\n validator: Optional[Callable] = None\n # list of supported types:\n # X (pdist) and XA (cdist) are used to choose the type. if there is no\n # match the first type is used. Default double\n types: List[str] = dataclasses.field(default_factory=lambda: ['double'])\n\n\n# Registry of implemented metrics:\n_METRIC_INFOS = [\n MetricInfo(\n canonical_name='braycurtis',\n aka={'braycurtis'},\n dist_func=braycurtis,\n cdist_func=CDistMetricWrapper('braycurtis'),\n pdist_func=PDistMetricWrapper('braycurtis'),\n ),\n MetricInfo(\n canonical_name='canberra',\n aka={'canberra'},\n dist_func=canberra,\n cdist_func=CDistMetricWrapper('canberra'),\n pdist_func=PDistMetricWrapper('canberra'),\n ),\n MetricInfo(\n canonical_name='chebyshev',\n aka={'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'},\n dist_func=chebyshev,\n validator=_validate_weight_with_size,\n cdist_func=CDistWeightedMetricWrapper(\n 'chebyshev', 'weighted_chebyshev'),\n pdist_func=PDistWeightedMetricWrapper(\n 'chebyshev', 'weighted_chebyshev'),\n ),\n MetricInfo(\n canonical_name='cityblock',\n aka={'cityblock', 'cblock', 'cb', 'c'},\n dist_func=cityblock,\n cdist_func=CDistMetricWrapper('cityblock'),\n pdist_func=PDistMetricWrapper('cityblock'),\n ),\n MetricInfo(\n canonical_name='correlation',\n aka={'correlation', 'co'},\n dist_func=correlation,\n cdist_func=CDistMetricWrapper('correlation'),\n pdist_func=PDistMetricWrapper('correlation'),\n ),\n MetricInfo(\n canonical_name='cosine',\n aka={'cosine', 'cos'},\n dist_func=cosine,\n cdist_func=CDistMetricWrapper('cosine'),\n pdist_func=PDistMetricWrapper('cosine'),\n ),\n MetricInfo(\n canonical_name='dice',\n aka={'dice'},\n types=['bool'],\n dist_func=dice,\n cdist_func=CDistMetricWrapper('dice'),\n pdist_func=PDistMetricWrapper('dice'),\n ),\n MetricInfo(\n canonical_name='euclidean',\n aka={'euclidean', 'euclid', 'eu', 'e'},\n dist_func=euclidean,\n cdist_func=CDistMetricWrapper('euclidean'),\n pdist_func=PDistMetricWrapper('euclidean'),\n ),\n MetricInfo(\n canonical_name='hamming',\n aka={'matching', 'hamming', 'hamm', 'ha', 'h'},\n types=['double', 'bool'],\n validator=_validate_hamming_kwargs,\n 
dist_func=hamming,\n cdist_func=CDistWeightedMetricWrapper('hamming', 'hamming'),\n pdist_func=PDistWeightedMetricWrapper('hamming', 'hamming'),\n ),\n MetricInfo(\n canonical_name='jaccard',\n aka={'jaccard', 'jacc', 'ja', 'j'},\n types=['double', 'bool'],\n dist_func=jaccard,\n cdist_func=CDistMetricWrapper('jaccard'),\n pdist_func=PDistMetricWrapper('jaccard'),\n ),\n MetricInfo(\n canonical_name='jensenshannon',\n aka={'jensenshannon', 'js'},\n dist_func=jensenshannon,\n cdist_func=CDistMetricWrapper('jensenshannon'),\n pdist_func=PDistMetricWrapper('jensenshannon'),\n ),\n MetricInfo(\n canonical_name='kulsinski',\n aka={'kulsinski'},\n types=['bool'],\n dist_func=kulsinski,\n cdist_func=CDistMetricWrapper('kulsinski'),\n pdist_func=PDistMetricWrapper('kulsinski'),\n ),\n MetricInfo(\n canonical_name='mahalanobis',\n aka={'mahalanobis', 'mahal', 'mah'},\n validator=_validate_mahalanobis_kwargs,\n dist_func=mahalanobis,\n cdist_func=CDistMetricWrapper('mahalanobis'),\n pdist_func=PDistMetricWrapper('mahalanobis'),\n ),\n MetricInfo(\n canonical_name='minkowski',\n aka={'minkowski', 'mi', 'm', 'pnorm'},\n validator=_validate_minkowski_kwargs,\n dist_func=minkowski,\n cdist_func=CDistWeightedMetricWrapper(\n 'minkowski', 'weighted_minkowski'),\n pdist_func=PDistWeightedMetricWrapper(\n 'minkowski', 'weighted_minkowski'),\n ),\n MetricInfo(\n canonical_name='rogerstanimoto',\n aka={'rogerstanimoto'},\n types=['bool'],\n dist_func=rogerstanimoto,\n cdist_func=CDistMetricWrapper('rogerstanimoto'),\n pdist_func=PDistMetricWrapper('rogerstanimoto'),\n ),\n MetricInfo(\n canonical_name='russellrao',\n aka={'russellrao'},\n types=['bool'],\n dist_func=russellrao,\n cdist_func=CDistMetricWrapper('russellrao'),\n pdist_func=PDistMetricWrapper('russellrao'),\n ),\n MetricInfo(\n canonical_name='seuclidean',\n aka={'seuclidean', 'se', 's'},\n validator=_validate_seuclidean_kwargs,\n dist_func=seuclidean,\n cdist_func=CDistMetricWrapper('seuclidean'),\n pdist_func=PDistMetricWrapper('seuclidean'),\n ),\n MetricInfo(\n canonical_name='sokalmichener',\n aka={'sokalmichener'},\n types=['bool'],\n dist_func=sokalmichener,\n cdist_func=CDistMetricWrapper('sokalmichener'),\n pdist_func=PDistMetricWrapper('sokalmichener'),\n ),\n MetricInfo(\n canonical_name='sokalsneath',\n aka={'sokalsneath'},\n types=['bool'],\n dist_func=sokalsneath,\n cdist_func=CDistMetricWrapper('sokalsneath'),\n pdist_func=PDistMetricWrapper('sokalsneath'),\n ),\n MetricInfo(\n canonical_name='sqeuclidean',\n aka={'sqeuclidean', 'sqe', 'sqeuclid'},\n dist_func=sqeuclidean,\n cdist_func=CDistMetricWrapper('sqeuclidean'),\n pdist_func=PDistMetricWrapper('sqeuclidean'),\n ),\n MetricInfo(\n canonical_name='wminkowski',\n aka={'wminkowski', 'wmi', 'wm', 'wpnorm'},\n validator=_validate_wminkowski_kwargs,\n dist_func=wminkowski,\n cdist_func=CDistWeightedMetricWrapper(\n 'wminkowski', 'old_weighted_minkowski'),\n pdist_func=PDistWeightedMetricWrapper(\n 'wminkowski', 'old_weighted_minkowski'),\n ),\n MetricInfo(\n canonical_name='yule',\n aka={'yule'},\n types=['bool'],\n dist_func=yule,\n cdist_func=CDistMetricWrapper('yule'),\n pdist_func=PDistMetricWrapper('yule'),\n ),\n]\n\n_METRICS = {info.canonical_name: info for info in _METRIC_INFOS}\n_METRIC_ALIAS = dict((alias, info)\n for info in _METRIC_INFOS\n for alias in info.aka)\n\n_METRICS_NAMES = list(_METRICS.keys())\n\n_TEST_METRICS = {'test_' + info.canonical_name: info for info in _METRIC_INFOS}\n\n\ndef pdist(X, metric='euclidean', *, out=None, **kwargs):\n \"\"\"\n Pairwise 
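# --- Illustrative sketch (an editorial addition, relying only on public
# behaviour): the MetricInfo registry above lets pdist/cdist resolve every
# alias in an `aka` set to the same optimized implementation, so
# 'euclidean', 'euclid', 'eu' and 'e' all dispatch to one C routine:
import numpy as np
from scipy.spatial.distance import pdist

X = np.array([[0.0, 0.0], [3.0, 4.0], [0.0, 1.0]])
d_canonical = pdist(X, 'euclidean')
for alias in ('euclid', 'eu', 'e'):
    assert np.allclose(pdist(X, alias), d_canonical)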
distances between observations in n-dimensional space.\n\n See Notes for common calling conventions.\n\n Parameters\n ----------\n X : array_like\n An m by n array of m original observations in an\n n-dimensional space.\n metric : str or function, optional\n The distance metric to use. The distance function can\n be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',\n 'correlation', 'cosine', 'dice', 'euclidean', 'hamming',\n 'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching',\n 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.\n **kwargs : dict, optional\n Extra arguments to `metric`: refer to each metric documentation for a\n list of all possible arguments.\n\n Some possible arguments:\n\n p : scalar\n The p-norm to apply for Minkowski, weighted and unweighted.\n Default: 2.\n\n w : ndarray\n The weight vector for metrics that support weights (e.g., Minkowski).\n\n V : ndarray\n The variance vector for standardized Euclidean.\n Default: var(X, axis=0, ddof=1)\n\n VI : ndarray\n The inverse of the covariance matrix for Mahalanobis.\n Default: inv(cov(X.T)).T\n\n out : ndarray\n The output array.\n If not None, condensed distance matrix Y is stored in this array.\n\n Returns\n -------\n Y : ndarray\n Returns a condensed distance matrix Y. For each :math:`i` and :math:`j`\n (where :math:`i<j<m`), where m is the number of original observations,\n the metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``m\n * i + j - ((i + 2) * (i + 1)) // 2``.\n\n See Also\n --------\n squareform : converts between condensed distance matrices and\n square distance matrices.\n\n Notes\n -----\n See ``squareform`` for information on how to calculate the index of\n this entry or to convert the condensed distance matrix to a\n redundant square matrix.\n\n The following are common calling conventions.\n\n 1. ``Y = pdist(X, 'euclidean')``\n\n Computes the distance between m points using Euclidean distance\n (2-norm) as the distance metric between the points. The points\n are arranged as m n-dimensional row vectors in the matrix X.\n\n 2. ``Y = pdist(X, 'minkowski', p=2.)``\n\n Computes the distances using the Minkowski distance\n :math:`||u-v||_p` (p-norm) where :math:`p \\\\geq 1`.\n\n 3. ``Y = pdist(X, 'cityblock')``\n\n Computes the city block or Manhattan distance between the\n points.\n\n 4. ``Y = pdist(X, 'seuclidean', V=None)``\n\n Computes the standardized Euclidean distance. The standardized\n Euclidean distance between two n-vectors ``u`` and ``v`` is\n\n .. math::\n\n \\\\sqrt{\\\\sum {(u_i-v_i)^2 / V[i]}}\n\n\n V is the variance vector; V[i] is the variance computed over all\n the i'th components of the points. If not passed, it is\n automatically computed.\n\n 5. ``Y = pdist(X, 'sqeuclidean')``\n\n Computes the squared Euclidean distance :math:`||u-v||_2^2` between\n the vectors.\n\n 6. ``Y = pdist(X, 'cosine')``\n\n Computes the cosine distance between vectors u and v,\n\n .. math::\n\n 1 - \\\\frac{u \\\\cdot v}\n {{||u||}_2 {||v||}_2}\n\n where :math:`||*||_2` is the 2-norm of its argument ``*``, and\n :math:`u \\\\cdot v` is the dot product of ``u`` and ``v``.\n\n 7. ``Y = pdist(X, 'correlation')``\n\n Computes the correlation distance between vectors u and v. This is\n\n .. 
math::\n\n 1 - \\\\frac{(u - \\\\bar{u}) \\\\cdot (v - \\\\bar{v})}\n {{||(u - \\\\bar{u})||}_2 {||(v - \\\\bar{v})||}_2}\n\n where :math:`\\\\bar{v}` is the mean of the elements of vector v,\n and :math:`x \\\\cdot y` is the dot product of :math:`x` and :math:`y`.\n\n 8. ``Y = pdist(X, 'hamming')``\n\n Computes the normalized Hamming distance, or the proportion of\n those vector elements between two n-vectors ``u`` and ``v``\n which disagree. To save memory, the matrix ``X`` can be of type\n boolean.\n\n 9. ``Y = pdist(X, 'jaccard')``\n\n Computes the Jaccard distance between the points. Given two\n vectors, ``u`` and ``v``, the Jaccard distance is the\n proportion of those elements ``u[i]`` and ``v[i]`` that\n disagree.\n\n 10. ``Y = pdist(X, 'chebyshev')``\n\n Computes the Chebyshev distance between the points. The\n Chebyshev distance between two n-vectors ``u`` and ``v`` is the\n maximum norm-1 distance between their respective elements. More\n precisely, the distance is given by\n\n .. math::\n\n d(u,v) = \\\\max_i {|u_i-v_i|}\n\n 11. ``Y = pdist(X, 'canberra')``\n\n Computes the Canberra distance between the points. The\n Canberra distance between two points ``u`` and ``v`` is\n\n .. math::\n\n d(u,v) = \\\\sum_i \\\\frac{|u_i-v_i|}\n {|u_i|+|v_i|}\n\n\n 12. ``Y = pdist(X, 'braycurtis')``\n\n Computes the Bray-Curtis distance between the points. The\n Bray-Curtis distance between two points ``u`` and ``v`` is\n\n\n .. math::\n\n d(u,v) = \\\\frac{\\\\sum_i {|u_i-v_i|}}\n {\\\\sum_i {|u_i+v_i|}}\n\n 13. ``Y = pdist(X, 'mahalanobis', VI=None)``\n\n Computes the Mahalanobis distance between the points. The\n Mahalanobis distance between two points ``u`` and ``v`` is\n :math:`\\\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``\n variable) is the inverse covariance. If ``VI`` is not None,\n ``VI`` will be used as the inverse covariance matrix.\n\n 14. ``Y = pdist(X, 'yule')``\n\n Computes the Yule distance between each pair of boolean\n vectors. (see yule function documentation)\n\n 15. ``Y = pdist(X, 'matching')``\n\n Synonym for 'hamming'.\n\n 16. ``Y = pdist(X, 'dice')``\n\n Computes the Dice distance between each pair of boolean\n vectors. (see dice function documentation)\n\n 17. ``Y = pdist(X, 'kulsinski')``\n\n Computes the Kulsinski distance between each pair of\n boolean vectors. (see kulsinski function documentation)\n\n 18. ``Y = pdist(X, 'rogerstanimoto')``\n\n Computes the Rogers-Tanimoto distance between each pair of\n boolean vectors. (see rogerstanimoto function documentation)\n\n 19. ``Y = pdist(X, 'russellrao')``\n\n Computes the Russell-Rao distance between each pair of\n boolean vectors. (see russellrao function documentation)\n\n 20. ``Y = pdist(X, 'sokalmichener')``\n\n Computes the Sokal-Michener distance between each pair of\n boolean vectors. (see sokalmichener function documentation)\n\n 21. ``Y = pdist(X, 'sokalsneath')``\n\n Computes the Sokal-Sneath distance between each pair of\n boolean vectors. (see sokalsneath function documentation)\n\n 22. ``Y = pdist(X, 'wminkowski', p=2, w=w)``\n\n Computes the weighted Minkowski distance between each pair of\n vectors. (see wminkowski function documentation)\n\n 'wminkowski' is deprecated and will be removed in SciPy 1.8.0.\n Use 'minkowski' instead.\n\n 23. ``Y = pdist(X, f)``\n\n Computes the distance between all pairs of vectors in X\n using the user supplied 2-arity function f. 
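# --- Illustrative sketch (an editorial addition): the point made just
# below in this docstring — prefer the string name over passing the
# library's Python function — in runnable form. Both give identical
# condensed distance vectors; the string form dispatches to the optimized
# C implementation instead of a Python-level pair loop.
import numpy as np
from scipy.spatial.distance import pdist, sokalsneath

X = np.array([[1, 0, 0], [0, 1, 0], [1, 1, 0]], dtype=bool)
slow = pdist(X, sokalsneath)      # calls the Python function n-choose-2 times
fast = pdist(X, 'sokalsneath')    # optimized C loop
assert np.allclose(slow, fast)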
For example,\n Euclidean distance between the vectors could be computed\n as follows::\n\n dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))\n\n Note that you should avoid passing a reference to one of\n the distance functions defined in this library. For example::\n\n dm = pdist(X, sokalsneath)\n\n would calculate the pair-wise distances between the vectors in\n X using the Python function sokalsneath. This would result in\n sokalsneath being called :math:`{n \\\\choose 2}` times, which\n is inefficient. Instead, the optimized C version is more\n efficient, and we call it using the following syntax::\n\n dm = pdist(X, 'sokalsneath')\n\n \"\"\"\n # You can also call this as:\n # Y = pdist(X, 'test_abc')\n # where 'abc' is the metric being tested. This computes the distance\n # between all pairs of vectors in X using the distance metric 'abc' but\n # with a more succinct, verifiable, but less efficient implementation.\n\n X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,\n check_finite=False)\n\n s = X.shape\n if len(s) != 2:\n raise ValueError('A 2-dimensional array must be passed.')\n\n m, n = s\n\n if callable(metric):\n mstr = getattr(metric, '__name__', 'UnknownCustomMetric')\n metric_info = _METRIC_ALIAS.get(mstr, None)\n\n if metric_info is not None:\n X, typ, kwargs = _validate_pdist_input(\n X, m, n, metric_info, **kwargs)\n\n return _pdist_callable(X, metric=metric, out=out, **kwargs)\n elif isinstance(metric, str):\n mstr = metric.lower()\n metric_info = _METRIC_ALIAS.get(mstr, None)\n\n if metric_info is not None:\n pdist_fn = metric_info.pdist_func\n return pdist_fn(X, out=out, **kwargs)\n elif mstr.startswith(\"test_\"):\n metric_info = _TEST_METRICS.get(mstr, None)\n if metric_info is None:\n raise ValueError(f'Unknown \"Test\" Distance Metric: {mstr[5:]}')\n X, typ, kwargs = _validate_pdist_input(\n X, m, n, metric_info, **kwargs)\n return _pdist_callable(\n X, metric=metric_info.dist_func, out=out, **kwargs)\n else:\n raise ValueError('Unknown Distance Metric: %s' % mstr)\n else:\n raise TypeError('2nd argument metric must be a string identifier '\n 'or a function.')\n\n\ndef squareform(X, force=\"no\", checks=True):\n \"\"\"\n Convert a vector-form distance vector to a square-form distance\n matrix, and vice-versa.\n\n Parameters\n ----------\n X : array_like\n Either a condensed or redundant distance matrix.\n force : str, optional\n As with MATLAB(TM), if force is equal to ``'tovector'`` or\n ``'tomatrix'``, the input will be treated as a distance matrix or\n distance vector respectively.\n checks : bool, optional\n If set to False, no checks will be made for matrix\n symmetry nor zero diagonals. This is useful if it is known that\n ``X - X.T`` is small and ``diag(X)`` is close to zero.\n These values are ignored anyway so they do not disrupt the\n squareform transformation.\n\n Returns\n -------\n Y : ndarray\n If a condensed distance matrix is passed, a redundant one is\n returned, or if a redundant one is passed, a condensed distance\n matrix is returned.\n\n Notes\n -----\n 1. ``v = squareform(X)``\n\n Given a square n-by-n symmetric distance matrix ``X``,\n ``v = squareform(X)`` returns a ``n * (n-1) / 2``\n (i.e. binomial coefficient n choose 2) sized vector `v`\n where :math:`v[{n \\\\choose 2} - {n-i \\\\choose 2} + (j-i-1)]`\n is the distance between distinct points ``i`` and ``j``.\n If ``X`` is non-square or asymmetric, an error is raised.
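# --- Illustrative sketch (an editorial addition): the condensed index
# documented in pdist above, m * i + j - ((i + 2) * (i + 1)) // 2 for
# i < j, is exactly where squareform stores D[i, j], and the conversion
# round-trips:
import numpy as np
from scipy.spatial.distance import pdist, squareform

X = np.arange(12.0).reshape(4, 3)
m = X.shape[0]
y = pdist(X)                       # condensed vector, length m*(m-1)//2
D = squareform(y)                  # redundant square matrix
for i in range(m):
    for j in range(i + 1, m):
        assert y[m * i + j - ((i + 2) * (i + 1)) // 2] == D[i, j]
assert np.array_equal(squareform(D), y)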
\n\n 2. ``X = squareform(v)``\n\n Given a ``n * (n-1) / 2`` sized vector ``v``\n for some integer ``n >= 1`` encoding distances as described,\n ``X = squareform(v)`` returns an n-by-n distance matrix ``X``.\n The ``X[i, j]`` and ``X[j, i]`` values are set to\n :math:`v[{n \\\\choose 2} - {n-i \\\\choose 2} + (j-i-1)]`\n and all diagonal elements are zero.\n\n In SciPy 0.19.0, ``squareform`` stopped casting all input types to\n float64, and started returning arrays of the same dtype as the input.\n\n \"\"\"\n\n X = np.ascontiguousarray(X)\n\n s = X.shape\n\n if force.lower() == 'tomatrix':\n if len(s) != 1:\n raise ValueError(\"Forcing 'tomatrix' but input X is not a \"\n \"distance vector.\")\n elif force.lower() == 'tovector':\n if len(s) != 2:\n raise ValueError(\"Forcing 'tovector' but input X is not a \"\n \"distance matrix.\")\n\n # X = squareform(v)\n if len(s) == 1:\n if s[0] == 0:\n return np.zeros((1, 1), dtype=X.dtype)\n\n # Grab the closest value to the square root of the number\n # of elements times 2 to see if the number of elements\n # is indeed a binomial coefficient.\n d = int(np.ceil(np.sqrt(s[0] * 2)))\n\n # Check that v is of valid dimensions.\n if d * (d - 1) != s[0] * 2:\n raise ValueError('Incompatible vector size. It must be a binomial '\n 'coefficient n choose 2 for some integer n >= 2.')\n\n # Allocate memory for the distance matrix.\n M = np.zeros((d, d), dtype=X.dtype)\n\n # Since the C code does not support striding using strides,\n # the dimensions are used instead.\n X = _copy_array_if_base_present(X)\n\n # Fill in the values of the distance matrix.\n _distance_wrap.to_squareform_from_vector_wrap(M, X)\n\n # Return the distance matrix.\n return M\n elif len(s) == 2:\n if s[0] != s[1]:\n raise ValueError('The matrix argument must be square.')\n if checks:\n is_valid_dm(X, throw=True, name='X')\n\n # One-side of the dimensions is set here.\n d = s[0]\n\n if d <= 1:\n return np.array([], dtype=X.dtype)\n\n # Create a vector.\n v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)\n\n # Since the C code does not support striding using strides,\n # the dimensions are used instead.\n X = _copy_array_if_base_present(X)\n\n # Convert the vector to squareform.\n _distance_wrap.to_vector_from_squareform_wrap(X, v)\n return v\n else:\n raise ValueError(('The first argument must be a one or two dimensional '\n 'array. A %d-dimensional array is not '\n 'permitted') % len(s))\n\n\ndef is_valid_dm(D, tol=0.0, throw=False, name=\"D\", warning=False):\n \"\"\"\n Return True if input array is a valid distance matrix.\n\n Distance matrices must be 2-dimensional numpy arrays.\n They must have a zero-diagonal, and they must be symmetric.\n\n Parameters\n ----------\n D : array_like\n The candidate object to test for validity.\n tol : float, optional\n The distance matrix should be symmetric. `tol` is the maximum\n difference between entries ``ij`` and ``ji`` for the distance\n metric to be considered symmetric.\n throw : bool, optional\n An exception is thrown if the distance matrix passed is not valid.\n name : str, optional\n The name of the variable to be checked. 
This is useful if\n        throw is set to True so the offending variable can be identified\n        in the exception message when an exception is thrown.\n    warning : bool, optional\n        Instead of throwing an exception, a warning message is\n        raised.\n\n    Returns\n    -------\n    valid : bool\n        True if the variable `D` passed is a valid distance matrix.\n\n    Notes\n    -----\n    Small numerical differences in `D` and `D.T` and non-zeroness of\n    the diagonal are ignored if they are within the tolerance specified\n    by `tol`.\n\n    \"\"\"\n    D = np.asarray(D, order='c')\n    valid = True\n    try:\n        s = D.shape\n        if len(D.shape) != 2:\n            if name:\n                raise ValueError(('Distance matrix \\'%s\\' must have shape=2 '\n                                  '(i.e. be two-dimensional).') % name)\n            else:\n                raise ValueError('Distance matrix must have shape=2 (i.e. '\n                                 'be two-dimensional).')\n        if tol == 0.0:\n            if not (D == D.T).all():\n                if name:\n                    raise ValueError(('Distance matrix \\'%s\\' must be '\n                                      'symmetric.') % name)\n                else:\n                    raise ValueError('Distance matrix must be symmetric.')\n            if not (D[range(0, s[0]), range(0, s[0])] == 0).all():\n                if name:\n                    raise ValueError(('Distance matrix \\'%s\\' diagonal must '\n                                      'be zero.') % name)\n                else:\n                    raise ValueError('Distance matrix diagonal must be zero.')\n        else:\n            if not (D - D.T <= tol).all():\n                if name:\n                    raise ValueError(('Distance matrix \\'%s\\' must be '\n                                      'symmetric within tolerance %5.5f.')\n                                     % (name, tol))\n                else:\n                    raise ValueError('Distance matrix must be symmetric within'\n                                     ' tolerance %5.5f.' % tol)\n            if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():\n                if name:\n                    raise ValueError(('Distance matrix \\'%s\\' diagonal must be'\n                                      ' close to zero within tolerance %5.5f.')\n                                     % (name, tol))\n                else:\n                    raise ValueError(('Distance matrix diagonal must be'\n                                      ' close to zero within tolerance %5.5f.')\n                                     % tol)\n    except Exception as e:\n        if throw:\n            raise\n        if warning:\n            warnings.warn(str(e))\n        valid = False\n    return valid\n\n\ndef is_valid_y(y, warning=False, throw=False, name=None):\n    \"\"\"\n    Return True if the input array is a valid condensed distance matrix.\n\n    Condensed distance matrices must be 1-dimensional numpy arrays.\n    Their length must be a binomial coefficient :math:`{n \\\\choose 2}`\n    for some positive integer n.\n\n    Parameters\n    ----------\n    y : array_like\n        The condensed distance matrix.\n    warning : bool, optional\n        Invokes a warning if the variable passed is not a valid\n        condensed distance matrix. The warning message explains why\n        the distance matrix is not valid. `name` is used when\n        referencing the offending variable.\n    throw : bool, optional\n        Throws an exception if the variable passed is not a valid\n        condensed distance matrix.\n    name : str, optional\n        Used when referencing the offending variable in the\n        warning or exception message.\n\n    \"\"\"\n    y = np.asarray(y, order='c')\n    valid = True\n    try:\n        if len(y.shape) != 1:\n            if name:\n                raise ValueError(('Condensed distance matrix \\'%s\\' must '\n                                  'have shape=1 (i.e. be one-dimensional).')\n                                 % name)\n            else:\n                raise ValueError('Condensed distance matrix must have shape=1 '\n                                 '(i.e. be one-dimensional).')\n        n = y.shape[0]\n        d = int(np.ceil(np.sqrt(n * 2)))\n        if (d * (d - 1) / 2) != n:\n            if name:\n                raise ValueError(('Length n of condensed distance matrix '\n                                  '\\'%s\\' must be a binomial coefficient, i.e. '\n                                  'there must be a k such that '\n                                  '(k \\\\choose 2)=n)!') % name)\n            else:\n                raise ValueError('Length n of condensed distance matrix must '\n                                 'be a binomial coefficient, i.e. 
there must '\n 'be a k such that (k \\\\choose 2)=n)!')\n except Exception as e:\n if throw:\n raise\n if warning:\n warnings.warn(str(e))\n valid = False\n return valid\n\n\ndef num_obs_dm(d):\n \"\"\"\n Return the number of original observations that correspond to a\n square, redundant distance matrix.\n\n Parameters\n ----------\n d : array_like\n The target distance matrix.\n\n Returns\n -------\n num_obs_dm : int\n The number of observations in the redundant distance matrix.\n\n \"\"\"\n d = np.asarray(d, order='c')\n is_valid_dm(d, tol=np.inf, throw=True, name='d')\n return d.shape[0]\n\n\ndef num_obs_y(Y):\n \"\"\"\n Return the number of original observations that correspond to a\n condensed distance matrix.\n\n Parameters\n ----------\n Y : array_like\n Condensed distance matrix.\n\n Returns\n -------\n n : int\n The number of observations in the condensed distance matrix `Y`.\n\n \"\"\"\n Y = np.asarray(Y, order='c')\n is_valid_y(Y, throw=True, name='Y')\n k = Y.shape[0]\n if k == 0:\n raise ValueError(\"The number of observations cannot be determined on \"\n \"an empty distance matrix.\")\n d = int(np.ceil(np.sqrt(k * 2)))\n if (d * (d - 1) / 2) != k:\n raise ValueError(\"Invalid condensed distance matrix passed. Must be \"\n \"some k where k=(n choose 2) for some n >= 2.\")\n return d\n\n\ndef _prepare_out_argument(out, dtype, expected_shape):\n if out is None:\n return np.empty(expected_shape, dtype=dtype)\n\n if out.shape != expected_shape:\n raise ValueError(\"Output array has incorrect shape.\")\n if not out.flags.c_contiguous:\n raise ValueError(\"Output array must be C-contiguous.\")\n if out.dtype != np.double:\n raise ValueError(\"Output array must be double type.\")\n return out\n\n\ndef _pdist_callable(X, *, out, metric, **kwargs):\n n = X.shape[0]\n out_size = (n * (n - 1)) // 2\n dm = _prepare_out_argument(out, np.double, (out_size,))\n k = 0\n for i in range(X.shape[0] - 1):\n for j in range(i + 1, X.shape[0]):\n dm[k] = metric(X[i], X[j], **kwargs)\n k += 1\n return dm\n\n\ndef _cdist_callable(XA, XB, *, out, metric, **kwargs):\n mA = XA.shape[0]\n mB = XB.shape[0]\n dm = _prepare_out_argument(out, np.double, (mA, mB))\n for i in range(mA):\n for j in range(mB):\n dm[i, j] = metric(XA[i], XB[j], **kwargs)\n return dm\n\n\ndef cdist(XA, XB, metric='euclidean', *, out=None, **kwargs):\n \"\"\"\n Compute distance between each pair of the two collections of inputs.\n\n See Notes for common calling conventions.\n\n Parameters\n ----------\n XA : array_like\n An :math:`m_A` by :math:`n` array of :math:`m_A`\n original observations in an :math:`n`-dimensional space.\n Inputs are converted to float type.\n XB : array_like\n An :math:`m_B` by :math:`n` array of :math:`m_B`\n original observations in an :math:`n`-dimensional space.\n Inputs are converted to float type.\n metric : str or callable, optional\n The distance metric to use. 
If a string, the distance function can be\n 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',\n 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',\n 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',\n 'sqeuclidean', 'wminkowski', 'yule'.\n **kwargs : dict, optional\n Extra arguments to `metric`: refer to each metric documentation for a\n list of all possible arguments.\n\n Some possible arguments:\n\n p : scalar\n The p-norm to apply for Minkowski, weighted and unweighted.\n Default: 2.\n\n w : array_like\n The weight vector for metrics that support weights (e.g., Minkowski).\n\n V : array_like\n The variance vector for standardized Euclidean.\n Default: var(vstack([XA, XB]), axis=0, ddof=1)\n\n VI : array_like\n The inverse of the covariance matrix for Mahalanobis.\n Default: inv(cov(vstack([XA, XB].T))).T\n\n out : ndarray\n The output array\n If not None, the distance matrix Y is stored in this array.\n\n Returns\n -------\n Y : ndarray\n A :math:`m_A` by :math:`m_B` distance matrix is returned.\n For each :math:`i` and :math:`j`, the metric\n ``dist(u=XA[i], v=XB[j])`` is computed and stored in the\n :math:`ij` th entry.\n\n Raises\n ------\n ValueError\n An exception is thrown if `XA` and `XB` do not have\n the same number of columns.\n\n Notes\n -----\n The following are common calling conventions:\n\n 1. ``Y = cdist(XA, XB, 'euclidean')``\n\n Computes the distance between :math:`m` points using\n Euclidean distance (2-norm) as the distance metric between the\n points. The points are arranged as :math:`m`\n :math:`n`-dimensional row vectors in the matrix X.\n\n 2. ``Y = cdist(XA, XB, 'minkowski', p=2.)``\n\n Computes the distances using the Minkowski distance\n :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\\\geq 1`.\n\n 3. ``Y = cdist(XA, XB, 'cityblock')``\n\n Computes the city block or Manhattan distance between the\n points.\n\n 4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``\n\n Computes the standardized Euclidean distance. The standardized\n Euclidean distance between two n-vectors ``u`` and ``v`` is\n\n .. math::\n\n \\\\sqrt{\\\\sum {(u_i-v_i)^2 / V[x_i]}}.\n\n V is the variance vector; V[i] is the variance computed over all\n the i'th components of the points. If not passed, it is\n automatically computed.\n\n 5. ``Y = cdist(XA, XB, 'sqeuclidean')``\n\n Computes the squared Euclidean distance :math:`||u-v||_2^2` between\n the vectors.\n\n 6. ``Y = cdist(XA, XB, 'cosine')``\n\n Computes the cosine distance between vectors u and v,\n\n .. math::\n\n 1 - \\\\frac{u \\\\cdot v}\n {{||u||}_2 {||v||}_2}\n\n where :math:`||*||_2` is the 2-norm of its argument ``*``, and\n :math:`u \\\\cdot v` is the dot product of :math:`u` and :math:`v`.\n\n 7. ``Y = cdist(XA, XB, 'correlation')``\n\n Computes the correlation distance between vectors u and v. This is\n\n .. math::\n\n 1 - \\\\frac{(u - \\\\bar{u}) \\\\cdot (v - \\\\bar{v})}\n {{||(u - \\\\bar{u})||}_2 {||(v - \\\\bar{v})||}_2}\n\n where :math:`\\\\bar{v}` is the mean of the elements of vector v,\n and :math:`x \\\\cdot y` is the dot product of :math:`x` and :math:`y`.\n\n\n 8. ``Y = cdist(XA, XB, 'hamming')``\n\n Computes the normalized Hamming distance, or the proportion of\n those vector elements between two n-vectors ``u`` and ``v``\n which disagree. To save memory, the matrix ``X`` can be of type\n boolean.\n\n 9. ``Y = cdist(XA, XB, 'jaccard')``\n\n Computes the Jaccard distance between the points. 
Given two\n vectors, ``u`` and ``v``, the Jaccard distance is the\n proportion of those elements ``u[i]`` and ``v[i]`` that\n disagree where at least one of them is non-zero.\n\n 10. ``Y = cdist(XA, XB, 'chebyshev')``\n\n Computes the Chebyshev distance between the points. The\n Chebyshev distance between two n-vectors ``u`` and ``v`` is the\n maximum norm-1 distance between their respective elements. More\n precisely, the distance is given by\n\n .. math::\n\n d(u,v) = \\\\max_i {|u_i-v_i|}.\n\n 11. ``Y = cdist(XA, XB, 'canberra')``\n\n Computes the Canberra distance between the points. The\n Canberra distance between two points ``u`` and ``v`` is\n\n .. math::\n\n d(u,v) = \\\\sum_i \\\\frac{|u_i-v_i|}\n {|u_i|+|v_i|}.\n\n 12. ``Y = cdist(XA, XB, 'braycurtis')``\n\n Computes the Bray-Curtis distance between the points. The\n Bray-Curtis distance between two points ``u`` and ``v`` is\n\n\n .. math::\n\n d(u,v) = \\\\frac{\\\\sum_i (|u_i-v_i|)}\n {\\\\sum_i (|u_i+v_i|)}\n\n 13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``\n\n Computes the Mahalanobis distance between the points. The\n Mahalanobis distance between two points ``u`` and ``v`` is\n :math:`\\\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``\n variable) is the inverse covariance. If ``VI`` is not None,\n ``VI`` will be used as the inverse covariance matrix.\n\n 14. ``Y = cdist(XA, XB, 'yule')``\n\n Computes the Yule distance between the boolean\n vectors. (see `yule` function documentation)\n\n 15. ``Y = cdist(XA, XB, 'matching')``\n\n Synonym for 'hamming'.\n\n 16. ``Y = cdist(XA, XB, 'dice')``\n\n Computes the Dice distance between the boolean vectors. (see\n `dice` function documentation)\n\n 17. ``Y = cdist(XA, XB, 'kulsinski')``\n\n Computes the Kulsinski distance between the boolean\n vectors. (see `kulsinski` function documentation)\n\n 18. ``Y = cdist(XA, XB, 'rogerstanimoto')``\n\n Computes the Rogers-Tanimoto distance between the boolean\n vectors. (see `rogerstanimoto` function documentation)\n\n 19. ``Y = cdist(XA, XB, 'russellrao')``\n\n Computes the Russell-Rao distance between the boolean\n vectors. (see `russellrao` function documentation)\n\n 20. ``Y = cdist(XA, XB, 'sokalmichener')``\n\n Computes the Sokal-Michener distance between the boolean\n vectors. (see `sokalmichener` function documentation)\n\n 21. ``Y = cdist(XA, XB, 'sokalsneath')``\n\n Computes the Sokal-Sneath distance between the vectors. (see\n `sokalsneath` function documentation)\n\n\n 22. ``Y = cdist(XA, XB, 'wminkowski', p=2., w=w)``\n\n Computes the weighted Minkowski distance between the\n vectors. (see `wminkowski` function documentation)\n\n 'wminkowski' is deprecated and will be removed in SciPy 1.8.0.\n Use 'minkowski' instead.\n\n 23. ``Y = cdist(XA, XB, f)``\n\n Computes the distance between all pairs of vectors in X\n using the user supplied 2-arity function f. For example,\n Euclidean distance between the vectors could be computed\n as follows::\n\n dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))\n\n Note that you should avoid passing a reference to one of\n the distance functions defined in this library. For example,::\n\n dm = cdist(XA, XB, sokalsneath)\n\n would calculate the pair-wise distances between the vectors in\n X using the Python function `sokalsneath`. This would result in\n sokalsneath being called :math:`{n \\\\choose 2}` times, which\n is inefficient. 
Instead, the optimized C version is more\n efficient, and we call it using the following syntax::\n\n dm = cdist(XA, XB, 'sokalsneath')\n\n Examples\n --------\n Find the Euclidean distances between four 2-D coordinates:\n\n >>> from scipy.spatial import distance\n >>> coords = [(35.0456, -85.2672),\n ... (35.1174, -89.9711),\n ... (35.9728, -83.9422),\n ... (36.1667, -86.7833)]\n >>> distance.cdist(coords, coords, 'euclidean')\n array([[ 0. , 4.7044, 1.6172, 1.8856],\n [ 4.7044, 0. , 6.0893, 3.3561],\n [ 1.6172, 6.0893, 0. , 2.8477],\n [ 1.8856, 3.3561, 2.8477, 0. ]])\n\n\n Find the Manhattan distance from a 3-D point to the corners of the unit\n cube:\n\n >>> a = np.array([[0, 0, 0],\n ... [0, 0, 1],\n ... [0, 1, 0],\n ... [0, 1, 1],\n ... [1, 0, 0],\n ... [1, 0, 1],\n ... [1, 1, 0],\n ... [1, 1, 1]])\n >>> b = np.array([[ 0.1, 0.2, 0.4]])\n >>> distance.cdist(a, b, 'cityblock')\n array([[ 0.7],\n [ 0.9],\n [ 1.3],\n [ 1.5],\n [ 1.5],\n [ 1.7],\n [ 2.1],\n [ 2.3]])\n\n \"\"\"\n # You can also call this as:\n # Y = cdist(XA, XB, 'test_abc')\n # where 'abc' is the metric being tested. This computes the distance\n # between all pairs of vectors in XA and XB using the distance metric 'abc'\n # but with a more succinct, verifiable, but less efficient implementation.\n\n XA = np.asarray(XA)\n XB = np.asarray(XB)\n\n s = XA.shape\n sB = XB.shape\n\n if len(s) != 2:\n raise ValueError('XA must be a 2-dimensional array.')\n if len(sB) != 2:\n raise ValueError('XB must be a 2-dimensional array.')\n if s[1] != sB[1]:\n raise ValueError('XA and XB must have the same number of columns '\n '(i.e. feature dimension.)')\n\n mA = s[0]\n mB = sB[0]\n n = s[1]\n\n if callable(metric):\n mstr = getattr(metric, '__name__', 'Unknown')\n metric_info = _METRIC_ALIAS.get(mstr, None)\n if metric_info is not None:\n XA, XB, typ, kwargs = _validate_cdist_input(\n XA, XB, mA, mB, n, metric_info, **kwargs)\n return _cdist_callable(XA, XB, metric=metric, out=out, **kwargs)\n elif isinstance(metric, str):\n mstr = metric.lower()\n metric_info = _METRIC_ALIAS.get(mstr, None)\n if metric_info is not None:\n cdist_fn = metric_info.cdist_func\n return cdist_fn(XA, XB, out=out, **kwargs)\n elif mstr.startswith(\"test_\"):\n metric_info = _TEST_METRICS.get(mstr, None)\n if metric_info is None:\n raise ValueError(f'Unknown \"Test\" Distance Metric: {mstr[5:]}')\n XA, XB, typ, kwargs = _validate_cdist_input(\n XA, XB, mA, mB, n, metric_info, **kwargs)\n return _cdist_callable(\n XA, XB, metric=metric_info.dist_func, out=out, **kwargs)\n else:\n raise ValueError('Unknown Distance Metric: %s' % mstr)\n else:\n raise TypeError('2nd argument metric must be a string identifier '\n 'or a function.')\n" ]
[ [ "numpy.deprecate", "numpy.dot", "numpy.sqrt", "numpy.asarray", "numpy.vstack", "numpy.issubdtype", "scipy._lib._util._asarray_validated", "numpy.any", "numpy.square", "numpy.nansum", "numpy.zeros", "numpy.bitwise_or", "numpy.log", "scipy._lib.deprecation._deprecated", "numpy.ascontiguousarray", "numpy.linalg.inv", "numpy.power", "numpy.atleast_2d", "numpy.errstate", "numpy.find_common_type", "numpy.array", "numpy.sum", "numpy.abs", "numpy.ones", "numpy.bitwise_and", "numpy.average", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
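A minimal usage sketch tying the record above together: pdist to a condensed vector, squareform to a square matrix and back, cdist for a rectangular distance matrix. It assumes SciPy and NumPy are installed; the sample points are arbitrary.

import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform

X = np.array([[0.0, 0.0], [3.0, 0.0], [0.0, 4.0]])

v = pdist(X, 'euclidean')               # condensed form: pairs (0,1), (0,2), (1,2)
D = squareform(v)                       # redundant 3-by-3 symmetric matrix
assert np.allclose(squareform(D), v)    # round-trips back to the condensed form

# Condensed index of entry (i, j), i < j, per the docstring's formula.
n, i, j = len(X), 0, 2
k = n * (n - 1) // 2 - (n - i) * (n - i - 1) // 2 + (j - i - 1)
assert np.isclose(D[i, j], v[k])

print(cdist(X[:1], X))                  # 1-by-3 rectangle of distances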
zuyezheng/RedditSentiment
[ "c786284323828c1a3e353ee27e1be13421feb0c2" ]
[ "src/transformers/TransformerWrapper.py" ]
[ "import time\n\nimport tensorflow as tf\n\nfrom transformers.TransformerSchedule import TransformerSchedule\n\nTRAIN_STEP_SIGNATURE = [\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n]\n\n\nclass TransformerWrapper:\n\n def __init__(\n self,\n transformer,\n # path to store or load checkpoints\n checkpoint_path,\n # if we should try to restore from checkpoint\n restore\n ):\n self.transformer = transformer\n self.optimizer = tf.keras.optimizers.Adam(\n TransformerSchedule(self.transformer.d_model), beta_1=0.9, beta_2=0.98, epsilon=1e-9\n )\n\n checkpoint = tf.train.Checkpoint(transformer=self.transformer, optimizer=self.optimizer)\n self.checkpoint_manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=5)\n\n if restore and self.checkpoint_manager.latest_checkpoint:\n checkpoint.restore(self.checkpoint_manager.latest_checkpoint)\n print('Restored from latest checkpoint.')\n\n def train(self, epochs, dataset):\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\n @tf.function(input_signature=TRAIN_STEP_SIGNATURE)\n def train_step(inputs, targets):\n # inputs for the decoder, excluding the last since we need something to predict\n target_inputs = targets[:, :-1]\n # inputs offset by 1 since we're trying to predict the next character\n target_reals = targets[:, 1:]\n\n enc_padding_mask, combined_mask, dec_padding_mask = TransformerWrapper.create_masks(inputs, target_inputs)\n\n with tf.GradientTape() as tape:\n predictions, _ = self.transformer(\n inputs, target_inputs, True, enc_padding_mask, combined_mask, dec_padding_mask\n )\n loss = TransformerWrapper.loss_function(target_reals, predictions, loss_object)\n\n gradients = tape.gradient(loss, self.transformer.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.transformer.trainable_variables))\n\n train_loss(loss)\n train_accuracy(target_reals, predictions)\n\n for epoch in range(epochs):\n start = time.time()\n\n for (batch_num, (i, t)) in enumerate(dataset):\n train_step(i, t)\n\n if batch_num % 50 == 0:\n print('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(\n epoch + 1, batch_num, train_loss.result(), train_accuracy.result()\n ))\n\n if (epoch + 1) % 5 == 0:\n ckpt_save_path = self.checkpoint_manager.save()\n print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path))\n\n print('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(\n epoch + 1, train_loss.result(), train_accuracy.result()\n ))\n print('Time taken for 1 epoch: {} secs\\n'.format(time.time() - start))\n\n @staticmethod\n def loss_function(real, pred, loss_object):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss = loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss.dtype)\n loss *= mask\n\n return tf.reduce_sum(loss) / tf.reduce_sum(mask)\n\n @staticmethod\n def create_masks(inputs, targets):\n def create_padding_mask(sequence):\n sequence = tf.cast(tf.math.equal(sequence, 0), tf.float32)\n\n # add extra dimensions to add the padding to the attention logits\n # (batch_size, 1, 1, seq_len)\n return sequence[:, tf.newaxis, tf.newaxis, :]\n\n def create_look_ahead_mask(size):\n # (seq_len, seq_len)\n return 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)\n\n # Encoder padding mask\n encoder_padding_mask = create_padding_mask(inputs)\n\n # 
Pad and mask the encoder outputs used in the 2nd attention block in the decoder.\n decoder_padding_mask = create_padding_mask(inputs)\n\n # Pad and mask future tokens in the input received by the decoder, used in the 1st attention block in the\n # decoder.\n look_ahead_mask = create_look_ahead_mask(tf.shape(targets)[1])\n dec_target_padding_mask = create_padding_mask(targets)\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n\n return encoder_padding_mask, combined_mask, decoder_padding_mask\n" ]
[ [ "tensorflow.train.CheckpointManager", "tensorflow.shape", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.train.Checkpoint", "tensorflow.maximum", "tensorflow.cast", "tensorflow.reduce_sum", "tensorflow.ones", "tensorflow.math.equal", "tensorflow.function", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.TensorSpec", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
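The mask helpers in TransformerWrapper.create_masks are easiest to understand by shape; a small sketch, assuming TensorFlow 2.x, with made-up token ids where 0 is padding.

import tensorflow as tf

seq = tf.constant([[7, 6, 0, 0], [1, 8, 3, 0]], dtype=tf.int64)

# Padding mask: 1.0 wherever the token id is 0, broadcastable over attention logits.
pad = tf.cast(tf.math.equal(seq, 0), tf.float32)[:, tf.newaxis, tf.newaxis, :]
print(pad.shape)                        # (2, 1, 1, 4)

# Look-ahead mask: ones above the diagonal block attention to future positions.
size = seq.shape[1]                     # 4
look_ahead = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
print(look_ahead.numpy())               # row i may attend to positions <= i only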
AngusNicolson/cvd-vae
[ "9e0ea781e10b0b0ffc5a94d446031c948868389f" ]
[ "run_exp.py" ]
[ "\nfrom pathlib import Path\nfrom argparse import ArgumentParser\nimport json\n\nimport numpy as np\nimport torch\n\nfrom cvd_vae.utils import load_data, create_supervised_vae, load_pretrained\nfrom cvd_vae.trainer import Trainer\n\n# For reproducibility\nnp.random.seed(42)\ntorch.manual_seed(42)\n\n\ndef main(args):\n\n with open(args.config, \"r\") as fp:\n config = json.load(fp)\n\n train_dataset, val_dataset = load_data(config, args.dataset, args.prefix)\n\n vae = create_supervised_vae(config)\n\n if config[\"load\"][\"path\"] is not None:\n vae = load_pretrained(vae, **config[\"load\"])\n\n savedir = Path(args.out_dir)\n savedir.mkdir(exist_ok=True)\n\n trainer = Trainer(vae, \"vae\", **config[\"trainer\"])\n\n if config[\"load\"][\"path\"] is not None:\n optimizer_state_dict = torch.load(config[\"load\"][\"path\"])[\"optimizer\"]\n trainer.load_optimizer(optimizer_state_dict)\n\n train_dir = f\"{str(savedir)}/{trainer.savedir}\"\n Path(train_dir).mkdir(exist_ok=True)\n with open(f\"{train_dir}/config.json\", \"w\") as fp:\n json.dump(config, fp, indent=2)\n\n trainer.train(train_dataset, val_dataset, config[\"epochs\"], save_prefix=str(savedir) + \"/\", **config[\"train\"])\n\n print(\"Done!\")\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--dataset\", type=str, help=\"Path to dataset .json\")\n parser.add_argument(\"--prefix\", type=str, help=\"Prefix for ECG .npy paths\", default=\"\")\n parser.add_argument(\"--out-dir\", type=str, help=\"Output directory\", default=\"./\")\n parser.add_argument(\"--config\", type=str, help=\"Config .json for training\", default=\"./config.json\")\n args = parser.parse_args()\n main(args)\n" ]
[ [ "torch.manual_seed", "numpy.random.seed", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
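run_exp.py only reads a handful of top-level keys from its config; a hypothetical skeleton follows (field names are taken from the script, but the nested values are placeholders, not cvd_vae's real schema).

# Hypothetical config skeleton; only the keys read by run_exp.py are shown,
# and the nested values are placeholders rather than the project's real schema.
import json

example_config = {
    "load": {"path": None},   # set to a checkpoint path to resume training
    "trainer": {},            # keyword arguments forwarded to Trainer(...)
    "train": {},              # keyword arguments forwarded to trainer.train(...)
    "epochs": 10,
}
print(json.dumps(example_config, indent=2))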
boangri/gym-grand-prix
[ "f2bf116482d3769bd1cc1d5464af2c4e94030994" ]
[ "gym_grand_prix/envs/cars/world.py" ]
[ "import itertools\nimport random\nfrom abc import ABCMeta, abstractmethod\nfrom cmath import rect, pi, phase\nfrom time import sleep\n\nimport numpy as np\nimport pygame\n\nfrom gym_grand_prix.envs.cars.agent import SimpleCarAgent\nfrom gym_grand_prix.envs.cars.utils import Action\nfrom gym_grand_prix.envs.cars.track import plot_map\nfrom gym_grand_prix.envs.cars.utils import CarState, to_px, rotate, intersect_ray_with_segment, draw_text, angle\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\n\n\nclass World(metaclass=ABCMeta):\n    @abstractmethod\n    def transition(self):\n        pass\n\n    @abstractmethod\n    def run(self):\n        pass\n\n\nclass SimpleCarWorld(World):\n    # Learn not to crash into the walls\n    COLLISION_PENALTY = 32 * 1e0\n    HEADING_REWARD = 10 * 1e0\n    WRONG_HEADING_PENALTY = 0 * 1e0\n    IDLENESS_PENALTY = 32 * 1e-1\n    SPEEDING_PENALTY = 0 * 1e-1\n    MIN_SPEED = 0.1 * 1e0\n    MAX_SPEED = 10 * 1e0\n\n    size = (800, 400)\n\n    def __init__(self, num_agents, car_map, Physics, agent_class, window=True, **physics_pars):\n        \"\"\"\n        Initializes the world\n        :param num_agents: number of agents in the world\n        :param car_map: the map on which everything happens (see track.py)\n        :param Physics: physics class implementing collisions and movement\n        :param agent_class: class of the agents in the world\n        :param physics_pars: extra parameters passed to the constructor of the physics class\n        (besides car_map, which is a mandatory constructor parameter)\n        \"\"\"\n        self.physics = Physics(car_map, **physics_pars)\n        self.map = car_map\n        self.visual = window\n        self.done = False\n        self.nrays = 5\n        self.steps = 0\n\n        # create the agents\n        self.set_agents(num_agents, agent_class)\n\n        self._info_surface = pygame.Surface(self.size)\n\n    def set_agents(self, agents=1, agent_class=None):\n        \"\"\"\n        Place agents into the world\n        :param agents: int or a list of Agent; if int, the agent_class parameter is required, since the world\n        will be populated with agents instances of agent_class; if a list, all agents from the list are placed in the world\n        :param agent_class: class of the agents to create, if agents is an int\n        \"\"\"\n        pos = (self.map[0][0] + self.map[0][1]) / 2\n        vel = 0\n        heading = rect(-0.3, 1)\n\n        if type(agents) is int:\n            self.agents = [agent_class(nrays=self.nrays) for _ in range(agents)]\n        elif type(agents) is list:\n            self.agents = agents\n        else:\n            raise ValueError(\"Parameter agent should be int or list of agents instead of %s\" % type(agents))\n\n        self.agent_states = {a: CarState(pos, vel, heading) for a in self.agents}\n        self.circles = {a: 0 for a in self.agents}\n\n        self._agent_surfaces = []\n        self._agent_images = []\n\n    def transition(self):\n        \"\"\"\n        Main loop logic:\n        compute each agent's vision of the world,\n        let the agent choose an action,\n        update the state\n        and handle the world's reaction to the chosen action\n        \"\"\"\n        for a in self.agents:\n            vision = self.vision_for(a)\n            action = a.choose_action(vision)\n            next_agent_state, collision = self.physics.move(\n                self.agent_states[a], action\n            )\n            self.circles[a] += angle(self.agent_states[a].position, next_agent_state.position) / (2*pi)\n            self.agent_states[a] = next_agent_state\n            a.receive_feedback(self.eval_reward(next_agent_state, collision))\n\n    def step(self, steering, acceleration):\n        action = Action(steering, acceleration)\n        for a in self.agents:\n            next_agent_state, collision = self.physics.move(self.agent_states[a], action)\n            progress = angle(self.agent_states[a].position, next_agent_state.position) / (2 * pi)\n            self.circles[a] += progress\n            self.agent_states[a] = next_agent_state\n            vision = self.vision_for(a)\n            reward = self.reward(collision, progress)\n            a.sensor_data_history.append(vision)\n            a.chosen_actions_history.append(action)\n            a.reward_history.append(reward)\n            a.step += 1\n            q = .001 if a.step > 1000 else 1. / float(a.step)\n            a.avg_reward = (1. - q) * a.avg_reward + q * reward\n            a.sum_reward += reward\n            done = False\n            if a.step == self.steps:\n                done = True\n                a.step = 0\n            return np.array(vision), reward, done, {'collision': collision}\n\n    def reward(self, collision, progress):\n        \"\"\"\n        Compute the agent's reward for its action\n        :param progress: increment of the number of completed laps\n        :param collision: whether a collision with a wall happened on the previous step\n        :return reward: the agent's reward\n        \"\"\"\n        reward = progress * 1000. - 1. #- 40. * int(collision)\n        return reward\n\n    def eval_reward(self, state, collision):\n        \"\"\"\n        The \"default\" reward, used in evaluate mode\n        Convenient so that changes to the reward function need not be reverted to score the result\n        \"\"\"\n        a = -np.sin(angle(-state.position, state.heading))\n        heading_reward = 1 if a > 0.1 else a if a > 0 else 0\n        heading_penalty = a if a <= 0 else 0\n        idle_penalty = 0 if abs(state.velocity) > self.MIN_SPEED else -self.IDLENESS_PENALTY\n        speeding_penalty = 0 if abs(state.velocity) < self.MAX_SPEED else -self.SPEEDING_PENALTY * abs(state.velocity)\n        collision_penalty = - max(abs(state.velocity), 0.1) * int(collision) * self.COLLISION_PENALTY\n\n        return heading_reward * self.HEADING_REWARD + heading_penalty * self.WRONG_HEADING_PENALTY + collision_penalty \\\n               + idle_penalty + speeding_penalty\n\n    def run(self, steps=None):\n        \"\"\"\n        Main world loop; on completion saves the agent's current weights to a file network_config_agent_n_layers_....txt\n        :param steps: number of loop steps; runs until stopped externally if None\n        \"\"\"\n        if self.visual:\n            scale = self._prepare_visualization()\n        for _ in range(steps) if steps is not None else itertools.count():\n            self.transition()\n            if self.visual:\n                self.visualize(scale)\n                if self._update_display() == pygame.QUIT:\n                    break\n            # sleep(0.1)\n\n        for i, agent in enumerate(self.agents):\n            try:\n                filename = \"a_%d_layers_%s.txt\" % (i, \"_\".join(map(str, agent.neural_net.sizes)))\n                agent.to_file(filename)\n                print(\"Saved agent parameters to '%s'\" % filename)\n                print(\"Steps: %d Mean reward: %.3f Circles/1000steps: %.3f\" %\n                      (agent.step, agent.sum_reward/agent.step, self.circles[agent]*1000/agent.step))\n            except AttributeError:\n                pass\n\n    def evaluate_agent(self, agent, steps=1000):\n        \"\"\"\n        Run the world loop for a specific agent (see the usage example in the comments after if __name__ == \"__main__\")\n        :param agent: SimpleCarAgent\n        :param steps: number of loop iterations\n        :param visual: whether or not to draw the picture\n        :return: the agent's mean reward per step\n        \"\"\"\n        agent.evaluate_mode = True\n        self.set_agents([agent])\n        rewards = []\n        if self.visual:\n            scale = self._prepare_visualization()\n        for _ in range(steps):\n            vision = self.vision_for(agent)\n            action = agent.choose_action(vision)\n            next_agent_state, collision = self.physics.move(\n                self.agent_states[agent], action\n            )\n            self.circles[agent] += angle(self.agent_states[agent].position, next_agent_state.position) / (2*pi)\n            self.agent_states[agent] = next_agent_state\n            rewards.append(self.eval_reward(next_agent_state, collision))\n            agent.receive_feedback(rewards[-1])\n            if self.visual:\n                self.visualize(scale)\n                if self._update_display() == pygame.QUIT:\n                    break\n            # sleep(0.05)\n\n        return np.mean(rewards), self.circles[agent]\n\n    def vision_for(self, agent):\n        \"\"\"\n        Builds the vision of the world for each agent\n        :param agent: the car we are looking from\n        :return: a list of the car's speed magnitude, the signed angle between the car's heading\n        and the direction to the center, and `agent.rays` distances to the nearest track walls (run the visualization and it becomes clear)\n        \"\"\"\n        state = self.agent_states[agent]\n        vision = [abs(state.velocity), np.sin(angle(-state.position, state.heading))]\n        extras = len(vision)\n\n        delta = pi / (agent.rays - 1)\n        start = rotate(state.heading, - pi / 2)\n\n        sectors = len(self.map)\n        for i in range(agent.rays):\n            # define ray direction\n            ray = rotate(start, i * delta)\n\n            # define ray's intersections with walls\n            vision.append(np.infty)\n            for j in range(sectors):\n                inner_wall = self.map[j - 1][0], self.map[j][0]\n                outer_wall = self.map[j - 1][1], self.map[j][1]\n\n                intersect = intersect_ray_with_segment((state.position, ray), inner_wall)\n                intersect = abs(intersect - state.position) if intersect is not None else np.infty\n                if intersect < vision[-1]:\n                    vision[-1] = intersect\n\n                intersect = intersect_ray_with_segment((state.position, ray), outer_wall)\n                intersect = abs(intersect - state.position) if intersect is not None else np.infty\n                if intersect < vision[-1]:\n                    vision[-1] = intersect\n\n            assert vision[-1] < np.infty, \\\n                \"Something went wrong: {}, {}\".format(str(state), str(agent.chosen_actions_history[-1]))\n        assert len(vision) == agent.rays + extras, \\\n            \"Something went wrong: {}, {}\".format(str(state), str(agent.chosen_actions_history[-1]))\n        return vision\n\n    def visualize(self, scale):\n        \"\"\"\n        Draws the picture. This and all \"private\" (underscore-prefixed) methods are optional reading.\n        \"\"\"\n        for i, agent in enumerate(self.agents):\n            state = self.agent_states[agent]\n            surface = self._agent_surfaces[i]\n            rays_lengths = self.vision_for(agent)[-agent.rays:]\n            self._agent_images[i] = [self._draw_ladar(rays_lengths, state, scale),\n                                     self._get_agent_image(surface, state, scale)]\n\n        if len(self.agents) == 1:\n            a = self.agents[0]\n            if a.step > 0:\n                draw_text(\"Reward: %.3f\" % a.reward_history[-1], self._info_surface, scale, self.size,\n                          text_color=white, bg_color=black)\n                draw_text(\"Step: %d Avg reward: %.3f\" % (a.step, a.avg_reward), self._info_surface, scale, self.size,\n                          text_color=white, bg_color=black, tlpoint=(self._info_surface.get_width() - 790, 10))\n                steer, acc = a.chosen_actions_history[-1]\n                state = self.agent_states[a]\n                draw_text(\"Action: steer.: %.2f, accel: %.2f\" % (steer, acc), self._info_surface, scale,\n                          self.size, text_color=white, bg_color=black, tlpoint=(self._info_surface.get_width() - 500, 10))\n                draw_text(\"Inputs: |v|=%.2f, sin(angle): %.2f, circle: %.2f\" % (\n                    abs(state.velocity), np.sin(angle(-state.position, state.heading)), self.circles[a]),\n                          self._info_surface, scale,\n                          self.size, text_color=white, bg_color=black, tlpoint=(self._info_surface.get_width() - 500, 50))\n        return pygame.surfarray.array3d(self._agent_surfaces[0])\n\n    def _get_agent_image(self, original, state, scale):\n        angle = phase(state.heading) * 180 / pi\n        rotated = pygame.transform.rotate(original, angle)\n        rectangle = rotated.get_rect()\n        rectangle.center = to_px(state.position, scale, self.size)\n        return rotated, rectangle\n\n    def _draw_ladar(self, sensors, state, scale):\n        surface = pygame.display.get_surface().copy()\n        surface.fill(white)\n        surface.set_colorkey(white)\n        start_pos = to_px(state.position, scale, surface.get_size())\n        delta = pi / (len(sensors) - 1)\n        ray = phase(state.heading) - pi / 2\n        for s in sensors:\n            end_pos = to_px(rect(s, ray) + state.position, scale, surface.get_size())\n            pygame.draw.line(surface, (0, 255, 0), start_pos, end_pos, 2)\n            ray += delta\n\n        rectangle = surface.get_rect()\n        rectangle.topleft = (0, 0)\n        return surface, rectangle\n\n    def _prepare_visualization(self):\n        red = (254, 0, 0)\n        pygame.init()\n        screen = pygame.display.set_mode(self.size)\n        screen.fill(white)\n        scale = plot_map(self.map, screen)\n        for state in self.agent_states.values():\n            s = pygame.Surface((25, 15))\n            s.set_colorkey(white)\n            s.fill(white)\n            pygame.draw.rect(s, red, pygame.Rect(0, 0, 15, 15))\n            pygame.draw.polygon(s, red, [(15, 0), (25, 8), (15, 15)], 0)\n            self._agent_surfaces.append(s)\n            self._agent_images.append([self._get_agent_image(s, state, scale)])\n\n        self._map_surface = screen\n        return scale\n\n    def _update_display(self):\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.display.quit()\n                return pygame.QUIT\n        display = pygame.display.get_surface()\n        display.fill(white)\n\n        plot_map(self.map, display)\n        for images in self._agent_images:\n            for surf, rectangle in images:\n                display.blit(surf, rectangle)\n        display.blit(self._info_surface, (0, 0), None, pygame.BLEND_RGB_SUB)\n        self._info_surface.fill(black)  # clear notifications from previous round\n        pygame.display.update()\n\n    def quit(self):\n        pygame.display.quit()" ]
[ [ "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
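The step reward in SimpleCarWorld is dominated by lap progress; its arithmetic checked in isolation (no pygame required, values chosen to be exact in floating point):

# The reward used in SimpleCarWorld.step, checked in isolation.
# progress is the fraction of a lap gained this step; collision is currently
# unused (the penalty term is commented out in the source).
def reward(collision, progress):
    return progress * 1000. - 1.

assert reward(False, 0.0) == -1.0        # standing still costs 1 per step
assert reward(False, 0.25) == 249.0      # a quarter lap nets +249
assert reward(True, 0.25) == 249.0       # collisions do not change the reward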
KozakaiAya/PyramidFlow
[ "81039630f3b1950fea4fc89b4dd80eb075da7eb7" ]
[ "Code/PyramidFlow/utils.py" ]
[ "import os\nimport cv2\nimport random\nimport numpy as np\n\nimport myconfig\n\ndef get_frame_tuple_list(path):\n frame_tuple_list = []\n with open(os.path.join(path, 'frame_list.txt'), 'r') as f:\n name = f.readlines()\n frame_tuple_list = [x.strip() for x in name]\n\n return frame_tuple_list\n\ndef resize(img):\n return cv2.resize(img, (myconfig.image_w, myconfig.image_h), interpolation=cv2.INTER_LANCZOS4)\n\ndef data_generator(data_path, batch_size=32):\n frame_list = get_frame_tuple_list(data_path)\n frame_count = len(frame_list)\n batch_count = frame_count // batch_size\n while True:\n random.shuffle(frame_list)\n batch_data_list = []\n batch_target_list = []\n for idx in range(batch_count * batch_size):\n img1_name = os.path.join(data_path, frame_list[idx] + '_0.png')\n img2_name = os.path.join(data_path, frame_list[idx] + '_1.png')\n img3_name = os.path.join(data_path, frame_list[idx] + '_2.png')\n img4_name = os.path.join(data_path, frame_list[idx] + '_3.png')\n img5_name = os.path.join(data_path, frame_list[idx] + '_4.png')\n\n img1 = resize(cv2.imread(img1_name))\n img2 = resize(cv2.imread(img2_name))\n img3 = resize(cv2.imread(img3_name))\n img4 = resize(cv2.imread(img4_name))\n img5 = resize(cv2.imread(img5_name))\n\n data = np.concatenate((img1, img2, img4, img5), axis=2)\n batch_data_list.append(data)\n batch_target_list.append(img3)\n\n if len(batch_data_list) == batch_size:\n batch_data = np.stack(batch_data_list, axis=0)\n batch_target = np.stack(batch_target_list, axis=0)\n\n batch_data_list = []\n batch_target_list = []\n\n yield batch_data.astype('float32') / 255.0, batch_target.astype('float32') / 255.0\n\n\n " ]
[ [ "numpy.concatenate", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
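A quick shape check of the channel stacking done in data_generator, using random arrays in place of decoded PNGs (the image size below is arbitrary, standing in for myconfig.image_h/image_w):

import numpy as np

h, w = 128, 256
frames = [np.random.randint(0, 256, (h, w, 3), dtype=np.uint8) for _ in range(5)]

data = np.concatenate((frames[0], frames[1], frames[3], frames[4]), axis=2)
target = frames[2]                        # the middle frame is what gets predicted
assert data.shape == (h, w, 12)           # four RGB frames stacked channel-wise
assert target.shape == (h, w, 3)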
zouning68/QueryCorrector
[ "afe3814c7dbd536089611510e82dacc56ef36413" ]
[ "querycorrect/test.py" ]
[ "import tornado, json, sys, logging, traceback, progressbar, Levenshtein, re, random\nfrom tornado.httpclient import HTTPClient\nfrom tqdm import tqdm\nimport numpy as np\nfrom config import config\nfrom correct import Corrector\nfrom collections import defaultdict\nfrom spider import spider\n\nurl = \"http://%s:%s/%s\" % (\"192.168.9.140\", \"1111\", \"query_correct\")\nhttp_client = HTTPClient()\nqc = Corrector()\n\ndef test_levenshtein():\n    texta = 'kitten' #'艾伦 图灵传'\n    textb = 'sitting' #'艾伦•图灵传'\n    print(Levenshtein.distance(texta,textb)) # edit distance\n    print(Levenshtein.hamming(texta,textb)) # Hamming distance\n    print(Levenshtein.ratio(texta,textb)) # Levenshtein ratio\n    print(Levenshtein.jaro(texta,textb)) # Jaro distance\n    print(Levenshtein.jaro_winkler(texta,textb)) # Jaro-Winkler distance\n    print(Levenshtein.distance(texta,textb))\n\ndef edit_distance(word1, word2):\n    len1, len2 = len(word1), len(word2)\n    dp = np.zeros((len1 + 1, len2 + 1))\n    for i in range(len1 + 1):\n        dp[i][0] = i\n    for j in range(len2 + 1):\n        dp[0][j] = j\n\n    for i in range(1, len1 + 1):\n        for j in range(1, len2 + 1):\n            delta = 0 if word1[i - 1] == word2[j - 1] else 1\n            dp[i][j] = min(dp[i - 1][j - 1] + delta, min(dp[i - 1][j] + 1, dp[i][j - 1] + 1))\n    return dp[len1][len2]\n\ndef get_res(txt):\n    try:\n        obj = {\"header\": {},\"request\": {\"c\": \"\", \"m\": \"query_correct\", \"p\": {\"query\": txt}}}\n        response = http_client.fetch(tornado.httpclient.HTTPRequest(\n            url=url,\n            method=\"POST\",\n            headers={'Content-type': 'application/json'},\n            body=json.dumps(obj, ensure_ascii=False)\n        ))\n        result = json.loads(response.buffer.read().decode(\"utf-8\", errors='ignore'))\n        res = result['response']['results']\n    except Exception as e:\n        print(txt)\n        raise\n    return res['corrected_query'], res['detail']\n\nmatchObj = re.compile(r'(.+)&([0-9]+)', re.M | re.I)\ndef test_querys():\n    results = []; path = config.query\n    with open(path, encoding='utf8') as fin:\n        num_lines = len(fin.readlines())\n    with open(path, encoding=\"utf8\") as fin:\n        for i, line in enumerate(tqdm(fin, total=num_lines)):\n            match_res = matchObj.match(line)\n            if not match_res: continue\n            q, f = match_res.group(1), int(match_res.group(2))\n            #correct, detail = spider(q)\n            correct, detail = get_res(q)\n            #correct, detail = qc.correct(q)\n            # print(correct, detail); exit()\n            if q == correct or not detail: continue\n            results.append(q + ' -> ' + correct)\n    if 1:\n        random.seed(1)\n        for e in t:\n            correct, detail = get_res(e)\n            #correct, detail = qc.correct(e)\n            if not (e == correct or not detail): results.append(e + ' -> ' + correct)\n    random.shuffle(results)\n    http_client.close()\n    with open(\"./querys_test\", 'w', encoding=\"utf8\") as fin:\n        for e in results:\n            fin.write(e + '\\n')\n\ndef test_jdtitle():\n    results = [] ;random.seed(1)\n    jdtitles = [line for line in open(config.jd_title, encoding='utf8').readlines() if matchObj.match(line) and int(matchObj.match(line).group(2)) == 1]\n    random.shuffle(jdtitles); jdtitle = jdtitles[: 10000]; print(\"\\noriginal data: %d\\tsample data: %d\" % (len(jdtitles), len(jdtitle)))\n    for i, line in enumerate(tqdm(jdtitle, total=len(jdtitle))):\n        match_res = matchObj.match(line)\n        q, f = match_res.group(1), int(match_res.group(2))\n        correct, detail = get_res(q)\n        #correct, detail = qc.correct(q)\n        if q == correct or not detail: continue\n        results.append(q + ' -> ' + correct)\n    http_client.close()\n    with open(\"./jdtitle_test\", 'w', encoding=\"utf8\") as fin:\n        for e in results:\n            fin.write(e + 
'\\n')\n\nt=['puthon','jvav架构师','puthon开法','开发工成师','appl官王','行政专远','人力资源找聘','美团,数局挖掘','百读,产品经理','大数据开法,jaca','hadop开发,北京,本科',\\\n '小洪书,jav工成师','andorid开法','gloang']\ndef test():\n for q in t:\n r = get_res(q)['response']['results']['correct_result']\n print(q, r)\n http_client.close()\n\nif __name__ == '__main__':\n try: que = sys.argv[1]\n except: que = \"montage+深圳\"\n #print(json.dumps(get_res(que), ensure_ascii=False))\n #test(); exit()\n test_querys(); exit()\n test_jdtitle()\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
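The edit_distance helper in the file above is the classic Levenshtein dynamic program; a self-contained check against the textbook kitten/sitting example:

# Self-contained check of the Levenshtein DP mirrored from edit_distance above.
import numpy as np

def edit_distance(word1, word2):
    len1, len2 = len(word1), len(word2)
    dp = np.zeros((len1 + 1, len2 + 1))
    dp[:, 0] = np.arange(len1 + 1)       # deleting i characters costs i
    dp[0, :] = np.arange(len2 + 1)       # inserting j characters costs j
    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            delta = 0 if word1[i - 1] == word2[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j - 1] + delta, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
    return dp[len1][len2]

assert edit_distance('kitten', 'sitting') == 3   # k->s, e->i, append g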
mianasbat/test
[ "22867073a5a3e87def68b4a76e70fe54d085be32" ]
[ "tests/algorithms/test_fre_from_fle.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\" Tests concerning computing FRE from FLE. \"\"\"\n\n# pylint: skip-file\n\nimport pytest\nimport numpy as np\nimport sksurgerycore.algorithms.errors as err\n\n\ndef measure_fre_1(mean_fle_squared):\n\n    fiducials = np.zeros((4, 3))\n    fiducials[0][0] = 1\n    fiducials[0][1] = 1\n    fiducials[1][0] = -0.5\n    fiducials[1][1] = 0.5\n    fiducials[2][0] = -1\n    fiducials[2][1] = -1\n    fiducials[3][0] = 0.5\n    fiducials[3][1] = -0.5\n\n    error = err.compute_fre_from_fle(fiducials, mean_fle_squared)\n    return error\n\ndef test_fre_origin_zero_fle():\n    target = np.zeros((1, 3))\n    error = measure_fre_1(0)\n    assert np.isclose(error, 0)\n\n\ndef test_fre_origin_1mm_fle():\n    target = np.zeros((1, 3))\n    error = measure_fre_1(1)\n    assert np.isclose(error, 0.5)\n\n\ndef test_invalid_because_fiducials_wrong_type():\n    with pytest.raises(TypeError):\n        err.compute_fre_from_fle(\"not an array\", 1)\n\n\ndef test_invalid_because_fiducials_wrong_columns():\n    with pytest.raises(ValueError):\n        err.compute_fre_from_fle(np.ones((1,4)), 1)\n\n\ndef test_invalid_because_fiducials_wrong_rows():\n    with pytest.raises(ValueError):\n        err.compute_fre_from_fle(np.ones((2, 3)), 1)\n\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
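The expected values in these tests are consistent with Fitzpatrick's approximation relating mean squared FRE to mean squared FLE, assuming that is what compute_fre_from_fle implements:

# Fitzpatrick's approximation, <FRE^2> = (1 - 2/N) * <FLE^2>, reproduces the
# asserted values above for N = 4 fiducials (assumed to be what the tested
# function implements, since the test data and expectations match it).
N = 4
for mean_fle_squared, expected in [(0.0, 0.0), (1.0, 0.5)]:
    mean_fre_squared = (1.0 - 2.0 / N) * mean_fle_squared
    assert abs(mean_fre_squared - expected) < 1e-12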
aerorohit/compyle
[ "965e3a4dc5673b63535562363d55de4a8abde3c4" ]
[ "compyle/tests/test_translator.py" ]
[ "from textwrap import dedent\nimport pytest\nimport numpy as np\nimport sys\n\nfrom ..config import get_config\nfrom ..types import annotate, declare\nfrom ..translator import (\n CConverter, CodeGenerationError, CStructHelper, KnownType,\n OpenCLConverter, CUDAConverter, py2c\n)\n\n\n@annotate(i='int', y='floatp', return_='float')\ndef annotated_f(i, y):\n x = declare('LOCAL_MEM matrix(64)')\n return y[i]\n\n\ndef test_simple_assignment_expression():\n # Given\n src = dedent('''\n b = (2*a + 1)*(-a/1.5)%2\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double a;\n double b;\n b = ((((2 * a) + 1) * (-a / 1.5)) % 2);\n ''')\n assert code == expect.strip()\n\n\ndef test_multiple_assignment_expressions():\n # Given\n src = dedent('''\n a = 21.5\n b = (2*a + 1)*(a/1.5)%2\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double a;\n double b;\n a = 21.5;\n b = ((((2 * a) + 1) * (a / 1.5)) % 2);\n ''')\n assert code == expect.strip()\n\n\ndef test_if_block():\n # Given\n src = dedent('''\n a = 21.5\n if a > 20:\n b = a - 1\n elif a < 20:\n b = a + 1\n else:\n b = a\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double a;\n double b;\n a = 21.5;\n if ((a > 20)) {\n b = (a - 1);\n }\n else {\n if ((a < 20)) {\n b = (a + 1);\n }\n else {\n b = a;\n }\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_conditionals():\n # Given\n src = dedent('''\n if (x > 10 and x < 20) or not (x >= 10 and x <= 20):\n y\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n double y;\n if ((((x > 10) && (x < 20)) || !((x >= 10) && (x <= 20)))) {\n y;\n }\n ''')\n assert code.strip() == expect.strip()\n\n # Given\n src = dedent('''\n if x != 10 and x is 100 or (x == 20 and x is not 1):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n if ((((x != 10) && (x == 100)) || ((x == 20) && (x != 1)))) {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n # Given\n src = dedent('''\n if x != 10 and x is 100 or (x == 20 and x is not 1):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n if ((((x != 10) && (x == 100)) || ((x == 20) && (x != 1)))) {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_ternary_operator():\n # Given\n src = dedent('''\n y = 2.0\n x = 1.0 if y >= 2.0 else 0.0\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n double y;\n y = 2.0;\n x = (y >= 2.0) ? 
1.0 : 0.0;\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_multiple_boolops():\n # Given\n src = dedent('''\n if x % 2 == 0 or x % 2 == 1 or x > 0:\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n if ((((x % 2) == 0) || ((x % 2) == 1) || (x > 0))) {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_multiple_bitwise_ops():\n # Given\n src = dedent('''\n x = 1 << 5\n y = x >> 2\n z = (x | y) ^ (x & y)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n double y;\n double z;\n x = (1 << 5);\n y = (x >> 2);\n z = ((x | y) ^ (x & y));\n ''')\n\n assert code.strip() == expect.strip()\n\n\ndef test_power():\n # Given\n src = dedent('''\n 1.5*x**2\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n (1.5 * pow(x, 2));\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_only_two_operands_supported_for_comparisons():\n # Given\n src = dedent('''\n if 10 < x < 20:\n pass\n ''')\n\n # When\n with pytest.raises(NotImplementedError):\n py2c(src)\n\n\ndef test_calling_function():\n # Given\n src = dedent('''\n sin(23.2 + 1)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n sin((23.2 + 1));\n ''')\n assert code == expect.strip()\n\n\ndef test_calling_printf_with_string():\n # Given\n src = dedent(r'''\n printf('%s %d %f\\n', 'hello', 1, 2.0)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n printf(\"%s %d %f\\n\", \"hello\", 1, 2.0);\n ''')\n assert code == expect.strip()\n\n\ndef test_subscript():\n # Given\n src = dedent('''\n x[1]\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n x[1];\n ''')\n assert code == expect.strip()\n\n\ndef test_known_math_constants():\n # Given\n src = dedent('''\n x = M_E + M_LOG2E + M_LOG10E + M_LN2 + M_LN10\n x += M_PI + M_PI_2 + M_PI_4 + M_1_PI * M_2_PI\n x += M_2_SQRTPI * M_SQRT2 * M_SQRT1_2\n x = INFINITY\n x = NAN\n x = HUGE_VALF\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n x = ((((M_E + M_LOG2E) + M_LOG10E) + M_LN2) + M_LN10);\n x += (((M_PI + M_PI_2) + M_PI_4) + (M_1_PI * M_2_PI));\n x += ((M_2_SQRTPI * M_SQRT2) * M_SQRT1_2);\n x = INFINITY;\n x = NAN;\n x = HUGE_VALF;\n ''')\n assert code == expect.strip()\n\n\ndef test_simple_function_with_return():\n # Given\n src = dedent('''\n def f(x=0.0):\n 'docstring'\n y = x + 1\n return y\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double f(double x)\n {\n double y;\n y = (x + 1);\n return y;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_simple_function_without_return():\n # Given\n src = dedent('''\n def f(y=0.0, x=0.0):\n z = y + x\n y = z\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n void f(double y, double x)\n {\n double z;\n z = (y + x);\n y = z;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_function_argument_types():\n # Given\n src = dedent('''\n def f(s_idx, s_p, d_idx, d_p, J=0, t=0.0, l=[0,0], xx=(0, 0)):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\nvoid f(long s_idx, double* s_p, long d_idx, double* d_p, long J, double t,\n double* l, double* xx)\n{\n ;\n}\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_known_types_in_funcargs():\n # Given\n src = dedent('''\n def f(x, xx, cond=True):\n pass\n ''')\n\n # When\n known_types = {'xx': KnownType('foo*'), 'x': KnownType('float32')}\n code = py2c(src, 
known_types=known_types)\n\n # Then\n expect = dedent('''\n void f(float32 x, foo* xx, int cond)\n {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_annotated_function():\n # Given/When\n t = CConverter()\n code = t.parse_function(annotated_f)\n\n # Then\n expect = dedent('''\n float annotated_f(int i, float* y)\n {\n LOCAL_MEM double x[64];\n return y[i];\n }\n ''')\n assert code.strip() == expect.strip()\n\n\[email protected](sys.version_info < (3, 4), reason='Requires Python3')\ndef test_py3_annotations():\n # Given/When\n from .py3_code import py3_f\n t = CConverter()\n code = t.parse_function(py3_f)\n\n # Then\n expect = dedent('''\n int py3_f(int x)\n {\n int y;\n y = (x + 1);\n return (x * y);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_calling_method_of_known_type():\n # Given\n src = dedent('''\n obj.method(1, 2)\n obj.meth()\n ''')\n known = {'obj': KnownType('SomeClass*', base_type='SomeClass')}\n\n # When\n code = py2c(src, known_types=known)\n\n # Then\n expect = dedent('''\n SomeClass_method(obj, 1, 2);\n SomeClass_meth(obj);\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_calling_method_of_known_type_in_method():\n # Given\n src = dedent('''\n class Foo(object):\n def g(self):\n pass\n def f(self, obj):\n obj.method(1, 2)\n self.g()\n ''')\n\n # When\n known = {'obj': KnownType('SomeClass*', base_type='SomeClass')}\n code = py2c(src, known_types=known)\n\n # Then\n expect = dedent('''\n void Foo_g(Foo* self)\n {\n ;\n }\n\n void Foo_f(Foo* self, SomeClass* obj)\n {\n SomeClass_method(obj, 1, 2);\n Foo_g(self);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_raises_error_when_unknown_args_are_given():\n # Given\n src = dedent('''\n def f(x):\n pass\n ''')\n\n # When/Then\n with pytest.raises(CodeGenerationError):\n py2c(src)\n\n # Given\n # Unsupported default arg.\n src = dedent('''\n def f(x=''):\n pass\n ''')\n\n # When/Then\n with pytest.raises(CodeGenerationError):\n py2c(src)\n\n # Given\n # Unsupported default arg list.\n src = dedent('''\n def f(x=(1, '')):\n pass\n ''')\n\n # When/Then\n with pytest.raises(CodeGenerationError):\n py2c(src)\n\n\ndef test_user_supplied_detect_type():\n # Given\n src = dedent('''\n def f(x, xx=[1,2,3], cond=True):\n pass\n ''')\n\n # When\n def dt(name, value):\n return 'double'\n code = py2c(src, detect_type=dt)\n\n # Then\n expect = dedent('''\n void f(double x, double xx, double cond)\n {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_while():\n # Given\n src = dedent('''\n while x < 21:\n do(x)\n do1(x)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n while ((x < 21)) {\n do(x);\n do1(x);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_bool_true_false_and_none():\n # Given\n src = dedent('''\n while True:\n pass\n if False:\n pass\n if x is None or x is not None:\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x;\n while (1) {\n ;\n }\n\n if (0) {\n ;\n }\n\n if (((x == NULL) || (x != NULL))) {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_for():\n # Given\n src = dedent('''\n for i in range(5):\n do(i)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n for (long i=0; i<5; i+=1) {\n do(i);\n }\n ''')\n assert code.strip() == expect.strip()\n\n # Given\n src = dedent('''\n for i in range(2, 5):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n for (long i=2; i<5; i+=1) {\n ;\n }\n 
''')\n assert code.strip() == expect.strip()\n\n # Given\n src = dedent('''\n for i in range(2, 10, 2):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n for (long i=2; i<10; i+=2) {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_for_with_decreasing_range():\n # Given\n src = dedent('''\n for i in range(10, -1, -1):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n for (long i=10; i>-1; i+=-1) {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_for_with_declare():\n # Given\n src = dedent('''\n i = declare('int')\n for i in range(5):\n do(i)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n int i;\n for (i=0; i<5; i+=1) {\n do(i);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_two_fors():\n # Given\n src = dedent('''\n for i in range(5):\n do(i)\n for i in range(5):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n for (long i=0; i<5; i+=1) {\n do(i);\n }\n\n for (long i=0; i<5; i+=1) {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_for_with_symbols():\n # Given\n src = dedent('''\n n = declare('int')\n n = 25\n for i in range(n):\n pass\n for i in range(0, n+1, step()):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n int n;\n n = 25;\n long __cpy_stop_0 = n;\n for (long i=0; i<__cpy_stop_0; i+=1) {\n ;\n }\n\n __cpy_stop_0 = (n + 1);\n long __cpy_step_0 = step();\n if (__cpy_step_0 < 0) {\n for (long i=0; i>__cpy_stop_0; i+=__cpy_step_0) {\n ;\n }\n }\n else {\n for (long i=0; i<__cpy_stop_0; i+=__cpy_step_0) {\n ;\n }\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_nested_for_with_symbols():\n # Given\n src = dedent('''\n n = declare('int')\n n = 25\n for i in range(n):\n for j in range(0, n+1, step()):\n pass\n for i in range(n+1):\n for j in range(0, n+2, step()):\n pass\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n int n;\n n = 25;\n long __cpy_stop_0 = n;\n for (long i=0; i<__cpy_stop_0; i+=1) {\n long __cpy_stop_1 = (n + 1);\n long __cpy_step_1 = step();\n if (__cpy_step_1 < 0) {\n for (long j=0; j>__cpy_stop_1; j+=__cpy_step_1) {\n ;\n }\n }\n else {\n for (long j=0; j<__cpy_stop_1; j+=__cpy_step_1) {\n ;\n }\n }\n }\n\n __cpy_stop_0 = (n + 1);\n for (long i=0; i<__cpy_stop_0; i+=1) {\n long __cpy_stop_1 = (n + 2);\n long __cpy_step_1 = step();\n if (__cpy_step_1 < 0) {\n for (long j=0; j>__cpy_stop_1; j+=__cpy_step_1) {\n ;\n }\n }\n else {\n for (long j=0; j<__cpy_stop_1; j+=__cpy_step_1) {\n ;\n }\n }\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_for_with_break_continue():\n # Given\n src = dedent('''\n for i in range(10):\n if i%7 == 0:\n break\n if i%2 == 0:\n continue\n do(i)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n for (long i=0; i<10; i+=1) {\n if (((i % 7) == 0)) {\n break;\n }\n if (((i % 2) == 0)) {\n continue;\n }\n do(i);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_for_not_range_and_else_fails():\n # Given\n src = dedent('''\n for i in something():\n pass\n ''')\n\n # When/Then\n with pytest.raises(NotImplementedError):\n py2c(src)\n\n # Given\n src = dedent('''\n for i in range(5):\n pass\n else:\n pass\n ''')\n\n # When/Then\n with pytest.raises(NotImplementedError):\n py2c(src)\n\n # Given\n src = dedent('''\n for i in range(0, 5, 2, 3):\n pass\n ''')\n\n # When/Then\n with pytest.raises(NotImplementedError):\n py2c(src)\n\n\ndef 
test_while_else_raises_error():\n # Given\n src = dedent('''\n while 1:\n do()\n else:\n do()\n ''')\n\n # When/Then\n with pytest.raises(NotImplementedError):\n py2c(src)\n\n\ndef test_try_block_raises_error():\n # Given\n src = dedent('''\n try:\n do()\n except ImportError:\n pass\n ''')\n\n # When/Then\n with pytest.raises(NotImplementedError):\n py2c(src)\n\n\ndef test_attribute_access():\n # Given\n src = dedent('''\n self.x = 1\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double self;\n self->x = 1;\n ''')\n\n assert code.strip() == expect.strip()\n\n\ndef test_declare_call_declares_variable():\n # Given\n src = dedent('''\n x = declare('int')\n x += 1\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n int x;\n x += 1;\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_declare_matrix():\n # Given\n src = dedent('''\n x = declare('matrix((3,))')\n do(x[0])\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x[3];\n do(x[0]);\n ''')\n assert code.strip() == expect.strip()\n\n # Given\n src = dedent('''\n x = declare('matrix((2, 3))')\n do(x[0][1])\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double x[2][3];\n do(x[0][1]);\n ''')\n assert code.strip() == expect.strip()\n\n # Given\n src = dedent('''\n x = declare('matrix((2, 3), \"int\")')\n do(x[0][1])\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n int x[2][3];\n do(x[0][1]);\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_declare_call_declares_multiple_variables():\n # Given\n src = dedent('''\n x, y = declare('int', 2)\n u, v = declare('matrix(3)', 2)\n A = declare('matrix((2,2), \"long\")')\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n int x, y;\n double u[3], v[3];\n long A[2][2];\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_class():\n # Given\n src = dedent('''\n class Foo(object):\n def g(self, x=0.0):\n return x*2.0\n def f(self, x=0.0):\n y = x + 1\n do(self.a, x)\n z = self.g(y)\n ''')\n\n # When\n code = py2c(src)\n\n # Then\n expect = dedent('''\n double Foo_g(Foo* self, double x)\n {\n return (x * 2.0);\n }\n\n void Foo_f(Foo* self, double x)\n {\n double y;\n double z;\n y = (x + 1);\n do(self->a, x);\n z = Foo_g(self, y);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_unsupported_method():\n # Given\n src = dedent('''\n np.identity(25)\n ''')\n\n # When\n with pytest.raises(NotImplementedError):\n py2c(src)\n\n\ndef test_c_struct_helper():\n # Given\n class Fruit(object):\n pass\n\n f = Fruit()\n f.apple = 1\n f.banana = 2.0\n f.pear = 1.5\n h = CStructHelper(f)\n\n # When\n result = h.get_code()\n\n # Then\n expect = dedent('''\n typedef struct Fruit {\n int apple;\n double banana;\n double pear;\n } Fruit;\n ''')\n assert result.strip() == expect.strip()\n\n # When/Then\n array = h.get_array()\n use_double = get_config().use_double\n fdtype = np.float64 if use_double else np.float32\n expect = np.dtype([('apple', np.int32),\n ('banana', fdtype), ('pear', fdtype)])\n\n assert array.dtype == expect\n assert array['apple'] == 1\n assert array['banana'] == 2.0\n assert array['pear'] == 1.5\n\n\ndef test_c_struct_helper_empty_object():\n # Given\n class Fruit(object):\n pass\n\n f = Fruit()\n h = CStructHelper(f)\n\n # When\n result = h.get_code()\n\n # Then\n expect = dedent('''\n typedef struct Fruit {\n } Fruit;\n ''')\n assert result.strip() == expect.strip()\n\n # When/Then\n assert h.get_array() is None\n\n\ndef 
test_wrapping_class():\n # Given\n class Dummy(object):\n '''Class Docstring'''\n\n def __init__(self, x=0, f=0.0, s=''):\n \"Constructor docstring\"\n self.x = x\n self.f = f\n self.s = s\n self._private = 1\n\n def method(self):\n '''Method docstring.\n '''\n pass\n\n obj = Dummy()\n\n # When\n c = CConverter()\n result = c.parse_instance(obj)\n\n # Then\n expect = dedent('''\n typedef struct Dummy {\n double f;\n int x;\n } Dummy;\n\n\n void Dummy_method(Dummy* self)\n {\n ;\n }\n ''')\n assert result.strip() == expect.strip()\n\n # When\n h = CStructHelper(obj)\n use_double = get_config().use_double\n fdtype = np.float64 if use_double else np.float32\n dtype = np.dtype([('f', fdtype), ('x', np.int32)])\n expect = np.zeros(1, dtype)\n assert h.get_array() == expect\n\n\ndef test_wrapping_class_with_ignore_methods():\n # Given\n class Dummy1(object):\n '''Class Docstring'''\n\n def f(self):\n pass\n\n def not_me(self):\n pass\n\n obj = Dummy1()\n\n # When\n c = CConverter()\n result = c.parse_instance(obj, ignore_methods=['not_me'])\n\n # Then\n expect = dedent('''\n typedef struct Dummy1 {\n } Dummy1;\n\n void Dummy1_f(Dummy1* self)\n {\n ;\n }\n ''')\n assert result.strip() == expect.strip()\n\n\ndef check_opencl_cuda_conversion(converter_obj):\n # Note that LID_0 etc. are predefined symbols when we include the CLUDA\n # preamble, therefore should be known.\n src = dedent('''\n def f(s_idx, s_p, d_idx, d_p, J=0, t=0.0, l=[0,0], xx=(0, 0)):\n s_p[s_idx] = LID_0*GID_0\n ''')\n\n # When\n known_types = {'d_p': KnownType('GLOBAL_MEM int*')}\n converter = converter_obj(known_types=known_types)\n code = converter.convert(src)\n\n # Then\n expect = dedent('''\nWITHIN_KERNEL void f(long s_idx, GLOBAL_MEM double* s_p, long d_idx,\n GLOBAL_MEM int* d_p, long J, double t, double* l, double* xx)\n{\n s_p[s_idx] = (LID_0 * GID_0);\n}\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_cuda_conversion():\n check_opencl_cuda_conversion(CUDAConverter)\n\n\ndef test_opencl_conversion():\n check_opencl_cuda_conversion(OpenCLConverter)\n\n\ndef test_opencl_class():\n src = dedent('''\n class Foo(object):\n def g(self, x=0.0):\n pass\n ''')\n\n # When\n converter = OpenCLConverter()\n code = converter.convert(src)\n\n # Then\n expect = dedent('''\n WITHIN_KERNEL void Foo_g(GLOBAL_MEM Foo* self, double x)\n {\n ;\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_cuda_local_conversion():\n @annotate(xc='ldoublep', yc='lintp')\n def knl(xc, yc):\n xc[LID_0] = 1\n yc[LID_0] = 1\n\n # When\n converter = CUDAConverter()\n code = converter.parse(knl)\n\n # Then\n expect_1 = dedent('''\nWITHIN_KERNEL void knl(int size_xc, int size_yc)\n{\n extern LOCAL_MEM float shared_buff[];\n double* xc = (double*) shared_buff;\n int* yc = (int*) &xc[size_xc];\n xc[LID_0] = 1;\n yc[LID_0] = 1;\n}\n ''')\n\n expect_2 = dedent('''\nWITHIN_KERNEL void knl(int size_xc, int size_yc)\n{\n extern LOCAL_MEM float shared_buff[];\n int* yc = (int*) shared_buff;\n double* xc = (double*) &yc[size_yc];\n xc[LID_0] = 1;\n yc[LID_0] = 1;\n}\n ''')\n\n assert code.strip() == expect_1.strip() or code.strip() == expect_2.strip()\n\n\ndef test_handles_parsing_functions():\n # Given\n def f(x=1.0):\n return x + 1\n\n # When\n t = CConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n double f(double x)\n {\n return (x + 1);\n }\n ''')\n assert code.strip() == expect.strip()\n\n # Given\n class A(object):\n def f(self, x=1.0):\n return x + 1.0\n\n # When\n t = CConverter()\n code = 
t.parse_function(A)\n\n # Then\n expect = dedent('''\n double A_f(A* self, double x)\n {\n return (x + 1.0);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_address_works():\n # Given\n def f(x=1.0):\n return address(x)\n\n # When\n t = CConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n double f(double x)\n {\n return (&x);\n }\n ''')\n assert code.strip() == expect.strip()\n\n\ndef test_atomic_inc_works():\n # Given\n def f(x=1.0):\n return atomic_inc(x)\n\n # When\n t = OpenCLConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n WITHIN_KERNEL double f(double x)\n {\n return atomic_inc(&x);\n }\n ''')\n\n assert code.strip() == expect.strip()\n\n # When\n t = CUDAConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n WITHIN_KERNEL double f(double x)\n {\n return atomicAdd(&x, 1);\n }\n ''')\n\n assert code.strip() == expect.strip()\n\n\ndef test_atomic_dec_works():\n # Given\n def f(x=1.0):\n return atomic_dec(x)\n\n # When\n t = OpenCLConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n WITHIN_KERNEL double f(double x)\n {\n return atomic_dec(&x);\n }\n ''')\n\n assert code.strip() == expect.strip()\n\n # When\n t = CUDAConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n WITHIN_KERNEL double f(double x)\n {\n return atomicAdd(&x, -1);\n }\n ''')\n\n assert code.strip() == expect.strip()\n\n\ndef test_cast_works():\n # Given\n def f(x=1.0):\n return cast(x, \"float\")\n\n # When\n t = OpenCLConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n WITHIN_KERNEL double f(double x)\n {\n return (float) (x);\n }\n ''')\n\n assert code.strip() == expect.strip()\n\n # When\n t = CUDAConverter()\n code = t.parse_function(f)\n\n # Then\n expect = dedent('''\n WITHIN_KERNEL double f(double x)\n {\n return (float) (x);\n }\n ''')\n\n assert code.strip() == expect.strip()\n" ]
[ [ "numpy.zeros", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jni/microscopium
[ "b9cddd8ef5f3003a396ace602228651b3020c4a3" ]
[ "microscopium/preprocess.py" ]
[ "import os\nimport functools as fun\nimport itertools as it\nimport collections as coll\nimport re\nimport numpy as np\nfrom scipy import ndimage as ndi\nfrom scipy.stats.mstats import mquantiles as quantiles\nfrom skimage import io, util, img_as_float, img_as_ubyte\nfrom skimage import morphology, filters as imfilter, exposure\nimport skimage.filters.rank as rank\nimport skimage\nimport cytoolz as tz\nfrom cytoolz import curried as c\nimport warnings\n\nfrom ._util import normalise_random_state\nfrom . import io as mio\n\n\ndef morphop(im, operation='open', radius='5'):\n \"\"\"Perform a morphological operation with spherical structuring element.\n\n Parameters\n ----------\n im : array, shape (M, N[, P])\n 2D or 3D grayscale image.\n operation : string, optional\n The operation to perform. Choices are 'opening', 'closing',\n 'erosion', and 'dilation'. Imperative verbs also work, e.g.\n 'dilate'.\n radius : int, optional\n The radius of the structuring element (disk or ball) used.\n\n Returns\n -------\n imout : array, shape (M, N[, P])\n The transformed image.\n\n Raises\n ------\n ValueError : if the image is not 2D or 3D.\n \"\"\"\n if im.ndim == 2:\n selem = morphology.disk(radius)\n elif im.ndim == 3:\n selem = morphology.ball(radius)\n else:\n raise ValueError(\"Image input to 'morphop' should be 2D or 3D\"\n \", got %iD\" % im.ndim)\n if operation.startswith('open'):\n imout = ndi.grey_opening(im, footprint=selem)\n elif operation.startswith('clos'):\n imout = ndi.grey_closing(im, footprint=selem)\n elif operation.startswith('dila'):\n imout = ndi.grey_dilation(im, footprint=selem)\n elif operation.startswith('ero'):\n imout = ndi.grey_erosion(im, footprint=selem)\n return imout\n\n\ndef basefn(fn):\n \"\"\"Get the filename without the extension.\n\n Parameters\n ----------\n fn : string\n A filename.\n\n Returns\n -------\n outfn : string\n `fn` with the extension stripped.\n\n Examples\n --------\n >>> file_name = 'file_name.ext'\n >>> basefn(file_name)\n 'file_name'\n \"\"\"\n return os.path.splitext(fn)[0]\n\n\ndef max_mask_iter(fns, offset=0, close_radius=0, erode_radius=0):\n \"\"\"Find masks for a set of images having brightness artifacts.\n\n Parameters\n ----------\n fns : list of string\n The images being examined.\n offset : int, optional\n Offset the threshold automatically found.\n close_radius : int, optional\n Perform a morphological closing of the mask of this radius.\n erode_radius : int, optional\n Perform a morphological erosion of the mask, after any closing,\n of this radius.\n\n Returns\n -------\n maxes : iterator of bool array\n The max mask image corresponding to each input image.\n \"\"\"\n ms = maxes(fns)\n t = imfilter.threshold_otsu(ms)\n ims = it.imap(io.imread, fns)\n masks = ((im < t + offset) for im in ims)\n if close_radius > 0:\n masks = (morphop(mask, 'close', close_radius) for mask in masks)\n if erode_radius > 0:\n masks = (morphop(mask, 'erode', erode_radius) for mask in masks)\n return masks\n\n\ndef write_max_masks(fns, offset=0, close_radius=0, erode_radius=0,\n suffix='.mask.tif', compress=1):\n \"\"\"Find a mask for images having a brightness artifact.\n\n This function iterates over a set of images and finds the maximum\n value of each. 
Then, Otsu's threshold is applied to the set of\n maxima, and any element brighter than this in *any* image is\n masked out.\n\n Parameters\n ----------\n fns : list of string\n The images being examined.\n offset : int, optional\n Offset the threshold automatically found.\n close_radius : int, optional\n Perform a morphological closing of the mask of this radius.\n erode_radius : int, optional\n Perform a morphological erosion of the mask, after any closing,\n of this radius.\n suffix : string, optional\n Save an image next to the original, with this suffix.\n compress : int in [0, 9], optional\n Compression level for saved images. 0 = no compression,\n 1 = fast compression, 9 = maximum compression, slowest.\n\n Returns\n -------\n n, m : int\n The number of images for which a mask was created, and the\n total number of images\n \"\"\"\n masks = max_mask_iter(fns, offset, close_radius, erode_radius)\n n = 0\n m = 0\n for fn, mask in zip(fns, masks):\n outfn = basefn(fn) + suffix\n m += 1\n if not mask.all():\n # we multiply by 255 to make the image easy to look at\n mio.imsave(outfn, mask.astype(np.uint8) * 255, compress=compress)\n n += 1\n return n, m\n\n\ndef maxes(fns):\n \"\"\"Return an array of the maximum intensity of each image.\n\n Parameters\n ----------\n fns : list of string\n The filenames of the images.\n\n Returns\n -------\n maxes : 1D array\n The maximum value of each image examined.\n \"\"\"\n ims = map(io.imread, fns)\n maxes = np.array(list(map(np.max, ims)))\n return maxes\n\n\ndef stretchlim(im, bottom=0.001, top=None, mask=None, in_place=False):\n \"\"\"Stretch the image so new image range corresponds to given quantiles.\n\n Parameters\n ----------\n im : array, shape (M, N, [...,] P)\n The input image.\n bottom : float, optional\n The lower quantile.\n top : float, optional\n The upper quantile. If not provided, it is set to 1 - `bottom`.\n mask : array of bool, shape (M, N, [...,] P), optional\n Only consider intensity values where `mask` is ``True``.\n in_place : bool, optional\n If True, modify the input image in-place (only possible if\n it is a float image).\n\n Returns\n -------\n out : np.ndarray of float\n The stretched image.\n \"\"\"\n if in_place and np.issubdtype(im.dtype, np.float):\n out = im\n else:\n out = np.empty(im.shape, np.float32)\n out[:] = im\n if mask is None:\n mask = np.ones(im.shape, dtype=bool)\n if top is None:\n top = 1 - bottom\n q0, q1 = quantiles(im[mask], [bottom, top])\n out -= q0\n out /= q1 - q0\n out = np.clip(out, 0, 1, out=out)\n return out\n\n\ndef run_quadrant_stitch(fns, re_string='(.*)_(s[1-4])_(w[1-3]).*',\n re_quadrant_group=1, compress=1):\n \"\"\"Read images, stitch them, and write them out to the same directory.\n\n Parameters\n ----------\n fns : list of string\n The filenames to be processed.\n re_string : string, optional\n The regular expression to match the filename.\n re_quadrant_group : int, optional\n The group from the re.match object that will contain quadrant info.\n compress : int in [0, 9], optional\n Compression level for saved images. 
0 = no compression,\n 1 = fast compression, 9 = maximum compression, slowest.\n\n Returns\n -------\n fns_out : list of string\n The output filenames\n \"\"\"\n qd = group_by_quadrant(fns, re_string, re_quadrant_group)\n fns_out = []\n for fn_pattern, fns in qd.items():\n new_filename = '_'.join(fn_pattern) + '_stitched.tif'\n ims = list(map(io.imread, sorted(fns)))\n im = quadrant_stitch(*ims)\n mio.imsave(new_filename, im, compress=compress)\n fns_out.append(new_filename)\n return fns_out\n\n\ndef crop(im, slices=(slice(100, -100), slice(250, -300))):\n \"\"\"Crop an image to contain only plate interior.\n\n Parameters\n ----------\n im : array\n The image to be cropped.\n slices : tuple of slice objects, optional\n The slices defining the crop. The default values are for\n stitched images from the Marcelle screen.\n\n Returns\n -------\n imc : array\n The cropped image.\n\n Examples\n --------\n >>> im = np.zeros((5, 5), int)\n >>> im[1:4, 1:4] = 1\n >>> crop(im, slices=(slice(1, 4), slice(1, 4)))\n array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\n \"\"\"\n return im[slices]\n\n\ndef group_by_channel(fns, re_string='(.*)_(w[1-3]).*',\n re_channel_group=1):\n \"\"\"Group filenames by channel to prepare for illumination estimation.\n\n Intended to be run *after* quadrant stitching.\n\n Parameters\n ----------\n fns : list of string\n The filenames to be processed.\n re_string : string, optional\n The regular expression to match the filename.\n re_channel_group : int, optional\n The group from the re.match object that will contain channel info.\n\n Returns\n -------\n grouped : dict mapping tuple of string to list of string\n The filenames, grouped into lists containing all images of the same\n channel. The keys are the channel regular expression group, useful for\n composing a filename for the illumination image.\n\n Examples\n --------\n >>> fn_numbering = it.product(range(2), range(1, 4))\n >>> fns = ['image_%i_w%i.tif' % (i, j) for i, j in fn_numbering]\n >>> fns\n ['image_0_w1.tif', 'image_0_w2.tif', 'image_0_w3.tif', 'image_1_w1.tif', 'image_1_w2.tif', 'image_1_w3.tif']\n >>> sorted(group_by_channel(fns).items())\n [('w1', ['image_0_w1.tif', 'image_1_w1.tif']), ('w2', ['image_0_w2.tif', 'image_1_w2.tif']), ('w3', ['image_0_w3.tif', 'image_1_w3.tif'])]\n \"\"\"\n re_match = fun.partial(re.match, re_string)\n match_objs = list(map(re_match, fns))\n fns = [fn for fn, match in zip(fns, match_objs) if match is not None]\n match_objs = [x for x in match_objs if x is not None]\n matches = [x.groups() for x in match_objs]\n keys = [m[re_channel_group] for m in matches]\n grouped = {}\n for k, fn in zip(keys, fns):\n grouped.setdefault(k, []).append(fn)\n return grouped\n\n\ndef group_by_quadrant(fns, re_string='(.*)_(s[1-4])_(w[1-3]).*',\n re_quadrant_group=1):\n \"\"\"Group filenames by quadrant to prepare for stitching.\n\n Parameters\n ----------\n fns : list of string\n The filenames to be processed.\n re_string : string, optional\n The regular expression to match the filename.\n re_quadrant_group : int, optional\n The group from the re.match object that will contain quadrant info.\n\n Returns\n -------\n grouped : dict mapping tuple of string to tuple of string\n The filenames, grouped into tuples containing four quadrants of the\n same image. 
The keys are all the regular expression match groups\n *other* than the quadrant group, useful for composing a filename for\n the stitched images.\n\n Examples\n --------\n >>> fn_numbering = it.product(range(2), range(1, 5))\n >>> fns = ['image_%i_s%i_w1.TIF' % (i, j) for i, j in fn_numbering]\n >>> fns\n ['image_0_s1_w1.TIF', 'image_0_s2_w1.TIF', 'image_0_s3_w1.TIF', 'image_0_s4_w1.TIF', 'image_1_s1_w1.TIF', 'image_1_s2_w1.TIF', 'image_1_s3_w1.TIF', 'image_1_s4_w1.TIF']\n >>> sorted(group_by_quadrant(fns).items())\n [(('image_0', 'w1'), ['image_0_s1_w1.TIF', 'image_0_s2_w1.TIF', 'image_0_s3_w1.TIF', 'image_0_s4_w1.TIF']), (('image_1', 'w1'), ['image_1_s1_w1.TIF', 'image_1_s2_w1.TIF', 'image_1_s3_w1.TIF', 'image_1_s4_w1.TIF'])]\n \"\"\"\n re_match = fun.partial(re.match, re_string)\n match_objs = list(map(re_match, fns))\n fns = [fn for fn, match in zip(fns, match_objs) if match is not None]\n match_objs = [x for x in match_objs if x is not None]\n matches = [x.groups() for x in match_objs]\n keys = list(map(tuple, [[m[i] for i in range(len(m))\n if i != re_quadrant_group] for m in matches]))\n grouped = {}\n for k, fn in zip(keys, fns):\n grouped.setdefault(k, []).append(fn)\n return grouped\n\n\ndef quadrant_stitch(nw, ne, sw, se):\n \"\"\"Stitch four seamless quadrant images into a single big image.\n\n Parameters\n ----------\n nw, ne, sw, se : np.ndarray, shape (Mi, Ni)\n The four quadrant images, corresponding to the cardinal directions of\n north-west, north-east, south-west, south-east.\n\n Returns\n -------\n stitched : np.ndarray, shape (M0+M2, N0+N1)\n The image resulting from stitching the four input images\n\n Examples\n --------\n >>> imbase = np.ones((2, 3), int)\n >>> nw, ne, sw, se = [i * imbase for i in range(4)]\n >>> quadrant_stitch(nw, ne, sw, se)\n array([[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [2, 2, 2, 3, 3, 3],\n [2, 2, 2, 3, 3, 3]])\n \"\"\"\n x1 = nw.shape[0]\n x2 = sw.shape[0]\n y1 = nw.shape[1]\n y2 = ne.shape[1]\n stitched = np.zeros((x1 + x2, y1 + y2), nw.dtype)\n stitched[:x1, :y1] = nw\n stitched[:x1, y1:] = ne\n stitched[x1:, :y1] = sw\n stitched[x1:, y1:] = se\n return stitched\n\n\ndef rescale_to_11bits(im_float):\n \"\"\"Rescale a float image in [0, 1] to integers in [0, 2047].\n\n This operation makes rank filtering much faster.\n\n Parameters\n ----------\n im_float : array of float in [0, 1]\n The float image. The range and type are *not* checked prior to\n conversion!\n\n Returns\n -------\n im11 : array of uint16 in [0, 2047]\n The converted image.\n\n Examples\n --------\n >>> im = np.array([0., 0.5, 1.])\n >>> rescale_to_11bits(im)\n array([ 0, 1024, 2047], dtype=uint16)\n \"\"\"\n im11 = np.round(im_float * 2047.).astype(np.uint16)\n return im11\n\n\ndef rescale_from_11bits(im11):\n \"\"\"Rescale a uint16 image with range in [0, 2047] to float in [0., 1.]\n\n Parameters\n ----------\n im11 : array of uint16, range in [0, 2047]\n The input image, encoded in uint16 but having 11-bit range.\n\n Returns\n -------\n imfloat : array of float, same shape as `im11`\n The output image.\n\n Examples\n --------\n >>> im = np.array([0, 1024, 2047], dtype=np.uint16)\n >>> rescale_from_11bits(im)\n array([0. , 0.5002, 1. 
])\n\n Notes\n -----\n Designed to be a no-op with the above `rescale_to_11bits` function,\n although this is subject to approximation errors.\n \"\"\"\n return np.round(im11 / 2047., decimals=4)\n\n\ndef unpad(im, pad_width):\n \"\"\"Remove padding from a padded image.\n\n Parameters\n ----------\n im : array\n The input array.\n pad_width : int or sequence of int\n The width of padding: a number for the same width along each\n dimension, or a sequence for different widths.\n\n Returns\n -------\n imc : array\n The unpadded image.\n\n Examples\n --------\n >>> im = np.zeros((5, 5), int)\n >>> im[1:4, 1:4] = 1\n >>> unpad(im, 1)\n array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\n \"\"\"\n if not isinstance(pad_width, coll.Iterable):\n pad_width = [pad_width] * im.ndim\n slices = tuple([slice(p, -p) for p in pad_width])\n return im[slices]\n\n\ndef _reduce_with_count(pairwise, iterator, accumulator=None):\n \"\"\"Return both the result of the reduction and the number of elements.\n\n Parameters\n ----------\n pairwise : function (a -> b -> a)\n The function with which to reduce the `iterator` sequence.\n iterator : iterable\n The sequence being reduced.\n accumulator : type \"a\", optional\n An initial value with which to perform the reduction.\n\n Returns\n -------\n result : type \"a\"\n The result of the reduce operation.\n count : int\n The number of elements that were accumulated.\n\n Examples\n --------\n >>> x = [5, 6, 7]\n >>> _reduce_with_count(np.add, x)\n (18, 3)\n \"\"\"\n def new_pairwise(a, b):\n (elem1, c1), (elem2, c2) = a, b\n return pairwise(elem1, elem2), c2\n new_iter = zip(iterator, it.count(1))\n new_acc = (0, accumulator)\n return tz.reduce(new_pairwise, new_iter, new_acc)\n\n\ndef mean(iterator):\n \"\"\"Use online algorithm to compute the mean of an iterator of values.\n\n Parameters\n ----------\n iterator : iterable of values\n The input iterable.\n\n Returns\n -------\n m : float or array of float\n The mean of the values in `iterator`. If the values are NumPy arrays,\n then the mean is an array of float of the same shape.\n\n Examples\n --------\n >>> values = [1, 0, 0, 1]\n >>> mean(values)\n 0.5\n >>> arrays = (np.full((2, 2), i) for i in range(5))\n >>> mean(arrays)\n array([[2., 2.],\n [2., 2.]])\n \"\"\"\n iterator = iter(iterator) # in case list/tuple passed as input\n curr_mean = np.float_(next(iterator))\n for i, elem in enumerate(iterator, start=2):\n curr_mean += (elem - curr_mean) / i\n return curr_mean\n \n\ndef find_background_illumination(fns, radius=None, input_bitdepth=None,\n quantile=0.5, stretch_quantile=0.):\n \"\"\"Use a set of related images to find uneven background illumination.\n\n Parameters\n ----------\n fns : list of string\n A list of image file names\n radius : int, optional\n The radius of the structuring element used to find background.\n default: The width or height of the input images divided by 4,\n whichever is smaller.\n input_bitdepth : int, optional\n The bit-depth of the input images. Should be specified if non-standard\n bitdepth images are used in a 16-bit image file, e.g. 12-bit images.\n Default is the dtype of the input image.\n quantile : float in [0, 1], optional\n The desired quantile to find background. 
default: 0.5 (median)\n stretch_quantile : float in [0, 1], optional\n Stretch image to full dtype limit, saturating above this quantile.\n\n Returns\n -------\n illum : np.ndarray, float, shape (M, N)\n The estimated illumination over the image field.\n\n See Also\n --------\n `correct_image_illumination`, `correct_multiimage_illumination`.\n \"\"\"\n # this function follows the \"PyToolz\" streaming data model to\n # obtain the illumination estimate.\n # first, define the functions for each individual step:\n in_range = ('image' if input_bitdepth is None\n else (0, 2**input_bitdepth - 1))\n rescale = tz.curry(exposure.rescale_intensity)\n normalize = (tz.partial(stretchlim, bottom=stretch_quantile)\n if stretch_quantile > 0\n else skimage.img_as_float)\n\n # produce a stream of properly-scaled images\n ims = (tz.pipe(fn, io.imread, rescale(in_range=in_range), normalize)\n for fn in fns)\n\n # take the mean of that stream\n mean_image = mean(ims)\n\n # return the median filter of that mean\n radius = radius or min(mean_image.shape) // 4\n\n mean_image = img_as_ubyte(stretchlim(mean_image))\n illum = imfilter.rank.median(mean_image, selem=morphology.disk(radius))\n return illum\n\n\ndef correct_multiimage_illumination(im_fns, illum, stretch_quantile=0,\n random_state=None):\n \"\"\"Divide input images pointwise by the illumination field.\n\n However, where `correct_image_illumination` rescales each individual\n image to span the full dynamic range of the data type, this one\n rescales each image such that *all images, collectively,* span the\n dynamic range. This aims to fix stretching of image noise when there\n is no signal in the data [1]_.\n\n Parameters\n ----------\n im_fns : iterable of image filenames, each of shape (M, N, ..., P)\n The images to be corrected.\n illum : array, shape (M, N, ..., P)\n The background illumination field.\n stretch_quantile : float, optional\n Clip intensity above and below this quantile. Stretch remaining\n values to fill dynamic range.\n random_state : None, int, or numpy RandomState instance, optional\n An optional random number generator or seed, passed directly to\n `_reservoir_sampled_image`.\n\n Returns\n -------\n ims_out : iterable of corrected images.\n The images corrected for background illumination.\n The dtype of the output images is determined by the dtype\n of the first image passed to the function.\n\n References\n ----------\n .. 
[1] https://github.com/microscopium/microscopium/issues/38\n \"\"\"\n p0 = 100 * stretch_quantile\n p1 = 100 - p0\n im_fns = list(im_fns)\n\n # read first image to get input image dtypes\n im0 = mio.imread(im_fns[0])\n\n # in first pass, make a composite image to get global intensity range\n ims_pass1 = map(io.imread, im_fns)\n sampled = _reservoir_sampled_image(ims_pass1, random_state)\n corrected = sampled / illum # don't do in-place, dtype may clash\n corr_range = tuple(np.percentile(corrected, [p0, p1]))\n\n # In second pass, correct every image and adjust exposure\n ims_pass2 = map(io.imread, im_fns)\n for im in ims_pass2:\n corrected = im / illum\n rescaled = exposure.rescale_intensity(corrected, in_range=corr_range,\n out_range=np.uint8)\n out = np.round(rescaled).astype(np.uint8)\n yield out\n\n\ndef _reservoir_sampled_image(ims_iter, random_state=None):\n \"\"\"Return an image where each pixel is sampled from a list of images.\n\n The idea is to get a sample of image intensity throughout a collection\n of images, to know what the \"standard range\" is for this type of image.\n\n The implementation uses a \"reservoir\" image to sample while remaining\n space-efficient, and only needs to hold about four images at one time\n (the reservoir, the current sample, a random image for sampling, and\n a thresholded version of the random image).\n\n Parameters\n ----------\n ims_iter : iterable of arrays\n An iterable over numpy arrays (representing images).\n random_state : None, int, or numpy RandomState instance, optional\n An optional random number generator or seed from which to draw\n samples.\n\n Returns\n -------\n sampled : array, same shape as input\n The sampled \"image\".\n\n Examples\n --------\n >>> ims = iter(np.arange(27).reshape((3, 3, 3)))\n >>> _reservoir_sampled_image(ims, 0)\n array([[ 0, 1, 2],\n [ 3, 13, 23],\n [24, 25, 8]])\n \"\"\"\n random = normalise_random_state(random_state)\n ims_iter = iter(ims_iter) # ensure iterator and not e.g. list\n sampled = next(ims_iter)\n for k, im in enumerate(ims_iter, start=2):\n to_replace = random.rand(*im.shape) < (1 / k)\n sampled[to_replace] = im[to_replace]\n return sampled\n\n\ndef global_threshold(ims_iter, random_state=None):\n \"\"\"Generate a global threshold for the collection of images given.\n\n The threshold is determined by sampling the intensity of every\n image and then computing the Otsu [1]_ threshold on this sample.\n\n When the input images are multi-channel, the threshold is computed\n separately for each channel.\n\n Parameters\n ----------\n ims_iter : iterable of arrays\n An iterable over numpy arrays (representing images).\n random_state : None, int, or numpy RandomState instance, optional\n An optional random number generator or seed from which to draw\n samples.\n\n Returns\n -------\n thresholds : tuple of float, length equal to number of channels\n The global threshold for the image collection.\n\n References\n ----------\n .. [1]: Nobuyuki Otsu (1979). \"A threshold selection method from\n gray-level histograms\". IEEE Trans. Sys., Man., Cyber.\n 9 (1): 62-66. 
doi:10.1109/TSMC.1979.4310076\n\n Examples\n --------\n >>> ims = iter(np.arange(27).reshape((3, 3, 3)))\n >>> global_threshold(ims, 0)\n (13,)\n \"\"\"\n sampled = _reservoir_sampled_image(ims_iter, random_state)\n if sampled.ndim < 3:\n sampled = sampled[..., np.newaxis] # add dummy channel dimension\n thresholds = [imfilter.threshold_otsu(sampled[..., i])\n for i in range(sampled.shape[-1])]\n return tuple(thresholds)\n\n\ndef correct_image_illumination(im, illum, stretch_quantile=0, mask=None):\n \"\"\"Divide input image pointwise by the illumination field.\n\n Parameters\n ----------\n im : np.ndarray of float\n The input image.\n illum : np.ndarray of float, same shape as `im`\n The illumination field.\n stretch_quantile : float, optional\n Stretch the image intensity to saturate the top and bottom\n quantiles given.\n mask : array of bool, same shape as im, optional\n Only stretch the image intensity where `mask` is ``True``.\n\n Returns\n -------\n imc : np.ndarray of float, same shape as `im`\n The corrected image.\n\n See Also\n --------\n `correct_multiimage_illumination`\n \"\"\"\n if im.dtype != np.float:\n imc = skimage.img_as_float(im)\n else:\n imc = im.copy()\n imc /= illum\n lim = stretch_quantile\n imc = stretchlim(imc, lim, 1-lim, mask)\n return imc\n\n\[email protected]\ndef montage(ims, order=None):\n \"\"\"Stitch together a list of images according to a specified pattern.\n\n The order pattern should be an array of integers where each element\n corresponds to the index of the image in the fns list.\n\n eg if order = [[20, 21, 22, 23, 24],\n [19, 6, 7, 8, 9],\n [18, 5, 0, 1, 10],\n [17, 4, 3, 2, 11],\n [16, 15, 14, 13, 12]]\n\n This order will stitch together 25 images in a clockwise spiral pattern.\n\n Parameters\n ----------\n ims : iterable of array, shape (M, N[, 3])\n The list of the image files to be stitched together. If None,\n this parameter defaults to the order given above.\n order : array-like of int, shape (P, Q)\n The order of the stitching, with each entry referring\n to the index of file in the fns array.\n\n Returns\n -------\n montaged : array, shape (M * P, N * Q[, 3])\n The stitched image.\n\n Examples\n --------\n >>> ims = [np.zeros((2, 2), dtype=np.uint8),\n ... 
2 * np.ones((2, 2), dtype=np.uint8)]\n >>> order = [1, 0]\n >>> montage(ims, order)\n array([[2, 2, 0, 0],\n [2, 2, 0, 0]], dtype=uint8)\n \"\"\"\n if order is None:\n from .screens import cellomics\n order = cellomics.SPIRAL_CLOCKWISE_RIGHT_25\n order = np.atleast_2d(order)\n\n # in case stream is passed, take one sip at a time ;)\n ims = list(tz.take(order.size, ims))\n rows, cols = ims[0].shape[:2]\n mrows, mcols = order.shape\n\n montaged = np.zeros((rows * mrows, cols * mcols) + ims[0].shape[2:],\n dtype=ims[0].dtype)\n for i in range(mrows):\n for j in range(mcols):\n montaged[rows*i:rows*(i+1), cols*j:cols*(j+1)] = ims[order[i, j]]\n return montaged\n\n\ndef find_missing_fields(fns, order=None,\n re_string=r\".*_[A-P]\\d{2}f(\\d{2})d0\",\n re_group=1):\n \"\"\"Find which fields are missing from a list of files belonging to a well.\n\n Given a list of image files, a stitch order, and a regex pattern\n determining which part of the filename denotes the field, find out\n which fields are missing.\n\n Parameters\n ----------\n fns : list of str\n order : array-like of int, shape (M, N), optional\n The order of the stitching, with each entry referring\n to the index of file in the fns array.\n re_string : str, optional\n The regex pattern used to show where in the file the field is.\n Default follows the Cellomics pattern eg.\n MFGTMP_150406100001_A01f00d0.TIF where the field is the number\n after \"f\".\n re_group : int, optional\n The regex group the field value belongs to. Default 1.\n\n Returns\n -------\n missing : array of int\n A possibly empty array containing the indices of missing fields.\n \"\"\"\n if order is None:\n from .screens import cellomics\n order = cellomics.SPIRAL_CLOCKWISE_RIGHT_25\n\n # get fields present in list\n pattern = re.compile(re_string)\n fields = [int(re.match(pattern, fn).group(re_group)) for fn in fns]\n\n # determine which fields are missing\n missing = np.setdiff1d(order, fields)\n return missing\n\n\ndef create_missing_mask(missing, order, rows=512, cols=512):\n \"\"\"Create a binary mask for stitched images where fields are missing.\n\n Given a list of missing fields, a stitch order, and the size of\n the input images, create a binary mask with False values where\n fields are missing. This is used to prevent missing fields from\n upsetting feature computation on images where a field is missing.\n\n Parameters\n ----------\n missing : list of int, or empty list\n The fields that are missing.\n order : array-like of int, shape (M, N), optional\n The order of the stitching, with each entry referring\n to the index of file in the fns array.\n rows : int, optional\n The number of rows in the input images. Default 512.\n cols : int, optional\n The number of cols in the input images. 
Default 512.\n\n Returns\n -------\n mask : array of bool, shape (P, Q)\n A binary mask where False denotes a missing field.\n \"\"\"\n if order is None:\n from .screens import cellomics\n order = cellomics.SPIRAL_CLOCKWISE_RIGHT_25\n order = np.atleast_2d(order)\n mrows, mcols = order.shape\n\n mask = np.ones((rows * mrows, cols * mcols),\n dtype=bool)\n\n for i in range(mrows):\n for j in range(mcols):\n if order[i, j] in missing:\n mask[rows*i:rows*(i+1), cols*j:cols*(j+1)] = False\n\n return mask\n\n\ndef montage_with_missing(fns, *, order, re_string, re_group):\n \"\"\"Montage a list of images, replacing missing fields with dummy values.\n\n The methods `montage` and `montage_stream` assume that the image\n filenames and image iterators passed to them are complete and include\n the full set of images belonging to the well. Some screens have missing fields,\n so this function can be used to montage together images with missing\n fields. Missing fields are replaced with 0 values.\n\n Missing fields are determined from the information in the image\n file name. See 'find_missing_fields'.\n\n Parameters\n ----------\n fns : list of str\n The list of filenames to montage.\n order : array-like of int, shape (M, N), optional\n The order of the stitching, with each entry referring\n to the index of file in the fns array.\n re_string : str\n The regular expression pattern to match to the filenames.\n re_group : str\n The group to find the field value in a regexp match.\n\n Returns\n -------\n montaged : array-like, shape (P, Q)\n The montaged image.\n mask : array of bool, shape (P, Q)\n A binary mask, where entries taking the value\n False represent missing fields in the montaged image.\n missing : int\n The number of fields that were found to be missing in the\n input list of filenames. This is useful for normalising\n features that depend on the entirety of the montaged image\n (e.g. count of objects).\n \"\"\"\n order = np.atleast_2d(order)\n mrows, mcols = order.shape\n\n if len(fns) == order.size:\n montaged = montage([io.imread(fn) for fn in fns], order)\n return montaged, np.ones_like(montaged, dtype=bool), 0\n\n # get width & height of first image. 
the rest of the images\n # are assumed to be of the same shape\n im0 = io.imread(fns[0])\n rows, cols = im0.shape[:2]\n\n # find which fields are missing\n missing = find_missing_fields(fns, order, re_string, re_group)\n\n # insert None value to list of files when fields missing\n _fns = fns[:] # create copy of list to avoid referencing problems\n for i in missing:\n _fns.insert(i, None)\n\n # create binary mask for the missing fields\n mask = create_missing_mask(missing, order, rows, cols)\n\n # instantiate array for output montaged image\n montaged = np.zeros((rows * mrows, cols * mcols) + im0.shape[2:],\n dtype=im0.dtype)\n\n for i, j in it.product(range(mrows), range(mcols)):\n index = order[i, j]\n\n if _fns[index] is not None:\n im = io.imread(_fns[index])\n montaged[rows*i:rows*(i+1), cols*j:cols*(j+1)] = im\n\n return montaged, mask, len(missing)\n\n\[email protected]\ndef reorder(index_list, list_to_reorder):\n \"\"\"Curried function to reorder a list according to input indices.\n\n Parameters\n ----------\n index_list : list of int\n The list of indices indicating where to put each element in the\n input list.\n list_to_reorder : list\n The list being reordered.\n\n Returns\n -------\n reordered_list : list\n The reordered list.\n\n Examples\n --------\n >>> list1 = ['foo', 'bar', 'baz']\n >>> reorder([2, 0, 1], list1)\n ['baz', 'foo', 'bar']\n \"\"\"\n return [list_to_reorder[j] for j in index_list]\n\n\[email protected]\ndef stack_channels(images, order=[0, 1, 2]):\n \"\"\"Stack multiple image files to one single, multi-channel image.\n\n Parameters\n ----------\n images : list of array, shape (M, N)\n The images to be concatenated. List should contain\n three images. Entries 'None' are considered to be dummy\n channels\n channel_order : list of int, optional\n The order the channels should be in in the final image.\n\n Returns\n -------\n stack_image : array, shape (M, N, 3)\n The concatenated, three channel image.\n\n Examples\n --------\n >>> image1 = np.ones((2, 2), dtype=int) * 1\n >>> image2 = np.ones((2, 2), dtype=int) * 2\n >>> joined = stack_channels((None, image1, image2))\n >>> joined.shape\n (2, 2, 3)\n >>> joined[0, 0]\n array([0, 1, 2])\n >>> joined = stack_channels((image1, image2), order=[None, 0, 1])\n >>> joined.shape\n (2, 2, 3)\n >>> joined[0, 0]\n array([0, 1, 2])\n \"\"\"\n # ensure we support iterators\n images = list(tz.take(len([pos for pos in order if pos is not None]),\n images))\n\n # ensure we grab an image and not `None`\n def is_array(obj): return isinstance(obj, np.ndarray)\n image_prototype = next(filter(is_array, images))\n\n # A `None` in `order` implies no image at that position\n ordered_ims = [images[i] if i is not None else None for i in order]\n ordered_ims = [np.zeros_like(image_prototype) if image is None else image\n for image in ordered_ims]\n\n # stack images with np.dstack, but if only a single channel is passed,\n # don't add an extra dimension\n stack_image = np.squeeze(np.dstack(ordered_ims))\n while ordered_ims:\n del ordered_ims[-1]\n return stack_image\n\n\ndef montage_stream(ims, montage_order=None, channel_order=[0, 1, 2],\n clear_none=True):\n \"\"\"From a sequence of single-channel field images, montage multichannels.\n\n Suppose the input is a list:\n\n ```\n ims = [green1a, blue1a, red1a, green1b, blue1b, red1b,\n green2a, blue2a, red2a, green2b, blue2b, red2b]\n ```\n\n with channel order ``[2, 0, 1]`` and montage order ``[1, 0]``, then\n the output will be:\n\n ```\n [rgb1_ba, rgb2_ba]\n ```\n\n Parameters\n 
----------\n ims : iterator of array, shape (M, N)\n A list of images in which consecutive images represent single\n channels of the same image. (See example.)\n montage_order : array-like of int, optional\n The order of the montage images (in 1D or 2D).\n channel_order : list of int, optional\n The order in which the channels appear.\n\n Returns\n -------\n montaged_stream : iterator of arrays\n An iterator of the images composed into multi-channel montages.\n\n Examples\n --------\n >>> images = (i * np.ones((4, 5), dtype=np.uint8) for i in range(24))\n >>> montaged = list(montage_stream(images, [[0, 1], [2, 3]], [2, 0, 1]))\n >>> len(montaged)\n 2\n >>> montaged[0].shape\n (8, 10, 3)\n >>> montaged[0][0, 0, :]\n array([2, 0, 1], dtype=uint8)\n >>> montaged[0][4, 5, :]\n array([11, 9, 10], dtype=uint8)\n >>> montaged[1][4, 5, :]\n array([23, 21, 22], dtype=uint8)\n \"\"\"\n if montage_order is None:\n from .screens import cellomics\n montage_order = cellomics.SPIRAL_CLOCKWISE_RIGHT_25\n montage_order = np.array(montage_order)\n ntiles = montage_order.size\n if clear_none:\n nchannels = len([i for i in channel_order if i is not None])\n else:\n nchannels = len(channel_order)\n return tz.pipe(ims, c.partition(nchannels),\n c.map(stack_channels(order=channel_order)),\n c.partition(ntiles),\n c.map(montage(order=montage_order)))\n" ]
[ [ "numpy.ones_like", "numpy.clip", "scipy.ndimage.grey_closing", "scipy.ndimage.grey_dilation", "numpy.issubdtype", "numpy.setdiff1d", "numpy.ones", "numpy.round", "numpy.atleast_2d", "scipy.ndimage.grey_opening", "numpy.percentile", "numpy.dstack", "numpy.zeros_like", "scipy.ndimage.grey_erosion", "numpy.array", "numpy.zeros", "numpy.empty", "scipy.stats.mstats.mquantiles" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
HLabProjects/RegenBoneAnalysis
[ "610496c2e8d67472ec02512bad7620b53f02580a" ]
[ "mdbands.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 27 23:21:22 2020\r\n@author: KFH\r\nscript to import greyscale images from CT scans of mouse bones and calculate\r\nbone mineral density in 3d space for a given pixel/voxel/increment size\r\n\"\"\"\r\n\r\nfrom scipy.stats import norm\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nimport math\r\nimport time\r\nimport datetime\r\nimport glob\r\nfrom tqdm import tqdm\r\nfrom shapely.geometry import Point\r\n\r\n\r\npath=\"your_ct_img_path\" #this is the path to your microCT image files\r\nimgdir=\"your_ct_img_folder\"\r\noutpath=\"your_file_outpath\" #this is the path to where your output files go\r\n\r\n\r\nbmpmatches=glob.glob(path+'/*[0-9].tif') #grabs list of all the ctimages in the folder\r\n#bmpmatches=glob.glob(path+'/*[0-9].bmp')\r\nnumbmp=len(bmpmatches)\r\nsample_img=bmpmatches[numbmp//2]\r\n\r\nchk=os.path.exists(sample_img) \r\nif chk==True: #if sample image exists then proceed to import images in folder\r\n print('confirmed')\r\n img = cv2.imread(sample_img,0)\r\n\r\nif chk==False:\r\n print('no file found')\r\n\r\nhh, ww = img.shape[:2]\r\nprint('img directory is: ',imgdir)\r\n\r\n\"\"\"\r\nVoxel Calibration\r\n\"\"\"\r\npxc=3.9 #from microCT scanner datafile, this will change with different machines\r\nh2=hh*3.9\r\nw2=ww*3.9\r\n\r\n\"\"\"\r\n-Processing to account for microCT stack dimensions - make largest x-y dimension into h\r\n-Setting size of computable roi to iterate through image stacks\r\n -initilize arrays for data in x,y, and z\r\n\"\"\"\r\nif hh > ww:\r\n h=hh\r\nelif ww > hh:\r\n h=ww\r\n \r\nresol=3 #reconstruction resolution: number of voxel lengths per reconstruction length\r\ndiv=numbmp/resol\r\nimgdir=imgdir+str(resol)\r\n\r\nroii=resol\r\nzsized=numbmp/roii\r\nxsized=hh/roii\r\nysized=ww/roii\r\nif zsized > xsized and zsized > ysized:\r\n fdiv=int(zsized)\r\nelif xsized > zsized and xsized > ysized:\r\n fdiv=int(xsized)\r\nelif ysized > xsized and ysized > zsized:\r\n fdiv=int(ysized)\r\n\r\n\r\n\"\"\"create the roii maxtrix \r\n-pad out numbers or cut at edges to get to the roii x div\r\n-proceding in left to right from top of image\r\n-img goes by (height, width)\r\n-apply thresholding for non mineralized (bone or cartileged or question)\r\n\"\"\"\r\nD=np.zeros([fdiv,fdiv,fdiv])\r\nD1=np.zeros([fdiv,fdiv,fdiv])\r\nD2=np.zeros([fdiv,fdiv,fdiv])\r\nD3=np.zeros([fdiv,fdiv,fdiv])\r\nD4=np.zeros([fdiv,fdiv,fdiv])\r\nG=[]\r\nG1=[]\r\nG2=[]\r\nG3=[]\r\nG4=[]\r\nctr=0\r\nhhindex=math.floor(hh/roii)\r\nwwindex=math.floor(ww/roii)\r\n\r\nfor k in tqdm(range(0,round(div)-1),position=0,leave=True):\r\n image_current=bmpmatches[k*resol]\r\n img = cv2.imread(image_current,0)\r\n for j in range(0,wwindex):\r\n for i in range(0,hhindex): #this is hh\r\n roi=img[i*roii:(i+1)*roii, j*roii:(j+1)*roii]\r\n #print(i,j)\r\n hold=np.average(roi, axis=1)\r\n value=np.average(hold)\r\n thresh=55 #This is about 0.7 g/cm3 for value of 55\r\n if value > thresh:\r\n band1=[thresh,90] \r\n band2=[91,112] \r\n band3=[113,134] \r\n band4=[135,255] \r\n D[k,i,j]=value\r\n G.append([i,j,k,value])\r\n if value <= band1[1]:\r\n D1[k,i,j]=value\r\n G1.append([i,j,k,value])\r\n if value >= band2[0] and value <= band2[1]:\r\n D2[k,i,j]=value\r\n G2.append([i,j,k,value])\r\n if value >= band3[0] and value <= band3[1]:\r\n D3[k,i,j]=value\r\n G3.append([i,j,k,value])\r\n if value >= band4[0]: \r\n D4[k,i,j]=value\r\n G4.append([i,j,k,value])\r\n \r\n\r\n\"\"\"\r\nReporting\r\n\"\"\"\r\n\r\nprint('img height(px): ',hh,' img width: 
',ww)\r\nprint('img height(mc): ',h2,' img width: ',w2)\r\n\r\n#Here we turn the list of certified bone points (G) into an array\r\nGAR=np.array(G)\r\nX=GAR[:,0]\r\nY=GAR[:,1]\r\nZ=GAR[:,2]\r\nVV=(GAR[:,3]-57.7)*.0177 + 0.75 #calibration for greyscale to density values\r\nVold=GAR[:,3]\r\n\r\n#check in case there are no larger mineral values\r\nif len(G3) == 0:\r\n G3.append([0,0,0,0])\r\nif len(G4) == 0:\r\n G4.append([0,0,0,0])\r\n\r\nGA1=np.array(G1)\r\nGA2=np.array(G2)\r\nGA3=np.array(G3)\r\nGA4=np.array(G4)\r\n\r\nVV1=(GA1[:,3]-57.7)*.0177 + 0.75\r\nVV2=(GA2[:,3]-57.7)*.0177 + 0.75\r\nVV3=(GA3[:,3]-57.7)*.0177 + 0.75\r\nVV4=(GA4[:,3]-57.7)*.0177 + 0.75\r\n\r\nb1c=[np.min(VV1),np.max(VV1)] \r\nb2c=[np.min(VV2),np.max(VV2)] \r\nb3c=[np.min(VV3),np.max(VV3)] \r\nb4c=[np.min(VV4),np.max(VV4)] \r\n\r\n\r\n\r\n#we modify the D array\r\n#remember, for D[k,i,j]=value format\r\nD[:,0]=D[:,0]*resol\r\nD[:,1]=D[:,1]*roii\r\nD[:,2]=D[:,2]*roii\r\n\r\n#some stats work for histogram work\r\n(mu, sigma)=norm.fit(VV)\r\n\r\nsss=time.time()\r\ndef centroid_points(arr):\r\n length = arr.shape[0]\r\n sum_x = np.sum(arr[:, 0])\r\n sum_y = np.sum(arr[:, 1])\r\n sum_z = np.sum(arr[:, 2])\r\n return sum_x/length, sum_y/length, sum_z/length\r\n\r\ncent1=centroid_points(GAR)\r\n\r\n\r\n#now to calculate distance of each point to centroid.\r\ndef getcendist(GARX):\r\n dcen=np.zeros([len(GARX)])\r\n for i in tqdm(range(0,len(GARX)),position=0, leave=True):\r\n pointi=Point(GARX[i,0]*roii,GARX[i,1]*roii,GARX[i,2]*resol)\r\n poi_dist=pointi.distance(Point(cent1))\r\n dcen[i]=poi_dist\r\n return dcen\r\n \r\nGAR1=np.array(G1)\r\nGAR2=np.array(G2)\r\nGAR3=np.array(G3)\r\nGAR4=np.array(G4)\r\n\r\ndcen_total=getcendist(GAR)\r\ndcen1=getcendist(GAR1)\r\ndcen2=getcendist(GAR2)\r\ndcen3=getcendist(GAR3)\r\ndcen4=getcendist(GAR4)\r\n\r\n#to scale all the GAR correctly...\r\n\r\n\r\n#print(\"centroid distance time -- %s seconds ---\" % (time.time() - c1time))\r\n\r\n#print(\"Starting reporting processes\")\r\n#reporting parameters and writing to a .txt file\r\nf= open(outpath+imgdir+\".txt\",\"w+\")\r\nf.write('This is the report file for processing of bone microCT images\\n')\r\nf.write('Using HLab Projects code, '+ str(datetime.datetime.now()))\r\nf.write('\\n')\r\nf.write('files from: '+imgdir+'\\n')\r\nf.write('\\n')\r\nf.write('The following are parameters used in visualization and calculations \\n')\r\nf.write('img height(px): '+str(hh)+' img width: '+str(ww))\r\nf.write('\\n')\r\nf.write('img height(mc): '+str(h2)+' img width: '+str(w2))\r\nf.write('\\n')\r\nf.write('Length per pixel/resolution (mc): '+str(pxc))\r\nf.write('\\n')\r\nf.write('Resolution factor is: '+str(resol))\r\nf.write('\\n')\r\nf.write('size of 3d scatter array: '+str(fdiv))\r\nf.write('\\n')\r\nf.write('Band 1 is between '+str(b1c[0])+' g/cm^3 and '+str(b1c[1])+' g/cm^3 \\n')\r\nf.write('Band 2 is between '+str(round(b2c[0],2))+' g/cm^3 and '+str(round(b2c[1],2))+' g/cm^3 \\n')\r\nf.write('Band 3 is between '+str(b3c[0])+' g/cm^3 and '+str(b3c[1])+' g/cm^3 \\n')\r\nf.write('Band 4 is between '+str(b4c[0])+' g/cm^3 and '+str(b4c[1])+' g/cm^3 \\n')\r\nf.close()\r\n\r\n#section on creating values to save for later calculation, using folder 
names\r\nnp.save(outpath+imgdir+\"fdiv\",fdiv)\r\nnp.save(outpath+imgdir+\"X\",X)\r\nnp.save(outpath+imgdir+\"Y\",Y)\r\nnp.save(outpath+imgdir+\"Z\",Z)\r\nnp.save(outpath+imgdir+\"D\",D)\r\nnp.save(outpath+imgdir+\"GAR\",GAR)\r\nnp.save(outpath+imgdir+\"GAR1\",GAR1)\r\nnp.save(outpath+imgdir+\"GAR2\",GAR2)\r\nnp.save(outpath+imgdir+\"GAR3\",GAR3)\r\nnp.save(outpath+imgdir+\"GAR4\",GAR4)\r\ncent11=np.array(cent1)\r\nnp.save(outpath+imgdir+\"cent11\",cent11)\r\nnp.save(outpath+imgdir+\"dcen_total\",dcen_total)\r\nnp.save(outpath+imgdir+\"dcen1\",dcen1)\r\nnp.save(outpath+imgdir+\"dcen2\",dcen2)\r\nnp.save(outpath+imgdir+\"dcen3\",dcen3)\r\nnp.save(outpath+imgdir+\"dcen4\",dcen4)\r\n\r\n\r\nprint(\"calculations complete\")\r\n" ]
[ [ "numpy.min", "scipy.stats.norm.fit", "numpy.save", "numpy.max", "numpy.average", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kweisamx/ESPCN-
[ "1ed590ad1b57d42331be20e5a91e0fdb968a0952" ]
[ "utils.py" ]
[ "import cv2\nimport numpy as np\nimport tensorflow as tf\nimport os \nimport glob\nimport h5py\n\n\n\n# Get the Image\ndef imread(path):\n img = cv2.imread(path)\n return img\n\ndef imsave(image, path, config):\n #checkimage(image)\n # Check the check dir, if not, create one\n if not os.path.isdir(os.path.join(os.getcwd(),config.result_dir)):\n os.makedirs(os.path.join(os.getcwd(),config.result_dir))\n\n # NOTE: because normial, we need mutlify 255 back \n cv2.imwrite(os.path.join(os.getcwd(),path),image * 255.)\n\ndef checkimage(image):\n cv2.imshow(\"test\",image)\n cv2.waitKey(0)\n\ndef modcrop(img, scale =3):\n \"\"\"\n To scale down and up the original image, first thing to do is to have no remainder while scaling operation.\n \"\"\"\n # Check the image is grayscale\n if len(img.shape) ==3:\n h, w, _ = img.shape\n h = int(h / scale) * scale\n w = int(w / scale) * scale\n img = img[0:h, 0:w, :]\n else:\n h, w = img.shape\n h = int(h / scale) * scale\n w = int(w / scale) * scale\n img = img[0:h, 0:w]\n return img\n\ndef checkpoint_dir(config):\n if config.is_train:\n return os.path.join('./{}'.format(config.checkpoint_dir), \"train.h5\")\n else:\n return os.path.join('./{}'.format(config.checkpoint_dir), \"test.h5\")\n\ndef preprocess(path ,scale = 3):\n \"\"\"\n Args:\n path: the image directory path\n scale: the image need to scale \n \"\"\"\n img = imread(path)\n\n label_ = modcrop(img, scale)\n \n input_ = cv2.resize(label_,None,fx = 1.0/scale ,fy = 1.0/scale, interpolation = cv2.INTER_CUBIC) # Resize by scaling factor\n\n kernel_size = (7, 7);\n sigma = 3.0;\n #input_ = cv2.GaussianBlur(input_, kernel_size, sigma);\n #checkimage(input_)\n\n return input_, label_\n\ndef prepare_data(dataset=\"Train\",Input_img=\"\"):\n \"\"\"\n Args:\n dataset: choose train dataset or test dataset\n For train dataset, output data would be ['.../t1.bmp', '.../t2.bmp',..., 't99.bmp']\n \"\"\"\n if dataset == \"Train\":\n data_dir = os.path.join(os.getcwd(), dataset) # Join the Train dir to current directory\n data = glob.glob(os.path.join(data_dir, \"*.bmp\")) # make set of all dataset file path\n else:\n if Input_img !=\"\":\n data = [os.path.join(os.getcwd(),Input_img)]\n else:\n data_dir = os.path.join(os.path.join(os.getcwd(), dataset), \"Set5\")\n data = glob.glob(os.path.join(data_dir, \"*.bmp\")) # make set of all dataset file path\n print(data)\n return data\n\ndef load_data(is_train, test_img):\n if is_train:\n data = prepare_data(dataset=\"Train\")\n else:\n if test_img != \"\":\n return prepare_data(dataset=\"Test\",Input_img=test_img)\n data = prepare_data(dataset=\"Test\")\n return data\n\ndef make_sub_data(data, config):\n \"\"\"\n Make the sub_data set\n Args:\n data : the set of all file path \n config : the all flags\n \"\"\"\n sub_input_sequence = []\n sub_label_sequence = []\n for i in range(len(data)):\n input_, label_, = preprocess(data[i], config.scale) # do bicbuic\n if len(input_.shape) == 3: # is color\n h, w, c = input_.shape\n else:\n h, w = input_.shape # is grayscale\n \n if not config.is_train:\n input_ = imread(data[i])\n input_ = input_ / 255.0\n sub_input_sequence.append(input_)\n return sub_input_sequence, sub_label_sequence\n\n # NOTE: make subimage of LR and HR\n\n # Input \n for x in range(0, h - config.image_size + 1, config.stride):\n for y in range(0, w - config.image_size + 1, config.stride):\n\n sub_input = input_[x: x + config.image_size, y: y + config.image_size] # 17 * 17\n\n\n # Reshape the subinput and sublabel\n sub_input = 
sub_input.reshape([config.image_size, config.image_size, config.c_dim])\n\n # Normalize\n sub_input = sub_input / 255.0\n\n # Add to sequence\n sub_input_sequence.append(sub_input)\n\n # Label (scaled by the scale factor)\n for x in range(0, h * config.scale - config.image_size * config.scale + 1, config.stride * config.scale):\n for y in range(0, w * config.scale - config.image_size * config.scale + 1, config.stride * config.scale):\n sub_label = label_[x: x + config.image_size * config.scale, y: y + config.image_size * config.scale] # 17r * 17r\n \n # Reshape the subinput and sublabel\n sub_label = sub_label.reshape([config.image_size * config.scale , config.image_size * config.scale, config.c_dim])\n # Normalize\n sub_label = sub_label / 255.0\n # Add to sequence\n sub_label_sequence.append(sub_label)\n\n return sub_input_sequence, sub_label_sequence\n\n\ndef read_data(path):\n \"\"\"\n Read h5 format data file\n\n Args:\n path: file path of desired file\n data: '.h5' file format that contains input values\n label: '.h5' file format that contains label values \n \"\"\"\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_\n\ndef make_data_hf(input_, label_, config):\n \"\"\"\n Make input data as h5 file format\n Depending on the \"is_train\" flag value, the save path changes.\n \"\"\"\n # Check the checkpoint dir; if it does not exist, create it\n if not os.path.isdir(os.path.join(os.getcwd(),config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(),config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir + '/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir + '/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('input', data=input_)\n hf.create_dataset('label', data=label_)\n\ndef input_setup(config):\n \"\"\"\n Read image files, make their sub-images, and save them in h5 file format\n \"\"\"\n\n # Load data paths; if is_train is False, get test data\n data = load_data(config.is_train, config.test_img)\n\n\n # Make sub_input and sub_label sequences (whole images when is_train is False)\n sub_input_sequence, sub_label_sequence = make_sub_data(data, config)\n\n\n # Convert the lists to numpy arrays\n arrinput = np.asarray(sub_input_sequence) # [?, 17, 17, 3]\n arrlabel = np.asarray(sub_label_sequence) # [?, 17 * scale , 17 * scale, 3]\n \n print(arrinput.shape)\n make_data_hf(arrinput, arrlabel, config)\n\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lizhuoling/RSN
[ "d649d017ef4d73b5800225e56ee2f8879d0b870b" ]
[ "exps/Res18.coco/train.py" ]
[ "\"\"\"\n@author: Yuanhao Cai\n@date: 2020.03\n\"\"\"\n\nimport argparse\nimport time\n\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom cvpack.torch_modeling.engine.engine import Engine\nfrom cvpack.utils.pyt_utils import ensure_dir\n\nfrom config import cfg\nfrom network import RSN \nfrom lib.utils.dataloader import get_train_loader\nfrom lib.utils.solver import make_lr_scheduler, make_optimizer\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n with Engine(cfg, custom_parser=parser) as engine:\n logger = engine.setup_log(\n name='train', log_dir=cfg.OUTPUT_DIR, file_name='log.txt')\n args = parser.parse_args()\n ensure_dir(cfg.OUTPUT_DIR)\n\n model = RSN(cfg, run_efficient=cfg.RUN_EFFICIENT)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n num_gpu = len(engine.devices) \n # default num_gpu: 8, adjust iter settings\n cfg.SOLVER.CHECKPOINT_PERIOD = \\\n int(cfg.SOLVER.CHECKPOINT_PERIOD * 8 / num_gpu)\n cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * 8 / num_gpu)\n optimizer = make_optimizer(cfg, model, num_gpu)\n scheduler = make_lr_scheduler(cfg, optimizer)\n\n engine.register_state(\n scheduler=scheduler, model=model, optimizer=optimizer)\n\n if engine.distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank],\n broadcast_buffers=False, )\n\n if engine.continue_state_object:\n engine.restore_checkpoint(is_restore=False)\n else:\n if cfg.MODEL.WEIGHT:\n engine.load_checkpoint(cfg.MODEL.WEIGHT, is_restore=False)\n\n data_loader = get_train_loader(cfg, num_gpu=num_gpu, is_dist=engine.distributed)\n\n # ------------ do training ---------------------------- #\n logger.info(\"\\n\\nStart training with pytorch version {}\".format(\n torch.__version__))\n\n max_iter = len(data_loader)\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n tb_writer = SummaryWriter(cfg.TENSORBOARD_DIR)\n\n model.train()\n\n time1 = time.time()\n for iteration, (images, valids, labels) in enumerate(\n data_loader, engine.state.iteration):\n iteration = iteration + 1\n images = images.to(device)\n valids = valids.to(device)\n labels = labels.to(device)\n\n scheduler.step()\n loss_dict = model(images, valids, labels)\n losses = sum(loss for loss in loss_dict.values())\n\n optimizer.zero_grad()\n losses.backward()\n optimizer.step()\n\n if cfg.RUN_EFFICIENT:\n del images, valids, labels, losses\n\n if engine.local_rank == 0:\n if iteration % 20 == 0 or iteration == max_iter:\n log_str = 'Iter:%d, LR:%.1e, ' % (\n iteration, optimizer.param_groups[0][\"lr\"] / num_gpu)\n for key in loss_dict:\n tb_writer.add_scalar(\n key, loss_dict[key].mean(), global_step=iteration)\n log_str += key + ': %.3f, ' % float(loss_dict[key])\n\n time2 = time.time()\n elapsed_time = time2 - time1\n time1 = time2\n required_time = elapsed_time / 20 * (max_iter - iteration)\n hours = required_time // 3600\n mins = required_time % 3600 // 60\n log_str += 'To Finish: %dh%dmin,' % (hours, mins) \n\n logger.info(log_str)\n\n if iteration % checkpoint_period == 0 or iteration == max_iter:\n engine.update_iteration(iteration)\n if engine.distributed and (engine.local_rank == 0):\n engine.save_and_link_checkpoint(cfg.OUTPUT_DIR)\n elif not engine.distributed:\n engine.save_and_link_checkpoint(cfg.OUTPUT_DIR)\n\n if iteration >= max_iter:\n logger.info('Finish training process!')\n break\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.device", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Shane-Neeley/DrugMarket
[ "e3859bbb7e906f9d4a2d355bdf6d03a4e067de6b" ]
[ "old/hyperparameter_optimization_tf.py" ]
[ "# For the class Data Science: Practical Deep Learning Concepts in Theano and TensorFlow\n# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow\n# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow\nfrom __future__ import print_function, division\nfrom builtins import range\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom process_data import get_data\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.INFO)\n\ndef random_search():\n\n X, Y, data = get_data()\n X, Y = shuffle(X, Y)\n Ntrain = int(0.75 * len(X))\n Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]\n Xtest, Ytest = X[Ntrain:], Y[Ntrain:]\n\n # Make copies of the small data (because variance matters?)\n # Xtrain = np.concatenate((Xtrain,Xtrain,Xtrain), 0)\n # Ytrain = np.concatenate((Ytrain,Ytrain,Ytrain), 0)\n\n N = Xtrain.shape[0]\n D = Xtrain.shape[1]\n K = len(set(Ytrain)) # classes, only 0,1 here so len=2\n\n Ntest = Xtrain.shape[0]\n Dtest = Xtrain.shape[1]\n\n print('size Xtrain: ' + str(Xtrain.shape))\n print('size Ytrain: ' + str(Ytrain.shape))\n print('size Xtest: ' + str(Xtest.shape))\n print('size Ytest: ' + str(Ytest.shape))\n\n # turn Y into an indicator matrix for training\n T = np.zeros((N, K))\n for i in range(N):\n T[i, Ytrain[i]] = 1\n\n Ttest = np.zeros((Ntest, K))\n for i in range(Dtest):\n Ttest[i, Ytest[i]] = 1\n\n\n # tensor flow variables are not the same as regular Python variables\n def init_weights(shape):\n return tf.Variable(tf.random_normal(shape, stddev=0.01))\n\n def forward(X, W1, b1, W2, b2):\n Z = tf.nn.sigmoid(tf.matmul(X, W1) + b1)\n return tf.matmul(Z, W2) + b2\n\n # starting hyperparameters\n M = 100 # hidden units\n nHidden = 1 # hidden layers #TODO: how to add more layers in TF?\n log_lr = -4 # learning rate\n log_l2 = -2 # l2 regularization #TODO: how to add regularization in TF?\n max_tries = 30\n\n # loop through all possible hyperparameter settings\n best_validation_rate = 0\n # best_nHidden = None\n best_M = None\n best_lr = None\n best_l2 = None\n validation_accuracies = []\n\n for _ in range(max_tries):\n print('on try: ' + str(_+1) + '/' + str(max_tries))\n\n tfX = tf.placeholder(tf.float32, [None, D])\n tfY = tf.placeholder(tf.float32, [None, K])\n tfXtest = tf.placeholder(tf.float32, [None, D])\n tfYtest = tf.placeholder(tf.float32, [None, K])\n\n W1 = init_weights([D, M]) # create symbolic variables\n b1 = init_weights([M])\n W2 = init_weights([M, K])\n b2 = init_weights([K])\n\n logits = forward(tfX, W1, b1, W2, b2)\n\n cost = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=tfY,\n logits=logits\n )\n )\n\n # Normal gradient descent, # input parameter is the learning rate\n # train_op = tf.train.GradientDescentOptimizer(10**log_lr).minimize(cost) # construct an optimizer\n\n # Optimized gradient descent\n train_op = tf.train.RMSPropOptimizer(10**log_lr, decay=0.99, momentum=0.9).minimize(cost)\n\n # Prediction operation, input parameter is the axis on which to choose the max\n predict_op = tf.argmax(logits, 1)\n\n # just stuff that has to be done\n sess = tf.Session()\n init = tf.global_variables_initializer()\n sess.run(init)\n\n epochs = 3000\n for i in range(epochs):\n sess.run(train_op, feed_dict={tfX: Xtrain, tfY: T})\n pred = sess.run(predict_op, feed_dict={tfX: Xtrain, tfY: T})\n test = sess.run(predict_op, feed_dict={tfX: Xtest, tfY: Ttest})\n\n train_accuracy = 
np.mean(Ytrain == pred)\n validation_accuracy = np.mean(Ytest == test)\n print(\n \"validation_accuracy: %.3f, train_accuracy: %.3f, settings: %s (layers), %s (log_lr), %s (log_l2)\" %\n (validation_accuracy, train_accuracy,\n [M] * nHidden, log_lr, log_l2)\n )\n\n # keep the best parameters, then make modifications to them\n if validation_accuracy > best_validation_rate:\n best_validation_rate = validation_accuracy\n best_M = M\n # best_nHidden = nHidden\n best_lr = log_lr\n best_l2 = log_l2\n\n # select new hyperparams\n # nHidden = best_nHidden + np.random.randint(-1, 2) # -1, 0, or 1, add, remove or keep same the layers\n # nHidden = max(1, nHidden)\n M = best_M + np.random.randint(-1, 2) * 50\n M = max(10, M)\n log_lr = best_lr + np.random.randint(-1, 2)\n log_l2 = best_l2 + np.random.randint(-1, 2)\n\n\n # TODO: save these in mongodb, then read them and see if we beat it, in a new file run forward on best params\n print(\"Best validation_accuracy:\", best_validation_rate)\n # print(\"Mean validation_accuracy:\", np.mean(validation_accuracies))\n print(\"Best settings:\")\n print(\"Best M (hidden units):\", best_M)\n # print(\"Best nHidden (hidden layers):\", best_nHidden)\n print(\"Best learning_rate:\", best_lr)\n print(\"Best l2 regularization:\", best_l2)\n\n\nif __name__ == '__main__':\n random_search()\n" ]
[ [ "tensorflow.matmul", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.train.RMSPropOptimizer", "sklearn.utils.shuffle", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.mean", "tensorflow.logging.set_verbosity", "tensorflow.Session", "numpy.random.randint", "tensorflow.argmax", "numpy.zeros", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
neurospin-projects/2022_jchavas_cingulate_inhibitory_control
[ "30e63f0af62fa83abd3858720ce3f3a15a3fbaea" ]
[ "contrastive/utils/plots/visu_utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This software and supporting documentation are distributed by\n# Institut Federatif de Recherche 49\n# CEA/NeuroSpin, Batiment 145,\n# 91191 Gif-sur-Yvette cedex\n# France\n#\n# This software is governed by the CeCILL license version 2 under\n# French law and abiding by the rules of distribution of free software.\n# You can use, modify and/or redistribute the software under the\n# terms of the CeCILL license version 2 as circulated by CEA, CNRS\n# and INRIA at the following URL \"http://www.cecill.info\".\n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n# same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license version 2 and that you accept its terms.\nimport logging\n\nimport matplotlib.pyplot as plt\nimport PIL\nfrom torchvision.transforms import ToTensor\n\nlogger = logging.getLogger(__name__)\n\n\ndef buffer_to_image(buffer):\n \"\"\"Transforms IO buffer into PNG image\"\"\"\n\n plt.savefig(buffer, format='png')\n buffer.seek(0)\n plt.close('all')\n image = PIL.Image.open(buffer)\n image = ToTensor()(image).unsqueeze(0)[0]\n return image\n\n\ndef prime_factors(n):\n i = 2\n factors = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Devsart/MecFlu-TransCal-Comp-EM
[ "65997ad52decbd18ed9f2cba24773831a60821cd" ]
[ "Exercicios_Capitulo2/pendulo_amortecido_forcado.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 26 11:40:06 2021\n\n@author: matheus.sartor\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\ndef pendulo_amortecido_forcado(dt,theta_i,gamma,n_iter=2000,v=0.,g=9.81):\n t = 0 \n theta_i = (2*np.pi*theta_i)/360\n theta = theta_i\n v_theta_n = v\n w = 2*np.pi\n w0 = (3/2)*w\n B = w0/4.\n vv = [v_theta_n]\n theta_n = [theta]\n tt = [t]\n for i in range(n_iter):\n theta = theta + dt*v_theta_n\n theta_n.append(theta)\n v_theta_n = -dt*((w0**2)*np.sin(theta)+2*B*v_theta_n-gamma*(w0**2)*np.cos(w*t)) + v_theta_n\n vv.append(v_theta_n)\n t += dt\n tt.append(t)\n return (vv,theta_n,tt)\n \nif __name__ == '__main__':\n name = 'pendulo amortecido forçado'\n Path(os.path.join('results', name)).mkdir(parents=True, exist_ok=True)\n vv1, xn1, tt1 = pendulo_amortecido_forcado(1e-2,0.,.2,6*(10**2))\n vv2, xn2, tt2 = pendulo_amortecido_forcado(1e-3,0.,.9,6*(10**3))\n vv3, xn3, tt3 = pendulo_amortecido_forcado(1e-4,0.,1.06,160000)\n vv4, xn4, tt4 = pendulo_amortecido_forcado(1e-5,0.,1.073,3*(10**6))\n fig, axs = plt.subplots(2,2)\n fig.suptitle('Espaço de Fases do Pêndulo Amortecido Forçado')\n fig.set_size_inches(10, 10)\n axs[0][0].plot(xn1[::],vv1[::],'k',linewidth=.7,label = '$\\\\gamma = 0.2$')\n axs[0][0].set(xlabel='$\\\\theta$[rad]',ylabel='velocidade[rad/s]')\n axs[0][0].grid()\n axs[0][0].legend()\n axs[0][1].plot(xn2,vv2,'k',linewidth=.7,label='$\\\\gamma = 0.9$')\n axs[0][1].set(xlabel='$\\\\theta$[rad]',ylabel='velocidade[rad/s]')\n axs[0][1].legend(loc='lower right')\n axs[1][0].plot(xn3,vv3,'k',linewidth=.7,label='$\\\\gamma = 1.06$')\n axs[1][0].set(xlabel='$\\\\theta$[rad]',ylabel='velocidade[rad/s]')\n axs[1][0].legend(loc='lower right')\n axs[1][1].plot(xn4,vv4,'k',linewidth=.7,label='$\\\\gamma = 1.073$')\n axs[1][1].set(xlabel='$\\\\theta$[rad]',ylabel='velocidade[rad/s]')\n axs[1][1].legend(loc='upper right')\n #axs[1].set_ylim([-10,10])\n plt.savefig(os.path.join('results', name, 'pendulo_amortecido_forcado_fases.png'))" ]
[ [ "numpy.cos", "matplotlib.pyplot.subplots", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jeffresh/deployment-of-machine-learning-models
[ "bc4a5ff5713601a1a4b2c73292b1f276f22bf258" ]
[ "assignment_3/pipeline.py" ]
[ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nimport preprocessors as pp\nimport config\n\n\ntitanic_pipe = Pipeline(steps=[\n # complete with the list of steps from the preprocessors file\n # and the list of variables from the config\n ('categorical_imputer', pp.CategoricalImputer(\n variables=config.CATEGORICAL_VARS)),\n\n ('missing indicator', pp.MissingIndicator(\n variables=config.NUMERICAL_VARS)),\n\n ('numerical_imputer', pp.NumericalImputer(\n variables=config.NUMERICAL_VARS)),\n\n ('cabin_variable', pp.ExtractFirstLetter(variables=config.CABIN)),\n\n ('rare_label_encoder', pp.RareLabelCategoricalEncoder(\n tol=0.05, variables=config.CATEGORICAL_VARS)),\n\n ('categorical_encoder', pp.CategoricalEncoder(\n variables=config.CATEGORICAL_VARS)),\n\n ('scaler', StandardScaler()),\n\n ('Linear_model', LogisticRegression(C=0.0005, random_state=0))\n]\n)\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "sklearn.linear_model.LogisticRegression" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
margiki/Improving-Interpretability-Medical-Imaging
[ "428cf5af4e154dfcac734ba6150e5adcb583460c" ]
[ "robustness/datasets.py" ]
[ "\"\"\"\nModule containing all the supported datasets, which are subclasses of the\nabstract class :class:`robustness.datasets.DataSet`. \n\nCurrently supported datasets:\n\n- ImageNet (:class:`robustness.datasets.ImageNet`)\n- RestrictedImageNet (:class:`robustness.datasets.RestrictedImageNet`)\n- CIFAR-10 (:class:`robustness.datasets.CIFAR`)\n- CINIC-10 (:class:`robustness.datasets.CINIC`)\n- A2B: horse2zebra, summer2winter_yosemite, apple2orange\n (:class:`robustness.datasets.A2B`)\n\n:doc:`../example_usage/training_lib_part_2` shows how to add custom\ndatasets to the library.\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom glob import glob\nimport torch as ch\nimport torch.utils.data\nimport torchvision\nfrom torch import nn\nfrom torch.utils.data import Dataset\nfrom functools import partial\nfrom collections import OrderedDict\n\nfrom . import imagenet_models, cifar_models\nfrom torchvision import transforms, datasets\n\nfrom .tools import constants\nfrom . import data_augmentation as da\nfrom . import loaders\n\nfrom .tools.helpers import get_label_mapping\n\n###\n# Datasets: (all subclassed from dataset)\n# In order:\n## ImageNet\n## Restricted Imagenet \n## Other Datasets:\n## - CIFAR\n## - CINIC\n## - A2B (orange2apple, horse2zebra, etc)\n###\n\nclass DataSet(object):\n '''\n Base class for representing a dataset. Meant to be subclassed, with\n subclasses implementing the `get_model` function. \n '''\n\n def __init__(self, ds_name, data_path, **kwargs):\n \"\"\"\n Args:\n ds_name (str) : string identifier for the dataset\n data_path (str) : path to the dataset \n num_classes (int) : *required kwarg*, the number of classes in\n the dataset\n mean (ch.tensor) : *required kwarg*, the mean to normalize the\n dataset with (e.g. :samp:`ch.tensor([0.4914, 0.4822,\n 0.4465])` for CIFAR-10)\n std (ch.tensor) : *required kwarg*, the standard deviation to\n normalize the dataset with (e.g. :samp:`ch.tensor([0.2023,\n 0.1994, 0.2010])` for CIFAR-10)\n custom_class (type) : *required kwarg*, a\n :samp:`torchvision.models` class corresponding to the\n dataset, if it exists (otherwise :samp:`None`)\n label_mapping (dict[int,str]) : *required kwarg*, a dictionary\n mapping from class numbers to human-interpretable class\n names (can be :samp:`None`)\n transform_train (torchvision.transforms) : *required kwarg*, \n transforms to apply to the training images from the\n dataset\n transform_test (torchvision.transforms) : *required kwarg*,\n transforms to apply to the validation images from the\n dataset\n \"\"\"\n required_args = ['num_classes', 'mean', 'std', 'custom_class',\n 'label_mapping', 'transform_train', 'transform_test']\n assert set(kwargs.keys()) == set(required_args), \"Missing required args, only saw %s\" % kwargs.keys()\n self.ds_name = ds_name\n self.data_path = data_path\n self.__dict__.update(kwargs)\n\n def get_model(self, arch, pretrained):\n '''\n Should be overriden by subclasses. Also, you will probably never\n need to call this function, and should instead by using\n `model_utils.make_and_restore_model </source/robustness.model_utils.html>`_.\n\n Args:\n arch (str) : name of architecture \n pretrained (bool): whether to try to load torchvision \n pretrained checkpoint\n\n Returns:\n A model with the given architecture that works for each\n dataset (e.g. 
with the right input/output dimensions).\n '''\n\n raise NotImplementedError\n\n def make_loaders(self, workers, batch_size, data_aug=True, subset=None,\n subset_start=0, subset_type='rand', val_batch_size=None,\n only_val=False, shuffle_train=True, shuffle_val=False):\n '''\n Args:\n workers (int) : number of workers for data fetching (*required*).\n batch_size (int) : batch size for the data loaders (*required*).\n data_aug (bool) : whether or not to do train data augmentation.\n subset (None|int) : if given, the returned training data loader\n will only use a subset of the training data; this should be a\n number specifying the number of training data points to use.\n subset_start (int) : only used if `subset` is not None; this specifies the\n starting index of the subset.\n subset_type (\"rand\"|\"first\"|\"last\") : only used if `subset is\n not `None`; \"rand\" selects the subset randomly, \"first\"\n uses the first `subset` images of the training data, and\n \"last\" uses the last `subset` images of the training data.\n seed (int) : only used if `subset == \"rand\"`; allows one to fix\n the random seed used to generate the subset (defaults to 1).\n val_batch_size (None|int) : if not `None`, specifies a\n different batch size for the validation set loader.\n only_val (bool) : If `True`, returns `None` in place of the\n training data loader\n shuffle_train (bool) : Whether or not to shuffle the training data\n in the returned DataLoader.\n shuffle_val (bool) : Whether or not to shuffle the test data in the\n returned DataLoader.\n\n Returns:\n A training loader and validation loader according to the\n parameters given. These are standard PyTorch data loaders, and\n thus can just be used via:\n\n >>> train_loader, val_loader = ds.make_loaders(workers=8, batch_size=128) \n >>> for im, lab in train_loader:\n >>> # Do stuff...\n '''\n transforms = (self.transform_train, self.transform_test)\n return loaders.make_loaders(workers=workers,\n batch_size=batch_size,\n transforms=transforms,\n data_path=self.data_path,\n data_aug=data_aug,\n dataset=self.ds_name,\n label_mapping=self.label_mapping,\n custom_class=self.custom_class,\n val_batch_size=val_batch_size,\n subset=subset,\n subset_start=subset_start,\n subset_type=subset_type,\n only_val=only_val,\n shuffle_train=shuffle_train,\n shuffle_val=shuffle_val)\n\nclass MNIST(DataSet):\n def __init__(self, data_path, **kwargs):\n ds_kwargs = {\n 'num_classes': 10,\n 'mean': ch.tensor([0.1307]),\n 'std': ch.tensor([0.3081]),\n 'custom_class': datasets.MNIST,\n 'label_mapping': None,\n 'transform_train': transforms.Compose([torchvision.transforms.ToTensor()]), # da.TRAIN_TRANSFORMS_DEFAULT(28),\n 'transform_test': transforms.Compose([torchvision.transforms.ToTensor()]) # da.TEST_TRANSFORMS_DEFAULT(28),\n }\n super(MNIST, self).__init__('mnist', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n if pretrained:\n raise ValueError(\"MNIST does not support pytorch_pretrained=True\")\n return cifar_models.__dict__[arch]()\n\ndef add_background(image):\n \"\"\"\n Takes a processed image of 224x224 and place it on a 448x448 background\n\n - image (torch.Tensor) of size 224x224\n\n Returns:\n - upscaled_image (torch.Tensor) of size 448x448\n \"\"\"\n sz = 224\n \n # pick a random position to place the image\n indices = torch.randint(0, sz+1, (2,))\n x = indices[0].item()\n y = indices[1].item()\n\n upscaled_image = torch.zeros((3, 448, 448))\n upscaled_image[..., x:x+sz, y:y+sz] = image\n\n return upscaled_image\n\nclass 
HAM10000(DataSet):\n    def __init__(self, data_path, file_name, label_mapping, custom_class, \\\n                 apply_ablation=False, saliency_dir=None, perc_ablation=0, **kwarg):\n        \"\"\"\n        Args:\n        - data_path (str): path to folder with the dataset\n        - file_name (str): the CSV file keeping the dataset split.\n            It is passed as parameter to HAM10000_dataset\n        - label_mapping (OrderedDict): mapping between label id and class\n            OrderedDict([\n                (0, 'bkl'),\n                (1, 'nv'),\n                (2, 'vasc')\n            ])\n        - apply_ablation(boolean): If `True`, then don't apply transforms.CenterCrop and transforms.Resize\n            because they were already applied when retrieving the image\n        \"\"\"\n        # Compute number of classes from the csv\n        self.num_classes = len(list(label_mapping.keys()))\n        self.saliency_dir = saliency_dir\n        self.perc_ablation = perc_ablation\n\n        input_size = 224\n\n        # Use imagenet mean and std, because I use a pre-trained model.\n        imagenet_mean = [0.485, 0.456, 0.406] # RGB\n        imagenet_std = [0.229, 0.224, 0.225]\n\n        train_transforms, test_transforms = [], []\n        if apply_ablation == False: \n            \"\"\"\n            If `apply_ablation` is True, then these transforms are skipped,\n            because they were already applied when retrieving the image.\n            \"\"\"\n            train_transforms.extend([\n                transforms.CenterCrop(450), \n                transforms.Resize((input_size, input_size))])\n\n            test_transforms.extend([\n                transforms.CenterCrop(450),\n                transforms.Resize((input_size, input_size))\n            ])\n\n        train_transforms.extend([\n            transforms.ColorJitter(0.2, 0.2, 0.2, 0),\n            transforms.RandomHorizontalFlip(),\n            transforms.RandomVerticalFlip(),\n            transforms.RandomAffine(10),\n            transforms.RandomRotation(50),\n            transforms.ToTensor()\n        ])\n        TRAIN_TRANSFORMS_HAM10000 = transforms.Compose(train_transforms)\n        \n        test_transforms.extend([transforms.ToTensor()])\n        TEST_TRANSFORMS_HAM10000 = transforms.Compose(test_transforms)\n\n        ds_kwargs = {\n            'num_classes': self.num_classes,\n            'mean': ch.tensor(imagenet_mean), # Just add again the mean and std. 
I didn't find where this is used.\n 'std': ch.tensor(imagenet_std),\n 'custom_class': partial(custom_class, file_name=file_name, \n apply_ablation=apply_ablation, saliency_dir=saliency_dir, perc_ablation=perc_ablation),\n 'label_mapping': label_mapping,\n 'transform_train': TRAIN_TRANSFORMS_HAM10000,\n 'transform_test': TEST_TRANSFORMS_HAM10000\n }\n super().__init__('ham10000', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained=True, custom_head=None):\n \"\"\"\n - custom_head (nn.Module) - custom head architecture to add after the convolutional layers\n \"\"\"\n # if arch not in [\"resnet18\", \"resnet34\", \"resnet50\", \"resnet101\", \"resnet152\"]:\n # raise ValueError(\"Currently HAM10000 supports only Resnet\")\n\n # The model is initialized from an ImageNet model, which has 1000 classes\n # Thus, I need to replace the last layer to have `self.num_classes` logits\n\n model = imagenet_models.__dict__[arch](num_classes=1000, pretrained=pretrained)\n freeze(model)\n num_ftrs = model.fc.in_features\n\n if custom_head:\n model.fc = custom_head\n else:\n fc_activation = nn.ReLU()\n\n model.fc = nn.Sequential(\n nn.Dropout(p=0.25),\n nn.Linear(num_ftrs, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(512, self.num_classes)\n )\n \n # Initialize the weights in the head\n for m in model.fc.modules():\n if isinstance(m, nn.Linear):\n nn.init.kaiming_normal_(\n m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n return model\n\nclass HAM10000_3cls(HAM10000):\n def __init__(self, data_path, file_name, apply_ablation=False, saliency_dir=None, perc_ablation=0,\n use_dropout_head=False, dropout_perc=0, **kwarg):\n self.use_dropout_head = use_dropout_head\n self.dropout_perc = dropout_perc\n \n label_mapping = OrderedDict([\n (0, 'nv'),\n (1, 'mel'),\n (2, 'bkl')\n ])\n\n self.num_classes = len(label_mapping)\n super().__init__(data_path, file_name, label_mapping, custom_class=HAM10000_dataset_3cls_balanced,\n apply_ablation=apply_ablation, saliency_dir=saliency_dir, perc_ablation=perc_ablation, **kwarg)\n\n def get_model(self, arch='resnet18', pretrained=True):\n if arch == 'resnet50':\n prev_size = 2048\n else:\n prev_size = 512\n\n if self.use_dropout_head:\n custom_head = nn.Sequential(\n nn.Linear(prev_size, 512),\n nn.ReLU(),\n nn.BatchNorm1d(512),\n nn.Dropout(self.dropout_perc),\n nn.Linear(512, self.num_classes)\n )\n else:\n custom_head = nn.Sequential(\n nn.Linear(prev_size, self.num_classes)\n )\n\n return super().get_model(arch, pretrained=pretrained, custom_head=custom_head)\n\nclass ImageNet(DataSet):\n '''\n ImageNet Dataset [DDS+09]_.\n\n Requires ImageNet in ImageFolder-readable format. \n ImageNet can be downloaded from http://www.image-net.org. See\n `here <https://pytorch.org/docs/master/torchvision/datasets.html#torchvision.datasets.ImageFolder>`_\n for more information about the format.\n\n .. [DDS+09] Deng, J., Dong, W., Socher, R., Li, L., Li, K., & Fei-Fei, L. (2009). ImageNet: A large-scale hierarchical image database. 
2009 IEEE Conference on Computer Vision and Pattern Recognition, 248-255.\n\n '''\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_kwargs = {\n 'num_classes': 1000,\n 'mean': ch.tensor([0.485, 0.456, 0.406]),\n 'std': ch.tensor([0.229, 0.224, 0.225]),\n 'custom_class': None,\n 'label_mapping': None,\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n super(ImageNet, self).__init__('imagenet', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n return imagenet_models.__dict__[arch](num_classes=self.num_classes,\n pretrained=pretrained)\n\nclass RestrictedImageNet(DataSet):\n '''\n RestrictedImagenet Dataset [TSE+19]_\n\n A subset of ImageNet with the following labels:\n\n * Dog (classes 151-268)\n * Cat (classes 281-285)\n * Frog (classes 30-32)\n * Turtle (classes 33-37)\n * Bird (classes 80-100)\n * Monkey (classes 365-382)\n * Fish (classes 389-397)\n * Crab (classes 118-121)\n * Insect (classes 300-319)\n\n To initialize, just provide the path to the full ImageNet dataset\n (no special formatting required).\n\n .. [TSE+19] Tsipras, D., Santurkar, S., Engstrom, L., Turner, A., &\n Madry, A. (2019). Robustness May Be at Odds with Accuracy. ICLR\n 2019.\n '''\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_name = 'restricted_imagenet'\n ds_kwargs = {\n 'num_classes': len(constants.RESTRICTED_IMAGNET_RANGES),\n 'mean': ch.tensor([0.4717, 0.4499, 0.3837]),\n 'std': ch.tensor([0.2600, 0.2516, 0.2575]),\n 'custom_class': None,\n 'label_mapping': get_label_mapping(ds_name,\n constants.RESTRICTED_IMAGNET_RANGES),\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n super(RestrictedImageNet, self).__init__(ds_name,\n data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError(\"Dataset doesn't support pytorch_pretrained\")\n return imagenet_models.__dict__[arch](num_classes=self.num_classes)\n\nclass CustomImageNet(DataSet):\n '''\n CustomImagenet Dataset \n\n A subset of ImageNet with the user-specified labels\n\n To initialize, just provide the path to the full ImageNet dataset\n along with a list of lists of wnids to be grouped together\n (no special formatting required).\n\n '''\n def __init__(self, data_path, custom_grouping, **kwargs):\n \"\"\"\n \"\"\"\n ds_name = 'custom_imagenet'\n ds_kwargs = {\n 'num_classes': len(custom_grouping),\n 'mean': ch.tensor([0.4717, 0.4499, 0.3837]),\n 'std': ch.tensor([0.2600, 0.2516, 0.2575]),\n 'custom_class': None,\n 'label_mapping': get_label_mapping(ds_name,\n custom_grouping),\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n super(CustomImageNet, self).__init__(ds_name,\n data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError(\"Dataset doesn't support pytorch_pretrained\")\n return imagenet_models.__dict__[arch](num_classes=self.num_classes)\n\nclass CIFAR(DataSet):\n \"\"\"\n CIFAR-10 dataset [Kri09]_.\n\n A dataset with 50k training images and 10k testing images, with the\n following classes:\n\n * Airplane\n * Automobile\n * Bird\n * Cat\n * Deer\n * Dog\n * Frog\n * Horse\n * Ship\n * Truck\n\n .. [Kri09] Krizhevsky, A (2009). Learning Multiple Layers of Features\n from Tiny Images. 
Technical Report.\n \"\"\"\n def __init__(self, data_path='/tmp/', **kwargs):\n \"\"\"\n \"\"\"\n ds_kwargs = {\n 'num_classes': 10,\n 'mean': ch.tensor([0.4914, 0.4822, 0.4465]),\n 'std': ch.tensor([0.2023, 0.1994, 0.2010]),\n 'custom_class': datasets.CIFAR10,\n 'label_mapping': None,\n 'transform_train': da.TRAIN_TRANSFORMS_DEFAULT(32),\n 'transform_test': da.TEST_TRANSFORMS_DEFAULT(32)\n }\n super(CIFAR, self).__init__('cifar', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n return cifar_models.__dict__[arch](num_classes=self.num_classes)\n\nclass CINIC(DataSet):\n \"\"\"\n CINIC-10 dataset [DCA+18]_.\n\n A dataset with the same classes as CIFAR-10, but with downscaled images\n from various matching ImageNet classes added in to increase the size of\n the dataset.\n\n .. [DCA+18] Darlow L.N., Crowley E.J., Antoniou A., and A.J. Storkey\n (2018) CINIC-10 is not ImageNet or CIFAR-10. Report\n EDI-INF-ANC-1802 (arXiv:1810.03505)\n \"\"\"\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n ds_kwargs = {\n 'num_classes': 10,\n 'mean': ch.tensor([0.47889522, 0.47227842, 0.43047404]),\n 'std': ch.tensor([0.24205776, 0.23828046, 0.25874835]),\n 'custom_class': None,\n 'label_mapping': None,\n 'transform_train': da.TRAIN_TRANSFORMS_DEFAULT(32),\n 'transform_test': da.TEST_TRANSFORMS_DEFAULT(32)\n }\n super(CINIC, self).__init__('cinic', data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError('CINIC does not support pytorch_pretrained=True')\n return cifar_models.__dict__[arch](num_classes=self.num_classes)\n\nclass A2B(DataSet):\n \"\"\"\n A-to-B datasets [ZPI+17]_\n\n A general class for image-to-image translation dataset. Currently\n supported are:\n \n * Horse <-> Zebra\n * Apple <-> Orange\n * Summer <-> Winter\n\n .. [ZPI+17] Zhu, J., Park, T., Isola, P., & Efros, A.A. (2017).\n Unpaired Image-to-Image Translation Using Cycle-Consistent\n Adversarial Networks. 2017 IEEE International Conference on\n Computer Vision (ICCV), 2242-2251.\n \"\"\"\n def __init__(self, data_path, **kwargs):\n \"\"\"\n \"\"\"\n _, ds_name = os.path.split(data_path)\n valid_names = ['horse2zebra', 'apple2orange', 'summer2winter_yosemite']\n assert ds_name in valid_names, \\\n f\"path must end in one of {valid_names}, not {ds_name}\"\n ds_kwargs = {\n 'num_classes': 2,\n 'mean': ch.tensor([0.5, 0.5, 0.5]),\n 'custom_class': None,\n 'std': ch.tensor([0.5, 0.5, 0.5]),\n 'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,\n 'label_mapping': None,\n 'transform_test': da.TEST_TRANSFORMS_IMAGENET\n }\n super(A2B, self).__init__(ds_name, data_path, **ds_kwargs)\n\n def get_model(self, arch, pretrained=False):\n \"\"\"\n \"\"\"\n if pretrained:\n raise ValueError('A2B does not support pytorch_pretrained=True')\n return imagenet_models.__dict__[arch](num_classes=self.num_classes)\n\nDATASETS = {\n 'imagenet': ImageNet,\n 'restricted_imagenet': RestrictedImageNet,\n 'custom_imagenet': CustomImageNet,\n 'cifar': CIFAR,\n 'cinic': CINIC,\n 'a2b': A2B,\n 'mnist': MNIST,\n 'ham10000': HAM10000\n}\n'''\nDictionary of datasets. 
A dataset class can be accessed as:\n\n>>> import robustness.datasets\n>>> ds = datasets.DATASETS['cifar']('/path/to/cifar')\n'''\n\n\ndef upsample_dataframe(df):\n \"\"\"\n Given a dataframe, upsample the items to have equal number of items in each class\n \"\"\"\n df_upsampled = df.iloc[0:0]\n\n # compute the maximum count\n counts = df['dx'].value_counts().to_dict()\n maxx = np.max(list(counts.values()))\n\n # multiply the df that many times\n for lesion, count in counts.items():\n upsampling_factor = int(np.ceil(maxx / count))\n\n # upsample\n aux = df.iloc[0:0]\n for i in range(upsampling_factor):\n aux = aux.append(df.loc[df['dx'] == lesion])\n\n # drop the last rows exceeding the max count\n aux.reset_index(inplace=True, drop=True)\n aux.drop(np.arange(maxx, aux.shape[0]), inplace=True)\n\n df_upsampled = df_upsampled.append(aux)\n\n df_upsampled.reset_index(inplace=True, drop=True)\n\n return df_upsampled\n\n# Custom Dataset to load my datasets\nclass HAM10000_dataset(Dataset):\n def __init__(self, root, file_name, train=True, download=None, transform=None, upsample=True, test=False):\n \"\"\"\n Gets called in `loaders.make_loaders` with exactly these parameters.\n\n - test (bool): If `True`, then load the test set!\n\n \"\"\"\n self.transform = transform\n self.file_name = file_name\n\n data = pd.read_csv(os.path.join(root, file_name))\n if test:\n self.df = data.loc[data['split'] == 'test']\n else:\n if train:\n self.df = data.loc[data['split'] == 'train']\n else:\n self.df = data.loc[data['split'] == 'valid']\n\n self.df.reset_index(inplace=True)\n\n if upsample:\n self.df = upsample_dataframe(self.df)\n\n self.df['path'] = root + '/' + self.df.loc[:, 'path_relative']\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, index):\n \"\"\"\n Load data and get label\n \"\"\"\n X, y = self._getitem_index(index)\n y = torch.tensor(y)\n\n if self.transform:\n X = self.transform(X)\n\n return X, y\n\n def _getitem_index(self, index):\n \"\"\"\n Get item without applying any transform.\n \"\"\"\n X = Image.open(self.df['path'][index])\n y = int(self.df['type'][index])\n\n return X, y\n\n def _getitem_image_id(self, image_id):\n row = self.df[self.df.image_id == image_id]\n X = Image.open(row['path'].iloc[0])\n y = int(row['type'].iloc[0])\n\n return X, y\n\ndef ablate_features(image, mask, ablation_type='white'):\n \"\"\"\n Ablate an image, based on a given mask.\n \n Args:\n - image (torch.Tensor of (3, 224, 224))\n - mask (binary torch.Tensor of (3, 224, 224)): Contains value of 1 of the feature which should be removed\n \"\"\"\n # remove ablated feature by making them white\n ablated_image = image * (ch.tensor(1)-mask) + mask\n\n if ablation_type == 'mean':\n # compute the mean\n mean0 = image[0, :, :].mean()\n mean1 = image[1, :, :].mean()\n mean2 = image[2, :, :].mean()\n\n ablated_image[0, :, :] = ablated_image[0, :, :] + mean0 * mask[0, :, :]\n ablated_image[1, :, :] = ablated_image[1, :, :] + mean1 * mask[1, :, :]\n ablated_image[2, :, :] = ablated_image[2, :, :] + mean2 * mask[2, :, :]\n\n return ablated_image\n\ndef get_mask(saliency_map_2d, perc=0.3):\n \"\"\"\n Given a saliency map, return a mask corresponding to a percentage of the top values.\n\n Args:\n - saliency_map (np.ndarray (224, 224))\n - perc (float): percentage of saliency to remove e.g. 
0.3\n\n    Returns:\n    - mask (binary torch.Tensor(3, 224, 224))\n    \"\"\"\n    pixels_sorted = np.sort(saliency_map_2d.reshape(-1)) # sort in ascending order\n    pixels_sorted = pixels_sorted[::-1] # reverse the array\n\n    # Pick the kth value percentage-wise from the top\n    kth_value_index = int(224*224*perc-1)\n    kth_value = pixels_sorted[kth_value_index]\n\n    # Create mask\n    mask = saliency_map_2d >= kth_value # (224, 224)\n    mask = ch.tensor(mask.astype(np.int))\n    mask = mask.repeat(3, 1, 1) # (3, 224, 224)\n\n    return mask\n\ndef save_saliency_map(map, DATA_DIR, model_name, image_id):\n    \"\"\"\n    Given a saliency map for ablating features, save it in the directory 'DATA_DIR/saliency_maps/model_name'\n\n    Args:\n    - map (torch.Tensor (3, 224, 224)) - binary tensor with values 0, 1\n    - DATA_DIR (str) - path to data directory\n    - model_name (str) - used to create the folder for storing the files \n    - image_id (str) - the id of the image for which the mask was generated\n    \"\"\"\n    dir_path = os.path.join(DATA_DIR, 'saliency_maps', model_name)\n\n    if os.path.exists(dir_path) == False:\n        os.mkdir(dir_path)\n\n    ch.save(map, os.path.join(dir_path, image_id))\n\nclass HAM10000_dataset_3cls_balanced(HAM10000_dataset):\n    def __init__(self, root, file_name, train=True, download=None, transform=None, upsample=True, test=False, \n                 apply_ablation=False, saliency_dir=None, perc_ablation=0):\n        \"\"\"\n        Gets called in `loaders.make_loaders` with exactly these parameters.\n\n        This class represents the 3_cls_balanced dataset, which is created to work for cross-validation!\n        The dataset has 5 folds. This class handles the internal training\n        split (4 folds), the internal validation split (1 fold), and the test set.\n        \n        - root (str): root path\n        - file_name (str): Contains the file name (e.g. `data.csv`), followed by \"::\" and the \n        id for the VALIDATION fold! \n        (note that when test=True, appending \"::...\" does not change anything)\n\n        e.g. \"data.csv::5\" with flag `train=True` means self.df contains folds 1, 2, 3, 4\n        e.g. \"data.csv::5\" with flag `train=False` means self.df contains fold 5\n        e.g. 
\"data.csv\" with flag 'test=True' means self.df contains the entire dataset\n\n - test (bool): If `True`, then load the test set from `root`\n - apply_ablation (boolean): If `True`, then load and apply the saliency map\n - saliency_dir (str): Path to the directory from where to load and apply the saliency mask\n \"\"\"\n if apply_ablation and saliency_dir == None:\n raise ValueError(\"If apply_ablation==True, then you must provide `saliency_dir`\")\n\n self.transform = transform\n self.file_name = file_name\n self.apply_ablation = apply_ablation\n self.saliency_dir = saliency_dir\n self.perc_ablation = perc_ablation\n\n aux = file_name.split('::')\n data = pd.read_csv(os.path.join(root, aux[0]))\n if test == True:\n self.df = data\n else: # train/validation\n aux = aux[1]\n if aux == '0': # train on the whole training set\n if train == True:\n self.df = data[data['fold'].isin(['1', '2', '3', '4', '5'])]\n else:\n self.df = data[data['fold'].isin({'validation'})]\n else:\n train_folds = {'1', '2', '3', '4', '5'}\n train_folds.remove(aux)\n\n if train == True:\n self.df = data[data['fold'].isin(train_folds)]\n else:\n self.df = data[data['fold'].isin([aux])]\n\n if upsample:\n self.df = upsample_dataframe(self.df)\n\n print(f\"Created dataset of length: {len(self.df)}\")\n self.df = self.df.reset_index()\n self.df['path'] = root + '/' + self.df.loc[:, 'path_relative']\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, index):\n \"\"\"\n Load data and get label\n \"\"\"\n X, y = self._getitem_index(index)\n\n y = torch.tensor(y)\n\n if self.apply_ablation:\n # load saliency\n image_id = self.df['image_id'][index]\n saliency_map_2d = torch.load(os.path.join(\n self.saliency_dir, image_id)) # np.ndarray of (224, 224)\n mask = get_mask(saliency_map_2d, perc=self.perc_ablation)\n \n X = transforms.Compose([\n transforms.CenterCrop(450), \n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n partial(ablate_features, mask=mask),\n transforms.ToPILImage()\n ])(X)\n\n if self.transform:\n X = self.transform(X)\n\n return X, y\n\n def _getitem_index(self, index):\n \"\"\"\n Get item without applying any transform.\n \"\"\"\n X = Image.open(self.df['path'][index])\n y = int(self.df['type'][index])\n\n return X, y\n\n def _getitem_image_id(self, image_id):\n row = self.df[self.df.image_id == image_id]\n X = Image.open(row['path'].iloc[0])\n y = int(row['type'].iloc[0])\n\n return X, y\n\n\ndef freeze(model):\n \"\"\"\n Function to `freeze` all weight in the model\n \"\"\"\n for param in model.parameters():\n param.requires_grad = False\n\ndef unfreeze(model, until=0):\n \"\"\"\n Unfreezes blocks of a ResNet starting from the head to the first layer.\n\n Args:\n - model (robustness/cifar_models/resnet.py, which is instance of nn.Module) - instace of ResNet-18 model\n - until (int) - the layer until to unfreeze\n - 5: model.fc\n - 4: model.fc + model.layer4\n - 3: model.fc + model.layer4 + model.layer3\n - 2: model.fc + model.layer4 + model.layer3 + model.layer2\n - 1: (full unfreeze)\n \"\"\"\n assert until in [1, 2, 3, 4, 5], (\"Paramter 'until' needs to have values in [1, 2, 3, 4, 5]\")\n\n if until == 1:\n for param in model.parameters():\n param.requires_grad = True\n print(\"Unfrozen the entire model\")\n return\n\n if until<=5:\n for param in model.fc.parameters():\n param.requires_grad = True\n print(\"Unfrozen layer .fc\")\n if until<=4:\n for param in model.layer4.parameters():\n param.requires_grad = True\n print(\"Unfrozen layer .layer4\")\n if until<=3:\n for param 
in model.layer3.parameters():\n param.requires_grad = True\n print(\"Unfrozen layer .layer3\")\n if until<=2:\n for param in model.layer2.parameters():\n param.requires_grad = True\n print(\"Unfrozen layer .layer2\")\n \n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.init.constant_", "numpy.arange", "torch.tensor", "numpy.ceil", "torch.nn.Linear", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xinrong-databricks/koalas
[ "2981b0f3d5d9d71d372556a553ee119118d236fc" ]
[ "databricks/koalas/tests/test_indexes.py" ]
[ "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport inspect\nimport unittest\nfrom distutils.version import LooseVersion\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pyspark\n\nimport databricks.koalas as ks\nfrom databricks.koalas.exceptions import PandasNotImplementedError\nfrom databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex\nfrom databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils, SPARK_CONF_ARROW_ENABLED\n\n\nclass IndexesTest(ReusedSQLTestCase, TestUtils):\n @property\n def pdf(self):\n return pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0],},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n\n @property\n def kdf(self):\n return ks.from_pandas(self.pdf)\n\n def test_index(self):\n for pdf in [\n pd.DataFrame(np.random.randn(10, 5), index=list(\"abcdefghij\")),\n pd.DataFrame(\n np.random.randn(10, 5), index=pd.date_range(\"2011-01-01\", freq=\"D\", periods=10)\n ),\n pd.DataFrame(np.random.randn(10, 5), columns=list(\"abcde\")).set_index([\"a\", \"b\"]),\n ]:\n kdf = ks.from_pandas(pdf)\n self.assert_eq(kdf.index, pdf.index)\n\n def test_index_getattr(self):\n kidx = self.kdf.index\n item = \"databricks\"\n\n expected_error_message = \"'Index' object has no attribute '{}'\".format(item)\n with self.assertRaisesRegex(AttributeError, expected_error_message):\n kidx.__getattr__(item)\n\n def test_multi_index_getattr(self):\n arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]]\n idx = pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\"))\n pdf = pd.DataFrame(np.random.randn(4, 5), idx)\n kdf = ks.from_pandas(pdf)\n kidx = kdf.index\n item = \"databricks\"\n\n expected_error_message = \"'MultiIndex' object has no attribute '{}'\".format(item)\n with self.assertRaisesRegex(AttributeError, expected_error_message):\n kidx.__getattr__(item)\n\n def test_to_series(self):\n pidx = self.pdf.index\n kidx = self.kdf.index\n\n self.assert_eq(kidx.to_series(), pidx.to_series())\n self.assert_eq(kidx.to_series(name=\"a\"), pidx.to_series(name=\"a\"))\n\n # With name\n pidx.name = \"Koalas\"\n kidx.name = \"Koalas\"\n self.assert_eq(kidx.to_series(), pidx.to_series())\n self.assert_eq(kidx.to_series(name=(\"x\", \"a\")), pidx.to_series(name=(\"x\", \"a\")))\n\n # With tupled name\n pidx.name = (\"x\", \"a\")\n kidx.name = (\"x\", \"a\")\n self.assert_eq(kidx.to_series(), pidx.to_series())\n self.assert_eq(kidx.to_series(name=\"a\"), pidx.to_series(name=\"a\"))\n\n self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())\n\n pidx = self.pdf.set_index(\"b\", append=True).index\n kidx = self.kdf.set_index(\"b\", append=True).index\n\n with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):\n self.assert_eq(kidx.to_series(), pidx.to_series())\n self.assert_eq(kidx.to_series(name=\"a\"), pidx.to_series(name=\"a\"))\n\n expected_error_message = \"Series.name must be a hashable type\"\n with 
self.assertRaisesRegex(TypeError, expected_error_message):\n kidx.to_series(name=[\"x\", \"a\"])\n\n def test_to_frame(self):\n pidx = self.pdf.index\n kidx = self.kdf.index\n\n self.assert_eq(kidx.to_frame(), pidx.to_frame())\n self.assert_eq(kidx.to_frame(index=False), pidx.to_frame(index=False))\n\n pidx.name = \"a\"\n kidx.name = \"a\"\n\n self.assert_eq(kidx.to_frame(), pidx.to_frame())\n self.assert_eq(kidx.to_frame(index=False), pidx.to_frame(index=False))\n\n if LooseVersion(pd.__version__) >= LooseVersion(\"0.24\"):\n # The `name` argument is added in pandas 0.24.\n self.assert_eq(kidx.to_frame(name=\"x\"), pidx.to_frame(name=\"x\"))\n self.assert_eq(\n kidx.to_frame(index=False, name=\"x\"), pidx.to_frame(index=False, name=\"x\"),\n )\n\n pidx = self.pdf.set_index(\"b\", append=True).index\n kidx = self.kdf.set_index(\"b\", append=True).index\n\n self.assert_eq(kidx.to_frame(), pidx.to_frame())\n self.assert_eq(kidx.to_frame(index=False), pidx.to_frame(index=False))\n\n if LooseVersion(pd.__version__) >= LooseVersion(\"0.24\"):\n # The `name` argument is added in pandas 0.24.\n self.assert_eq(kidx.to_frame(name=[\"x\", \"y\"]), pidx.to_frame(name=[\"x\", \"y\"]))\n self.assert_eq(\n kidx.to_frame(index=False, name=[\"x\", \"y\"]),\n pidx.to_frame(index=False, name=[\"x\", \"y\"]),\n )\n\n def test_index_names(self):\n kdf = self.kdf\n self.assertIsNone(kdf.index.name)\n\n idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name=\"x\")\n pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list(\"abcde\"))\n kdf = ks.from_pandas(pdf)\n\n pser = pdf.a\n kser = kdf.a\n\n self.assertEqual(kdf.index.name, pdf.index.name)\n self.assertEqual(kdf.index.names, pdf.index.names)\n\n pidx = pdf.index\n kidx = kdf.index\n pidx.name = \"renamed\"\n kidx.name = \"renamed\"\n self.assertEqual(kidx.name, pidx.name)\n self.assertEqual(kidx.names, pidx.names)\n self.assert_eq(kidx, pidx)\n self.assertEqual(kdf.index.name, pdf.index.name)\n self.assertEqual(kdf.index.names, pdf.index.names)\n self.assertEqual(kser.index.names, pser.index.names)\n\n pidx.name = None\n kidx.name = None\n self.assertEqual(kidx.name, pidx.name)\n self.assertEqual(kidx.names, pidx.names)\n self.assert_eq(kidx, pidx)\n self.assertEqual(kdf.index.name, pdf.index.name)\n self.assertEqual(kdf.index.names, pdf.index.names)\n self.assertEqual(kser.index.names, pser.index.names)\n\n with self.assertRaisesRegex(ValueError, \"Names must be a list-like\"):\n kidx.names = \"hi\"\n\n expected_error_message = \"Length of new names must be {}, got {}\".format(\n len(kdf._internal.index_map), len([\"0\", \"1\"])\n )\n with self.assertRaisesRegex(ValueError, expected_error_message):\n kidx.names = [\"0\", \"1\"]\n\n def test_multi_index_names(self):\n arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]]\n idx = pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\"))\n pdf = pd.DataFrame(np.random.randn(4, 5), idx)\n kdf = ks.from_pandas(pdf)\n\n self.assertEqual(kdf.index.names, pdf.index.names)\n\n pidx = pdf.index\n kidx = kdf.index\n pidx.names = [\"renamed_number\", \"renamed_color\"]\n kidx.names = [\"renamed_number\", \"renamed_color\"]\n self.assertEqual(kidx.names, pidx.names)\n\n pidx.names = [\"renamed_number\", None]\n kidx.names = [\"renamed_number\", None]\n self.assertEqual(kidx.names, pidx.names)\n if LooseVersion(pyspark.__version__) < LooseVersion(\"2.4\"):\n # PySpark < 2.4 does not support struct type with arrow enabled.\n with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):\n 
self.assert_eq(kidx, pidx)\n else:\n self.assert_eq(kidx, pidx)\n\n with self.assertRaises(PandasNotImplementedError):\n kidx.name\n with self.assertRaises(PandasNotImplementedError):\n kidx.name = \"renamed\"\n\n def test_index_rename(self):\n pdf = pd.DataFrame(\n np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name=\"x\")\n )\n kdf = ks.from_pandas(pdf)\n\n pidx = pdf.index\n kidx = kdf.index\n\n self.assert_eq(kidx.rename(\"y\"), pidx.rename(\"y\"))\n self.assert_eq(kdf.index.names, pdf.index.names)\n\n kidx.rename(\"z\", inplace=True)\n pidx.rename(\"z\", inplace=True)\n\n self.assert_eq(kidx, pidx)\n self.assert_eq(kdf.index.names, pdf.index.names)\n\n self.assert_eq(kidx.rename(None), pidx.rename(None))\n self.assert_eq(kdf.index.names, pdf.index.names)\n\n def test_multi_index_rename(self):\n arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]]\n idx = pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\"))\n pdf = pd.DataFrame(np.random.randn(4, 5), idx)\n kdf = ks.from_pandas(pdf)\n\n pmidx = pdf.index\n kmidx = kdf.index\n\n self.assert_eq(kmidx.rename([\"n\", \"c\"]), pmidx.rename([\"n\", \"c\"]))\n self.assert_eq(kdf.index.names, pdf.index.names)\n\n kmidx.rename([\"num\", \"col\"], inplace=True)\n pmidx.rename([\"num\", \"col\"], inplace=True)\n\n self.assert_eq(kmidx, pmidx)\n self.assert_eq(kdf.index.names, pdf.index.names)\n\n self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))\n self.assert_eq(kdf.index.names, pdf.index.names)\n\n self.assertRaises(TypeError, lambda: kmidx.rename(\"number\"))\n self.assertRaises(ValueError, lambda: kmidx.rename([\"number\"]))\n\n def test_multi_index_levshape(self):\n pidx = pd.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2)])\n kidx = ks.from_pandas(pidx)\n self.assertEqual(pidx.levshape, kidx.levshape)\n\n def test_index_unique(self):\n kidx = self.kdf.index\n\n # here the output is different than pandas in terms of order\n expected = [0, 1, 3, 5, 6, 8, 9]\n\n self.assert_eq(expected, sorted(kidx.unique().to_pandas()))\n self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))\n\n expected = [1, 2, 4, 6, 7, 9, 10]\n self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))\n\n with self.assertRaisesRegex(IndexError, \"Too many levels*\"):\n kidx.unique(level=1)\n\n with self.assertRaisesRegex(KeyError, \"Requested level (hi)*\"):\n kidx.unique(level=\"hi\")\n\n def test_multi_index_copy(self):\n arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]]\n idx = pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\"))\n pdf = pd.DataFrame(np.random.randn(4, 5), idx)\n kdf = ks.from_pandas(pdf)\n\n self.assert_eq(kdf.index.copy(), pdf.index.copy())\n\n def test_drop_duplicates(self):\n pidx = pd.Index([4, 2, 4, 1, 4, 3])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(kidx.drop_duplicates().sort_values(), pidx.drop_duplicates().sort_values())\n self.assert_eq(\n (kidx + 1).drop_duplicates().sort_values(), (pidx + 1).drop_duplicates().sort_values()\n )\n\n def test_dropna(self):\n pidx = pd.Index([np.nan, 2, 4, 1, np.nan, 3])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(kidx.dropna(), pidx.dropna())\n self.assert_eq((kidx + 1).dropna(), (pidx + 1).dropna())\n\n def test_index_symmetric_difference(self):\n pidx1 = pd.Index([1, 2, 3, 4])\n pidx2 = pd.Index([2, 3, 4, 5])\n kidx1 = ks.from_pandas(pidx1)\n kidx2 = ks.from_pandas(pidx2)\n\n self.assert_eq(\n kidx1.symmetric_difference(kidx2).sort_values(),\n 
pidx1.symmetric_difference(pidx2).sort_values(),\n )\n self.assert_eq(\n (kidx1 + 1).symmetric_difference(kidx2).sort_values(),\n (pidx1 + 1).symmetric_difference(pidx2).sort_values(),\n )\n\n pmidx1 = pd.MultiIndex(\n [[\"lama\", \"cow\", \"falcon\"], [\"speed\", \"weight\", \"length\"]],\n [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],\n )\n pmidx2 = pd.MultiIndex(\n [[\"koalas\", \"cow\", \"falcon\"], [\"speed\", \"weight\", \"length\"]],\n [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],\n )\n kmidx1 = ks.from_pandas(pmidx1)\n kmidx2 = ks.from_pandas(pmidx2)\n\n self.assert_eq(\n kmidx1.symmetric_difference(kmidx2).sort_values(),\n pmidx1.symmetric_difference(pmidx2).sort_values(),\n )\n\n idx = ks.Index([\"a\", \"b\", \"c\"])\n midx = ks.MultiIndex.from_tuples([(\"a\", \"x\"), (\"b\", \"y\"), (\"c\", \"z\")])\n\n with self.assertRaisesRegex(NotImplementedError, \"Doesn't support*\"):\n idx.symmetric_difference(midx)\n\n def test_multi_index_symmetric_difference(self):\n idx = ks.Index([\"a\", \"b\", \"c\"])\n midx = ks.MultiIndex.from_tuples([(\"a\", \"x\"), (\"b\", \"y\"), (\"c\", \"z\")])\n midx_ = ks.MultiIndex.from_tuples([(\"a\", \"x\"), (\"b\", \"y\"), (\"c\", \"z\")])\n\n self.assert_eq(\n midx.symmetric_difference(midx_),\n midx.to_pandas().symmetric_difference(midx_.to_pandas()),\n )\n\n with self.assertRaisesRegex(NotImplementedError, \"Doesn't support*\"):\n midx.symmetric_difference(idx)\n\n def test_missing(self):\n kdf = ks.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]})\n\n # Index functions\n missing_functions = inspect.getmembers(MissingPandasLikeIndex, inspect.isfunction)\n unsupported_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"unsupported_function\"\n ]\n for name in unsupported_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"method.*Index.*{}.*not implemented( yet\\\\.|\\\\. .+)\".format(name),\n ):\n getattr(kdf.set_index(\"a\").index, name)()\n\n deprecated_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"deprecated_function\"\n ]\n for name in deprecated_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"method.*Index.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.set_index(\"a\").index, name)()\n\n # MultiIndex functions\n missing_functions = inspect.getmembers(MissingPandasLikeMultiIndex, inspect.isfunction)\n unsupported_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"unsupported_function\"\n ]\n for name in unsupported_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"method.*Index.*{}.*not implemented( yet\\\\.|\\\\. .+)\".format(name),\n ):\n getattr(kdf.set_index([\"a\", \"b\"]).index, name)()\n\n deprecated_functions = [\n name for (name, type_) in missing_functions if type_.__name__ == \"deprecated_function\"\n ]\n for name in deprecated_functions:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"method.*Index.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.set_index([\"a\", \"b\"]).index, name)()\n\n # Index properties\n missing_properties = inspect.getmembers(\n MissingPandasLikeIndex, lambda o: isinstance(o, property)\n )\n unsupported_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"unsupported_property\"\n ]\n for name in unsupported_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"property.*Index.*{}.*not implemented( yet\\\\.|\\\\. 
.+)\".format(name),\n ):\n getattr(kdf.set_index(\"a\").index, name)\n\n deprecated_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"deprecated_property\"\n ]\n for name in deprecated_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"property.*Index.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.set_index(\"a\").index, name)\n\n # MultiIndex properties\n missing_properties = inspect.getmembers(\n MissingPandasLikeMultiIndex, lambda o: isinstance(o, property)\n )\n unsupported_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"unsupported_property\"\n ]\n for name in unsupported_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError,\n \"property.*Index.*{}.*not implemented( yet\\\\.|\\\\. .+)\".format(name),\n ):\n getattr(kdf.set_index([\"a\", \"b\"]).index, name)\n\n deprecated_properties = [\n name\n for (name, type_) in missing_properties\n if type_.fget.__name__ == \"deprecated_property\"\n ]\n for name in deprecated_properties:\n with self.assertRaisesRegex(\n PandasNotImplementedError, \"property.*Index.*{}.*is deprecated\".format(name)\n ):\n getattr(kdf.set_index([\"a\", \"b\"]).index, name)\n\n def test_index_has_duplicates(self):\n indexes = [(\"a\", \"b\", \"c\"), (\"a\", \"a\", \"c\"), (1, 3, 3), (1, 2, 3)]\n names = [None, \"ks\", \"ks\", None]\n has_dup = [False, True, True, False]\n\n for idx, name, expected in zip(indexes, names, has_dup):\n pdf = pd.DataFrame({\"a\": [1, 2, 3]}, index=pd.Index(idx, name=name))\n kdf = ks.from_pandas(pdf)\n\n self.assertEqual(kdf.index.has_duplicates, expected)\n\n def test_multiindex_has_duplicates(self):\n indexes = [\n [list(\"abc\"), list(\"edf\")],\n [list(\"aac\"), list(\"edf\")],\n [list(\"aac\"), list(\"eef\")],\n [[1, 4, 4], [4, 6, 6]],\n ]\n has_dup = [False, False, True, True]\n\n for idx, expected in zip(indexes, has_dup):\n pdf = pd.DataFrame({\"a\": [1, 2, 3]}, index=idx)\n kdf = ks.from_pandas(pdf)\n\n self.assertEqual(kdf.index.has_duplicates, expected)\n\n def test_multi_index_not_supported(self):\n kdf = ks.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]})\n\n with self.assertRaisesRegex(TypeError, \"cannot perform any with this index type\"):\n kdf.set_index([\"a\", \"b\"]).index.any()\n\n with self.assertRaisesRegex(TypeError, \"cannot perform all with this index type\"):\n kdf.set_index([\"a\", \"b\"]).index.all()\n\n def test_index_nlevels(self):\n pdf = pd.DataFrame({\"a\": [1, 2, 3]}, index=pd.Index([\"a\", \"b\", \"c\"]))\n kdf = ks.from_pandas(pdf)\n\n self.assertEqual(kdf.index.nlevels, 1)\n\n def test_multiindex_nlevel(self):\n pdf = pd.DataFrame({\"a\": [1, 2, 3]}, index=[list(\"abc\"), list(\"def\")])\n kdf = ks.from_pandas(pdf)\n\n self.assertEqual(kdf.index.nlevels, 2)\n\n def test_multiindex_from_arrays(self):\n arrays = [[\"a\", \"a\", \"b\", \"b\"], [\"red\", \"blue\", \"red\", \"blue\"]]\n pidx = pd.MultiIndex.from_arrays(arrays)\n kidx = ks.MultiIndex.from_arrays(arrays)\n\n self.assert_eq(pidx, kidx)\n\n def test_multiindex_swaplevel(self):\n pidx = pd.MultiIndex.from_arrays([[\"a\", \"b\"], [1, 2]])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))\n\n pidx = pd.MultiIndex.from_arrays([[\"a\", \"b\"], [1, 2]], names=[\"word\", \"number\"])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))\n\n pidx = pd.MultiIndex.from_arrays([[\"a\", \"b\"], [1, 2]], names=[\"word\", 
None])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.swaplevel(-2, -1), kidx.swaplevel(-2, -1))\n self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))\n self.assert_eq(pidx.swaplevel(\"word\", 1), kidx.swaplevel(\"word\", 1))\n\n with self.assertRaisesRegex(IndexError, \"Too many levels: Index\"):\n kidx.swaplevel(-3, \"word\")\n with self.assertRaisesRegex(IndexError, \"Too many levels: Index\"):\n kidx.swaplevel(0, 2)\n with self.assertRaisesRegex(IndexError, \"Too many levels: Index\"):\n kidx.swaplevel(0, -3)\n with self.assertRaisesRegex(KeyError, \"Level work not found\"):\n kidx.swaplevel(0, \"work\")\n\n def test_multiindex_droplevel(self):\n pidx = pd.MultiIndex.from_tuples(\n [(\"a\", \"x\", 1), (\"b\", \"y\", 2)], names=[\"level1\", \"level2\", \"level3\"]\n )\n kidx = ks.from_pandas(pidx)\n with self.assertRaisesRegex(IndexError, \"Too many levels: Index has only 3 levels, not 5\"):\n kidx.droplevel(4)\n\n with self.assertRaisesRegex(KeyError, \"Level level4 not found\"):\n kidx.droplevel(\"level4\")\n\n with self.assertRaisesRegex(KeyError, \"Level.*level3.*level4.*not found\"):\n kidx.droplevel([(\"level3\", \"level4\")])\n\n with self.assertRaisesRegex(\n ValueError,\n \"Cannot remove 4 levels from an index with 3 levels: at least one \"\n \"level must be left.\",\n ):\n kidx.droplevel([0, 0, 1, 2])\n\n with self.assertRaisesRegex(\n ValueError,\n \"Cannot remove 3 levels from an index with 3 levels: at least one \"\n \"level must be left.\",\n ):\n kidx.droplevel([0, 1, 2])\n\n self.assert_eq(pidx.droplevel(0), kidx.droplevel(0))\n self.assert_eq(pidx.droplevel([0, 1]), kidx.droplevel([0, 1]))\n self.assert_eq(pidx.droplevel([0, \"level2\"]), kidx.droplevel([0, \"level2\"]))\n\n def test_index_fillna(self):\n pidx = pd.Index([1, 2, None])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.fillna(0), kidx.fillna(0), almost=True)\n self.assert_eq(pidx.rename(\"name\").fillna(0), kidx.rename(\"name\").fillna(0), almost=True)\n\n with self.assertRaisesRegex(TypeError, \"Unsupported type <class 'list'>\"):\n kidx.fillna([1, 2])\n\n def test_index_drop(self):\n pidx = pd.Index([1, 2, 3])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.drop(1), kidx.drop(1))\n self.assert_eq(pidx.drop([1, 2]), kidx.drop([1, 2]))\n\n def test_multiindex_drop(self):\n pidx = pd.MultiIndex.from_tuples(\n [(\"a\", \"x\"), (\"b\", \"y\"), (\"c\", \"z\")], names=[\"level1\", \"level2\"]\n )\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.drop(\"a\"), kidx.drop(\"a\"))\n self.assert_eq(pidx.drop([\"a\", \"b\"]), kidx.drop([\"a\", \"b\"]))\n self.assert_eq(pidx.drop([\"x\", \"y\"], level=1), kidx.drop([\"x\", \"y\"], level=1))\n self.assert_eq(pidx.drop([\"x\", \"y\"], level=\"level2\"), kidx.drop([\"x\", \"y\"], level=\"level2\"))\n\n pidx.names = [\"lv1\", \"lv2\"]\n kidx.names = [\"lv1\", \"lv2\"]\n self.assert_eq(pidx.drop([\"x\", \"y\"], level=\"lv2\"), kidx.drop([\"x\", \"y\"], level=\"lv2\"))\n\n self.assertRaises(IndexError, lambda: kidx.drop([\"a\", \"b\"], level=2))\n self.assertRaises(KeyError, lambda: kidx.drop([\"a\", \"b\"], level=\"level\"))\n\n kidx.names = [\"lv\", \"lv\"]\n self.assertRaises(ValueError, lambda: kidx.drop([\"x\", \"y\"], level=\"lv\"))\n\n def test_sort_values(self):\n pidx = pd.Index([-10, -100, 200, 100])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.sort_values(), kidx.sort_values())\n self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))\n\n pidx.name = \"koalas\"\n kidx.name = \"koalas\"\n\n 
self.assert_eq(pidx.sort_values(), kidx.sort_values())\n self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))\n\n pidx = pd.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n kidx = ks.from_pandas(pidx)\n\n pidx.names = [\"hello\", \"koalas\", \"goodbye\"]\n kidx.names = [\"hello\", \"koalas\", \"goodbye\"]\n\n self.assert_eq(pidx.sort_values(), kidx.sort_values())\n self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))\n\n def test_index_drop_duplicates(self):\n pidx = pd.Index([1, 1, 2])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())\n\n pidx = pd.MultiIndex.from_tuples([(1, 1), (1, 1), (2, 2)], names=[\"level1\", \"level2\"])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())\n\n def test_index_sort(self):\n idx = ks.Index([1, 2, 3, 4, 5])\n midx = ks.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2)])\n\n with self.assertRaisesRegex(\n TypeError, \"cannot sort an Index object in-place, use sort_values instead\"\n ):\n idx.sort()\n with self.assertRaisesRegex(\n TypeError, \"cannot sort an Index object in-place, use sort_values instead\"\n ):\n midx.sort()\n\n def test_multiindex_isna(self):\n kidx = ks.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n\n with self.assertRaisesRegex(NotImplementedError, \"isna is not defined for MultiIndex\"):\n kidx.isna()\n\n with self.assertRaisesRegex(NotImplementedError, \"isna is not defined for MultiIndex\"):\n kidx.isnull()\n\n with self.assertRaisesRegex(NotImplementedError, \"notna is not defined for MultiIndex\"):\n kidx.notna()\n\n with self.assertRaisesRegex(NotImplementedError, \"notna is not defined for MultiIndex\"):\n kidx.notnull()\n\n def test_index_nunique(self):\n pidx = pd.Index([1, 1, 2, None])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.nunique(), kidx.nunique())\n self.assert_eq(pidx.nunique(dropna=True), kidx.nunique(dropna=True))\n\n def test_multiindex_nunique(self):\n kidx = ks.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n with self.assertRaisesRegex(NotImplementedError, \"notna is not defined for MultiIndex\"):\n kidx.notnull()\n\n def test_multiindex_rename(self):\n pidx = pd.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n kidx = ks.from_pandas(pidx)\n\n pidx = pidx.rename(list(\"ABC\"))\n kidx = kidx.rename(list(\"ABC\"))\n self.assert_eq(pidx, kidx)\n\n pidx = pidx.rename([\"my\", \"name\", \"is\"])\n kidx = kidx.rename([\"my\", \"name\", \"is\"])\n self.assert_eq(pidx, kidx)\n\n def test_multiindex_set_names(self):\n pidx = pd.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n kidx = ks.from_pandas(pidx)\n\n pidx = pidx.set_names([\"set\", \"new\", \"names\"])\n kidx = kidx.set_names([\"set\", \"new\", \"names\"])\n self.assert_eq(pidx, kidx)\n\n pidx.set_names([\"set\", \"new\", \"names\"], inplace=True)\n kidx.set_names([\"set\", \"new\", \"names\"], inplace=True)\n self.assert_eq(pidx, kidx)\n\n pidx = pidx.set_names(\"first\", level=0)\n kidx = kidx.set_names(\"first\", level=0)\n self.assert_eq(pidx, kidx)\n\n pidx = pidx.set_names(\"second\", level=1)\n kidx = kidx.set_names(\"second\", level=1)\n self.assert_eq(pidx, kidx)\n\n pidx = pidx.set_names(\"third\", level=2)\n kidx = kidx.set_names(\"third\", level=2)\n 
self.assert_eq(pidx, kidx)\n\n pidx.set_names(\"first\", level=0, inplace=True)\n kidx.set_names(\"first\", level=0, inplace=True)\n self.assert_eq(pidx, kidx)\n\n pidx.set_names(\"second\", level=1, inplace=True)\n kidx.set_names(\"second\", level=1, inplace=True)\n self.assert_eq(pidx, kidx)\n\n pidx.set_names(\"third\", level=2, inplace=True)\n kidx.set_names(\"third\", level=2, inplace=True)\n self.assert_eq(pidx, kidx)\n\n def test_multiindex_from_tuples(self):\n tuples = [(1, \"red\"), (1, \"blue\"), (2, \"red\"), (2, \"blue\")]\n pidx = pd.MultiIndex.from_tuples(tuples)\n kidx = ks.MultiIndex.from_tuples(tuples)\n\n self.assert_eq(pidx, kidx)\n\n def test_multiindex_from_product(self):\n iterables = [[0, 1, 2], [\"green\", \"purple\"]]\n pidx = pd.MultiIndex.from_product(iterables)\n kidx = ks.MultiIndex.from_product(iterables)\n\n self.assert_eq(pidx, kidx)\n\n def test_multiindex_tuple_column_name(self):\n column_labels = pd.MultiIndex.from_tuples([(\"a\", \"x\"), (\"a\", \"y\"), (\"b\", \"z\")])\n pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=column_labels)\n pdf.set_index((\"a\", \"x\"), append=True, inplace=True)\n kdf = ks.from_pandas(pdf)\n self.assert_eq(pdf, kdf)\n\n def test_len(self):\n pidx = pd.Index(range(10000))\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(len(pidx), len(kidx))\n\n pidx = pd.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n kidx = ks.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n\n self.assert_eq(len(pidx), len(kidx))\n\n def test_delete(self):\n pidx = pd.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])\n kidx = ks.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])\n\n self.assert_eq(pidx.delete(5).sort_values(), kidx.delete(5).sort_values())\n self.assert_eq(pidx.delete(-5).sort_values(), kidx.delete(-5).sort_values())\n self.assert_eq(pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values())\n self.assert_eq(\n pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()\n )\n\n with self.assertRaisesRegex(IndexError, \"index 10 is out of bounds for axis 0 with size 9\"):\n kidx.delete(10)\n\n pidx = pd.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n kidx = ks.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n\n self.assert_eq(pidx.delete(1).sort_values(), kidx.delete(1).sort_values())\n self.assert_eq(pidx.delete(-1).sort_values(), kidx.delete(-1).sort_values())\n self.assert_eq(pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values())\n self.assert_eq(\n pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()\n )\n\n def test_append(self):\n # Index\n pidx = pd.Index(range(10000))\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.append(pidx), kidx.append(kidx))\n\n # Index with name\n pidx1 = pd.Index(range(10000), name=\"a\")\n pidx2 = pd.Index(range(10000), name=\"b\")\n kidx1 = ks.from_pandas(pidx1)\n kidx2 = ks.from_pandas(pidx2)\n\n self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))\n\n self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))\n\n # Index from DataFrame\n pdf1 = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[\"a\", \"b\", \"c\"])\n pdf2 = pd.DataFrame({\"a\": [7, 8, 9], \"d\": [10, 11, 12]}, index=[\"x\", \"y\", \"z\"])\n kdf1 = ks.from_pandas(pdf1)\n kdf2 = ks.from_pandas(pdf2)\n\n pidx1 = pdf1.set_index(\"a\").index\n pidx2 = pdf2.set_index(\"d\").index\n kidx1 = 
kdf1.set_index(\"a\").index\n kidx2 = kdf2.set_index(\"d\").index\n\n self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))\n\n self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))\n\n # Index from DataFrame with MultiIndex columns\n pdf1 = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n pdf2 = pd.DataFrame({\"a\": [7, 8, 9], \"d\": [10, 11, 12]})\n pdf1.columns = pd.MultiIndex.from_tuples([(\"a\", \"x\"), (\"b\", \"y\")])\n pdf2.columns = pd.MultiIndex.from_tuples([(\"a\", \"x\"), (\"d\", \"y\")])\n kdf1 = ks.from_pandas(pdf1)\n kdf2 = ks.from_pandas(pdf2)\n\n pidx1 = pdf1.set_index((\"a\", \"x\")).index\n pidx2 = pdf2.set_index((\"d\", \"y\")).index\n kidx1 = kdf1.set_index((\"a\", \"x\")).index\n kidx2 = kdf2.set_index((\"d\", \"y\")).index\n\n self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))\n\n self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))\n\n # MultiIndex\n pmidx = pd.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n kmidx = ks.from_pandas(pmidx)\n\n self.assert_eq(pmidx.append(pmidx), kmidx.append(kmidx))\n\n # MultiIndex with names\n pmidx1 = pd.MultiIndex.from_tuples(\n [(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)], names=[\"x\", \"y\", \"z\"]\n )\n pmidx2 = pd.MultiIndex.from_tuples(\n [(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)], names=[\"p\", \"q\", \"r\"]\n )\n kmidx1 = ks.from_pandas(pmidx1)\n kmidx2 = ks.from_pandas(pmidx2)\n\n self.assert_eq(pmidx1.append(pmidx2), kmidx1.append(kmidx2))\n\n self.assert_eq(pmidx2.append(pmidx1), kmidx2.append(kmidx1))\n\n self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)\n\n self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)\n\n # Index & MultiIndex currently is not supported\n expected_error_message = r\"append\\(\\) between Index & MultiIndex currently is not supported\"\n with self.assertRaisesRegex(NotImplementedError, expected_error_message):\n kidx.append(kmidx)\n with self.assertRaisesRegex(NotImplementedError, expected_error_message):\n kmidx.append(kidx)\n\n def test_argmin(self):\n pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.argmin(), kidx.argmin())\n\n # MultiIndex\n kidx = ks.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n with self.assertRaisesRegex(\n TypeError, \"reduction operation 'argmin' not allowed for this dtype\"\n ):\n kidx.argmin()\n\n def test_argmax(self):\n pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.argmax(), kidx.argmax())\n\n # MultiIndex\n kidx = ks.MultiIndex.from_tuples([(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)])\n with self.assertRaisesRegex(\n TypeError, \"reduction operation 'argmax' not allowed for this dtype\"\n ):\n kidx.argmax()\n\n def test_monotonic(self):\n # test monotonic_increasing & monotonic_decreasing for MultiIndex.\n # Since the Behavior for null value was changed in pandas >= 1.0.0,\n # several cases are tested differently.\n datas = []\n\n # increasing / decreasing ordered each index level with string\n datas.append([(\"w\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\"), (\"z\", \"d\")])\n datas.append([(\"w\", \"d\"), (\"x\", \"c\"), (\"y\", \"b\"), (\"z\", \"a\")])\n datas.append([(\"z\", \"a\"), (\"y\", \"b\"), (\"x\", \"c\"), (\"w\", \"d\")])\n datas.append([(\"z\", \"d\"), (\"y\", \"c\"), (\"x\", \"b\"), (\"w\", \"a\")])\n # mixed order 
each index level with string\n datas.append([(\"z\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\"), (\"w\", \"d\")])\n datas.append([(\"z\", \"a\"), (\"y\", \"c\"), (\"x\", \"b\"), (\"w\", \"d\")])\n\n # increasing / decreasing ordered each index level with integer\n datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (5, 500)])\n datas.append([(1, 500), (2, 400), (3, 300), (4, 200), (5, 100)])\n datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, 500)])\n datas.append([(5, 500), (4, 400), (3, 300), (2, 200), (1, 100)])\n # mixed order each index level with integer\n datas.append([(1, 500), (3, 400), (2, 300), (4, 200), (5, 100)])\n datas.append([(1, 100), (2, 300), (3, 200), (4, 400), (5, 500)])\n\n # integer / negative mixed tests\n datas.append([(\"a\", -500), (\"b\", -400), (\"c\", -300), (\"d\", -200), (\"e\", -100)])\n datas.append([(\"e\", -500), (\"d\", -400), (\"c\", -300), (\"b\", -200), (\"a\", -100)])\n datas.append([(-5, \"a\"), (-4, \"b\"), (-3, \"c\"), (-2, \"d\"), (-1, \"e\")])\n datas.append([(-5, \"e\"), (-4, \"d\"), (-3, \"c\"), (-2, \"b\"), (-1, \"a\")])\n datas.append([(-5, \"e\"), (-3, \"d\"), (-2, \"c\"), (-4, \"b\"), (-1, \"a\")])\n datas.append([(-5, \"e\"), (-4, \"c\"), (-3, \"b\"), (-2, \"d\"), (-1, \"a\")])\n\n # None type tests (None type is treated as the smallest value)\n datas.append([(1, 100), (2, 200), (None, 300), (4, 400), (5, 500)])\n datas.append([(5, None), (4, 200), (3, 300), (2, 400), (1, 500)])\n datas.append([(5, 100), (4, 200), (3, None), (2, 400), (1, 500)])\n datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, None)])\n datas.append([(1, 100), (2, 200), (None, None), (4, 400), (5, 500)])\n # The datas below cannot be an arguments for `MultiIndex.from_tuples` in pandas >= 1.1.0.\n # Refer https://github.com/databricks/koalas/pull/1688#issuecomment-667156560 for detail.\n if LooseVersion(pd.__version__) < LooseVersion(\"1.1.0\"):\n datas.append([(-5, None), (-4, None), (-3, None), (-2, None), (-1, None)])\n datas.append([(None, \"e\"), (None, \"c\"), (None, \"b\"), (None, \"d\"), (None, \"a\")])\n datas.append([(None, None), (None, None), (None, None), (None, None), (None, None)])\n\n # duplicated index value tests\n datas.append([(\"x\", \"d\"), (\"y\", \"c\"), (\"y\", \"b\"), (\"z\", \"a\")])\n datas.append([(\"x\", \"d\"), (\"y\", \"b\"), (\"y\", \"c\"), (\"z\", \"a\")])\n datas.append([(\"x\", \"d\"), (\"y\", \"c\"), (\"y\", None), (\"z\", \"a\")])\n datas.append([(\"x\", \"d\"), (\"y\", None), (\"y\", None), (\"z\", \"a\")])\n datas.append([(\"x\", \"d\"), (\"y\", \"c\"), (\"y\", \"b\"), (None, \"a\")])\n datas.append([(\"x\", \"d\"), (\"y\", \"b\"), (\"y\", \"c\"), (None, \"a\")])\n\n # more depth tests\n datas.append([(\"x\", \"d\", \"o\"), (\"y\", \"c\", \"p\"), (\"y\", \"c\", \"q\"), (\"z\", \"a\", \"r\")])\n datas.append([(\"x\", \"d\", \"o\"), (\"y\", \"c\", \"q\"), (\"y\", \"c\", \"p\"), (\"z\", \"a\", \"r\")])\n datas.append([(\"x\", \"d\", \"o\"), (\"y\", \"c\", \"p\"), (\"y\", \"c\", None), (\"z\", \"a\", \"r\")])\n datas.append([(\"x\", \"d\", \"o\"), (\"y\", \"c\", None), (\"y\", \"c\", None), (\"z\", \"a\", \"r\")])\n\n for data in datas:\n with self.subTest(data=data):\n pmidx = pd.MultiIndex.from_tuples(data)\n kmidx = ks.from_pandas(pmidx)\n self.assert_eq(kmidx.is_monotonic_increasing, pmidx.is_monotonic_increasing)\n self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)\n\n # The datas below are showing different result depends on pandas version.\n # Because the behavior of handling null values 
is changed in pandas >= 1.0.0.\n datas = []\n datas.append([(None, 100), (2, 200), (3, 300), (4, 400), (5, 500)])\n datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, 500)])\n datas.append([(None, None), (2, 200), (3, 300), (4, 400), (5, 500)])\n datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, None)])\n datas.append([(\"x\", \"d\"), (\"y\", None), (\"y\", \"c\"), (\"z\", \"a\")])\n datas.append([(\"x\", \"d\", \"o\"), (\"y\", \"c\", None), (\"y\", \"c\", \"q\"), (\"z\", \"a\", \"r\")])\n\n for data in datas:\n with self.subTest(data=data):\n pmidx = pd.MultiIndex.from_tuples(data)\n kmidx = ks.from_pandas(pmidx)\n expected_increasing_result = pmidx.is_monotonic_increasing\n if LooseVersion(pd.__version__) < LooseVersion(\"1.0.0\"):\n expected_increasing_result = not expected_increasing_result\n self.assert_eq(kmidx.is_monotonic_increasing, expected_increasing_result)\n self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)\n\n def test_difference(self):\n # Index\n pidx1 = pd.Index([1, 2, 3, 4], name=\"koalas\")\n pidx2 = pd.Index([3, 4, 5, 6], name=\"koalas\")\n kidx1 = ks.from_pandas(pidx1)\n kidx2 = ks.from_pandas(pidx2)\n\n self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())\n self.assert_eq(\n kidx1.difference([3, 4, 5, 6]).sort_values(),\n pidx1.difference([3, 4, 5, 6]).sort_values(),\n )\n self.assert_eq(\n kidx1.difference((3, 4, 5, 6)).sort_values(),\n pidx1.difference((3, 4, 5, 6)).sort_values(),\n )\n self.assert_eq(\n kidx1.difference({3, 4, 5, 6}).sort_values(),\n pidx1.difference({3, 4, 5, 6}).sort_values(),\n )\n self.assert_eq(\n kidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),\n pidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),\n )\n\n # Exceptions for Index\n with self.assertRaisesRegex(TypeError, \"Input must be Index or array-like\"):\n kidx1.difference(\"1234\")\n with self.assertRaisesRegex(TypeError, \"Input must be Index or array-like\"):\n kidx1.difference(1234)\n with self.assertRaisesRegex(TypeError, \"Input must be Index or array-like\"):\n kidx1.difference(12.34)\n with self.assertRaisesRegex(TypeError, \"Input must be Index or array-like\"):\n kidx1.difference(None)\n with self.assertRaisesRegex(TypeError, \"Input must be Index or array-like\"):\n kidx1.difference(np.nan)\n with self.assertRaisesRegex(\n ValueError, \"The 'sort' keyword only takes the values of None or True; 1 was passed.\"\n ):\n kidx1.difference(kidx2, sort=1)\n\n # MultiIndex\n pidx1 = pd.MultiIndex.from_tuples(\n [(\"a\", \"x\", 1), (\"b\", \"y\", 2), (\"c\", \"z\", 3)], names=[\"hello\", \"koalas\", \"world\"]\n )\n pidx2 = pd.MultiIndex.from_tuples(\n [(\"a\", \"x\", 1), (\"b\", \"z\", 2), (\"k\", \"z\", 3)], names=[\"hello\", \"koalas\", \"world\"]\n )\n kidx1 = ks.from_pandas(pidx1)\n kidx2 = ks.from_pandas(pidx2)\n\n self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())\n self.assert_eq(\n kidx1.difference({(\"a\", \"x\", 1)}).sort_values(),\n pidx1.difference({(\"a\", \"x\", 1)}).sort_values(),\n )\n self.assert_eq(\n kidx1.difference({(\"a\", \"x\", 1): [1, 2, 3]}).sort_values(),\n pidx1.difference({(\"a\", \"x\", 1): [1, 2, 3]}).sort_values(),\n )\n\n # Exceptions for MultiIndex\n with self.assertRaisesRegex(TypeError, \"other must be a MultiIndex or a list of tuples\"):\n kidx1.difference([\"b\", \"z\", \"2\"])\n\n def test_repeat(self):\n pidx = pd.Index([\"a\", \"b\", \"c\"])\n kidx = ks.from_pandas(pidx)\n\n 
self.assert_eq(kidx.repeat(3).sort_values(), pidx.repeat(3).sort_values())\n self.assert_eq(kidx.repeat(0).sort_values(), pidx.repeat(0).sort_values())\n self.assert_eq((kidx + \"x\").repeat(3).sort_values(), (pidx + \"x\").repeat(3).sort_values())\n\n self.assertRaises(ValueError, lambda: kidx.repeat(-1))\n self.assertRaises(ValueError, lambda: kidx.repeat(\"abc\"))\n\n pmidx = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")])\n kmidx = ks.from_pandas(pmidx)\n\n self.assert_eq(kmidx.repeat(3).sort_values(), pmidx.repeat(3).sort_values())\n self.assert_eq(kmidx.repeat(0).sort_values(), pmidx.repeat(0).sort_values(), almost=True)\n\n self.assertRaises(ValueError, lambda: kmidx.repeat(-1))\n self.assertRaises(ValueError, lambda: kmidx.repeat(\"abc\"))\n\n def test_unique(self):\n pidx = pd.Index([\"a\", \"b\", \"a\"])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values())\n self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values())\n\n pmidx = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"a\")])\n kmidx = ks.from_pandas(pmidx)\n\n self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values())\n self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values())\n\n def test_asof(self):\n # Increasing values\n pidx = pd.Index([\"2013-12-31\", \"2014-01-02\", \"2014-01-03\"])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(kidx.asof(\"2014-01-01\"), pidx.asof(\"2014-01-01\"))\n self.assert_eq(kidx.asof(\"2014-01-02\"), pidx.asof(\"2014-01-02\"))\n self.assert_eq(repr(kidx.asof(\"1999-01-02\")), repr(pidx.asof(\"1999-01-02\")))\n\n # Decreasing values\n pidx = pd.Index([\"2014-01-03\", \"2014-01-02\", \"2013-12-31\"])\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(kidx.asof(\"2014-01-01\"), pidx.asof(\"2014-01-01\"))\n self.assert_eq(kidx.asof(\"2014-01-02\"), pidx.asof(\"2014-01-02\"))\n self.assert_eq(kidx.asof(\"1999-01-02\"), pidx.asof(\"1999-01-02\"))\n self.assert_eq(repr(kidx.asof(\"2015-01-02\")), repr(pidx.asof(\"2015-01-02\")))\n\n # Not increasing, neither decreasing (ValueError)\n kidx = ks.Index([\"2013-12-31\", \"2015-01-02\", \"2014-01-03\"])\n self.assertRaises(ValueError, lambda: kidx.asof(\"2013-12-31\"))\n\n kmidx = ks.MultiIndex.from_tuples([(\"a\", \"a\"), (\"a\", \"b\"), (\"a\", \"c\")])\n self.assertRaises(NotImplementedError, lambda: kmidx.asof((\"a\", \"b\")))\n\n def test_union(self):\n # Index\n pidx1 = pd.Index([1, 2, 3, 4])\n pidx2 = pd.Index([3, 4, 5, 6])\n kidx1 = ks.from_pandas(pidx1)\n kidx2 = ks.from_pandas(pidx2)\n\n self.assert_eq(kidx1.union(kidx2), pidx1.union(pidx2))\n self.assert_eq(kidx2.union(kidx1), pidx2.union(pidx1))\n self.assert_eq(kidx1.union([3, 4, 5, 6]), pidx1.union([3, 4, 5, 6]), almost=True)\n self.assert_eq(kidx2.union([1, 2, 3, 4]), pidx2.union([1, 2, 3, 4]), almost=True)\n self.assert_eq(\n kidx1.union(ks.Series([3, 4, 5, 6])), pidx1.union(pd.Series([3, 4, 5, 6])), almost=True\n )\n self.assert_eq(\n kidx2.union(ks.Series([1, 2, 3, 4])), pidx2.union(pd.Series([1, 2, 3, 4])), almost=True\n )\n\n # Testing if the result is correct after sort=False.\n # The `sort` argument is added in pandas 0.24.\n if LooseVersion(pd.__version__) >= LooseVersion(\"0.24\"):\n self.assert_eq(\n kidx1.union(kidx2, sort=False).sort_values(),\n pidx1.union(pidx2, sort=False).sort_values(),\n )\n self.assert_eq(\n kidx2.union(kidx1, sort=False).sort_values(),\n pidx2.union(pidx1, sort=False).sort_values(),\n )\n self.assert_eq(\n 
kidx1.union([3, 4, 5, 6], sort=False).sort_values(),\n pidx1.union([3, 4, 5, 6], sort=False).sort_values(),\n almost=True,\n )\n self.assert_eq(\n kidx2.union([1, 2, 3, 4], sort=False).sort_values(),\n pidx2.union([1, 2, 3, 4], sort=False).sort_values(),\n almost=True,\n )\n self.assert_eq(\n kidx1.union(ks.Series([3, 4, 5, 6]), sort=False).sort_values(),\n pidx1.union(pd.Series([3, 4, 5, 6]), sort=False).sort_values(),\n almost=True,\n )\n self.assert_eq(\n kidx2.union(ks.Series([1, 2, 3, 4]), sort=False).sort_values(),\n pidx2.union(pd.Series([1, 2, 3, 4]), sort=False).sort_values(),\n almost=True,\n )\n\n # Duplicated values for Index is supported in pandas >= 1.0.0\n if LooseVersion(pd.__version__) >= LooseVersion(\"1.0.0\"):\n pidx1 = pd.Index([1, 2, 3, 4, 3, 4, 3, 4])\n pidx2 = pd.Index([3, 4, 3, 4, 5, 6])\n kidx1 = ks.from_pandas(pidx1)\n kidx2 = ks.from_pandas(pidx2)\n\n self.assert_eq(kidx1.union(kidx2), pidx1.union(pidx2))\n self.assert_eq(kidx2.union(kidx1), pidx2.union(pidx1))\n self.assert_eq(\n kidx1.union([3, 4, 3, 3, 5, 6]), pidx1.union([3, 4, 3, 4, 5, 6]), almost=True\n )\n self.assert_eq(\n kidx2.union([1, 2, 3, 4, 3, 4, 3, 4]),\n pidx2.union([1, 2, 3, 4, 3, 4, 3, 4]),\n almost=True,\n )\n self.assert_eq(\n kidx1.union(ks.Series([3, 4, 3, 3, 5, 6])),\n pidx1.union(pd.Series([3, 4, 3, 4, 5, 6])),\n almost=True,\n )\n self.assert_eq(\n kidx2.union(ks.Series([1, 2, 3, 4, 3, 4, 3, 4])),\n pidx2.union(pd.Series([1, 2, 3, 4, 3, 4, 3, 4])),\n almost=True,\n )\n\n # MultiIndex\n pmidx1 = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"a\"), (\"x\", \"b\")])\n pmidx2 = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\"), (\"x\", \"d\")])\n pmidx3 = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)])\n pmidx4 = pd.MultiIndex.from_tuples([(1, 3), (1, 4), (1, 5), (1, 6)])\n kmidx1 = ks.from_pandas(pmidx1)\n kmidx2 = ks.from_pandas(pmidx2)\n kmidx3 = ks.from_pandas(pmidx3)\n kmidx4 = ks.from_pandas(pmidx4)\n\n self.assert_eq(kmidx1.union(kmidx2), pmidx1.union(pmidx2))\n self.assert_eq(kmidx2.union(kmidx1), pmidx2.union(pmidx1))\n self.assert_eq(kmidx3.union(kmidx4), pmidx3.union(pmidx4))\n self.assert_eq(kmidx4.union(kmidx3), pmidx4.union(pmidx3))\n self.assert_eq(\n kmidx1.union([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\"), (\"x\", \"d\")]),\n pmidx1.union([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\"), (\"x\", \"d\")]),\n )\n self.assert_eq(\n kmidx2.union([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"a\"), (\"x\", \"b\")]),\n pmidx2.union([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"a\"), (\"x\", \"b\")]),\n )\n self.assert_eq(\n kmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)]),\n pmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)]),\n )\n self.assert_eq(\n kmidx4.union([(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)]),\n pmidx4.union([(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)]),\n )\n\n # Testing if the result is correct after sort=False.\n # The `sort` argument is added in pandas 0.24.\n if LooseVersion(pd.__version__) >= LooseVersion(\"0.24\"):\n self.assert_eq(\n kmidx1.union(kmidx2, sort=False).sort_values(),\n pmidx1.union(pmidx2, sort=False).sort_values(),\n )\n self.assert_eq(\n kmidx2.union(kmidx1, sort=False).sort_values(),\n pmidx2.union(pmidx1, sort=False).sort_values(),\n )\n self.assert_eq(\n kmidx3.union(kmidx4, sort=False).sort_values(),\n pmidx3.union(pmidx4, sort=False).sort_values(),\n )\n self.assert_eq(\n kmidx4.union(kmidx3, sort=False).sort_values(),\n pmidx4.union(pmidx3, 
sort=False).sort_values(),\n )\n self.assert_eq(\n kmidx1.union(\n [(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\"), (\"x\", \"d\")], sort=False\n ).sort_values(),\n pmidx1.union(\n [(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\"), (\"x\", \"d\")], sort=False\n ).sort_values(),\n )\n self.assert_eq(\n kmidx2.union(\n [(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"a\"), (\"x\", \"b\")], sort=False\n ).sort_values(),\n pmidx2.union(\n [(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"a\"), (\"x\", \"b\")], sort=False\n ).sort_values(),\n )\n self.assert_eq(\n kmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)], sort=False).sort_values(),\n pmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)], sort=False).sort_values(),\n )\n self.assert_eq(\n kmidx4.union(\n [(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)], sort=False\n ).sort_values(),\n pmidx4.union(\n [(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)], sort=False\n ).sort_values(),\n )\n\n self.assertRaises(NotImplementedError, lambda: kidx1.union(kmidx1))\n self.assertRaises(TypeError, lambda: kmidx1.union(kidx1))\n self.assertRaises(TypeError, lambda: kmidx1.union([\"x\", \"a\"]))\n self.assertRaises(ValueError, lambda: kidx1.union(ks.range(2)))\n\n def test_take(self):\n # Index\n pidx = pd.Index([100, 200, 300, 400, 500], name=\"Koalas\")\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(kidx.take([0, 2, 4]).sort_values(), pidx.take([0, 2, 4]).sort_values())\n self.assert_eq(\n kidx.take(range(0, 5, 2)).sort_values(), pidx.take(range(0, 5, 2)).sort_values()\n )\n self.assert_eq(kidx.take([-4, -2, 0]).sort_values(), pidx.take([-4, -2, 0]).sort_values())\n self.assert_eq(\n kidx.take(range(-4, 1, 2)).sort_values(), pidx.take(range(-4, 1, 2)).sort_values()\n )\n\n # MultiIndex\n pmidx = pd.MultiIndex.from_tuples(\n [(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\")], names=[\"hello\", \"Koalas\"]\n )\n kmidx = ks.from_pandas(pmidx)\n\n self.assert_eq(kmidx.take([0, 2]).sort_values(), pmidx.take([0, 2]).sort_values())\n self.assert_eq(\n kmidx.take(range(0, 4, 2)).sort_values(), pmidx.take(range(0, 4, 2)).sort_values()\n )\n self.assert_eq(kmidx.take([-2, 0]).sort_values(), pmidx.take([-2, 0]).sort_values())\n self.assert_eq(\n kmidx.take(range(-2, 1, 2)).sort_values(), pmidx.take(range(-2, 1, 2)).sort_values()\n )\n\n # Checking the type of indices.\n self.assertRaises(ValueError, lambda: kidx.take(1))\n self.assertRaises(ValueError, lambda: kidx.take(\"1\"))\n self.assertRaises(ValueError, lambda: kidx.take({1, 2}))\n self.assertRaises(ValueError, lambda: kidx.take({1: None, 2: None}))\n self.assertRaises(ValueError, lambda: kmidx.take(1))\n self.assertRaises(ValueError, lambda: kmidx.take(\"1\"))\n self.assertRaises(ValueError, lambda: kmidx.take({1, 2}))\n self.assertRaises(ValueError, lambda: kmidx.take({1: None, 2: None}))\n\n def test_index_get_level_values(self):\n pidx = pd.Index([1, 2, 3], name=\"ks\")\n kidx = ks.from_pandas(pidx)\n\n for level in [0, \"ks\"]:\n self.assert_eq(kidx.get_level_values(level), pidx.get_level_values(level))\n\n def test_multiindex_get_level_values(self):\n pmidx = pd.MultiIndex.from_tuples([(\"a\", \"d\"), (\"b\", \"e\"), (\"c\", \"f\")])\n pmidx.names = [\"level_1\", \"level_2\"]\n kmidx = ks.from_pandas(pmidx)\n\n for level in [0, 1, \"level_1\", \"level_2\"]:\n self.assert_eq(kmidx.get_level_values(level), pmidx.get_level_values(level))\n\n def test_index_get_level_number(self):\n # name of two levels are the same, which is None\n kdf = ks.DataFrame({\"a\": [1, 2, 3]}, index=[list(\"aac\"), list(\"ddf\")])\n with 
self.assertRaisesRegex(\n            ValueError, \"The name None occurs multiple times, use a level number\"\n        ):\n            kdf.index._get_level_number(None)\n\n
        mi = pd.MultiIndex.from_arrays((list(\"abc\"), list(\"def\")))\n        mi.names = [\"level_1\", \"level_2\"]\n        kdf = ks.DataFrame({\"a\": [1, 2, 3]}, index=mi)\n\n
        # level is not int and not in the level name list\n        with self.assertRaisesRegex(KeyError, \"Level lv_3 not found\"):\n            kdf.index._get_level_number(\"lv_3\")\n\n
        # level is int, but an invalid negative number\n        with self.assertRaisesRegex(IndexError, \"Too many levels: Index has only\"):\n            kdf.index._get_level_number(-3)\n\n
        # level is int, but an invalid positive number\n        with self.assertRaisesRegex(IndexError, \"Too many levels: Index has only\"):\n            kdf.index._get_level_number(3)\n\n
        # Correct and valid inputs in numbers\n        level_number = [-2, -1, 0, 1]\n        outputs = [0, 1, 0, 1]\n\n        for lv, output in zip(level_number, outputs):\n            self.assertEqual(output, kdf.index._get_level_number(lv))\n\n
        # Valid inputs as level names\n        level_names = [\"level_1\", \"level_2\"]\n        outputs = [0, 1]\n\n        for lv, output in zip(level_names, outputs):\n            self.assertEqual(output, kdf.index._get_level_number(lv))\n\n
    def test_holds_integer(self):\n        pidx = pd.Index([1, 2, 3, 4])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.holds_integer(), kidx.holds_integer())\n\n
        pidx = pd.Index([1.1, 2.2, 3.3, 4.4])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.holds_integer(), kidx.holds_integer())\n\n
        pidx = pd.Index([\"A\", \"B\", \"C\", \"D\"])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.holds_integer(), kidx.holds_integer())\n\n
        # MultiIndex\n        pmidx = pd.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"a\")])\n        kmidx = ks.from_pandas(pmidx)\n        self.assert_eq(pmidx.holds_integer(), kmidx.holds_integer())\n\n
        pmidx = pd.MultiIndex.from_tuples([(10, 1), (10, 2), (20, 1)])\n        kmidx = ks.from_pandas(pmidx)\n        self.assert_eq(pmidx.holds_integer(), kmidx.holds_integer())\n\n
    def test_abs(self):\n        pidx = pd.Index([-2, -1, 0, 1])\n        kidx = ks.from_pandas(pidx)\n\n        self.assert_eq(abs(pidx), abs(kidx))\n        self.assert_eq(np.abs(pidx), np.abs(kidx))\n\n
        kidx = ks.MultiIndex.from_tuples([(1, 2)], names=[\"level1\", \"level2\"])\n        with self.assertRaisesRegex(TypeError, \"perform __abs__ with this index\"):\n            abs(kidx)\n\n
    def test_hasnans(self):\n        # BooleanType\n        pidx = pd.Index([True, False, True, True])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.hasnans, kidx.hasnans)\n\n
        pidx = pd.Index([True, False, np.nan, True])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.hasnans, kidx.hasnans)\n\n
        # TimestampType\n        pser = pd.Series([pd.Timestamp(\"2020-07-30\") for _ in range(3)])\n        kser = ks.from_pandas(pser)\n        self.assert_eq(pser.hasnans, kser.hasnans)\n\n
        pser = pd.Series([pd.Timestamp(\"2020-07-30\"), np.nan, pd.Timestamp(\"2020-07-30\")])\n        kser = ks.from_pandas(pser)\n        self.assert_eq(pser.hasnans, kser.hasnans)\n\n
    def test_item(self):\n        pidx = pd.Index([10])\n        kidx = ks.from_pandas(pidx)\n\n        self.assert_eq(pidx.item(), kidx.item())\n\n
        # with timestamp\n        pidx = pd.Index([datetime(1990, 3, 9)])\n        kidx = ks.from_pandas(pidx)\n\n        self.assert_eq(pidx.item(), kidx.item())\n\n
        # MultiIndex\n        pmidx = pd.MultiIndex.from_tuples([(\"a\", \"x\")])\n        kmidx = ks.from_pandas(pmidx)\n\n        self.assert_eq(pmidx.item(), kmidx.item())\n\n
        # MultiIndex with timestamp\n        pmidx = pd.MultiIndex.from_tuples([(datetime(1990, 3, 9), datetime(2019, 8, 15))])\n        kmidx = ks.from_pandas(pmidx)\n\n        self.assert_eq(pmidx.item(), kmidx.item())\n\n
        err_msg = \"can only convert an array of size 1 to a Python scalar\"\n        with self.assertRaisesRegex(ValueError, err_msg):\n            ks.Index([10, 20]).item()\n        with self.assertRaisesRegex(ValueError, err_msg):\n            ks.MultiIndex.from_tuples([(\"a\", \"x\"), (\"b\", \"y\")]).item()\n\n
    def test_inferred_type(self):\n        # Integer\n        pidx = pd.Index([1, 2, 3])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.inferred_type, kidx.inferred_type)\n\n
        # Floating\n        pidx = pd.Index([1.0, 2.0, 3.0])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.inferred_type, kidx.inferred_type)\n\n
        # String\n        pidx = pd.Index([\"a\", \"b\", \"c\"])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.inferred_type, kidx.inferred_type)\n\n
        # Boolean\n        pidx = pd.Index([True, False, True, False])\n        kidx = ks.from_pandas(pidx)\n        self.assert_eq(pidx.inferred_type, kidx.inferred_type)\n\n
        # MultiIndex\n        pmidx = pd.MultiIndex.from_tuples([(\"a\", \"x\")])\n        kmidx = ks.from_pandas(pmidx)\n        self.assert_eq(pmidx.inferred_type, kmidx.inferred_type)\n\n
    def test_multi_index_from_index(self):\n        tuples = [(1, \"red\"), (1, \"blue\"), (2, \"red\"), (2, \"blue\")]\n        pmidx = pd.Index(tuples)\n        kmidx = ks.Index(tuples)\n\n        self.assertTrue(isinstance(kmidx, ks.MultiIndex))\n        self.assert_eq(pmidx, kmidx)\n\n
        # Specify the `names`\n        pmidx = pd.Index(tuples, names=[\"Hello\", \"Koalas\"])\n        kmidx = ks.Index(tuples, names=[\"Hello\", \"Koalas\"])\n\n        self.assertTrue(isinstance(kmidx, ks.MultiIndex))\n        self.assert_eq(pmidx, kmidx)\n\n
    @unittest.skipIf(\n        LooseVersion(pd.__version__) < LooseVersion(\"0.24\"),\n        \"MultiIndex.from_frame is new in pandas 0.24\",\n    )\n
    def test_multiindex_from_frame(self):\n        pdf = pd.DataFrame(\n            [[\"HI\", \"Temp\"], [\"HI\", \"Precip\"], [\"NJ\", \"Temp\"], [\"NJ\", \"Precip\"]], columns=[\"a\", \"b\"]\n        )\n        kdf = ks.from_pandas(pdf)\n        pidx = pd.MultiIndex.from_frame(pdf)\n        kidx = ks.MultiIndex.from_frame(kdf)\n\n        self.assert_eq(pidx, kidx)\n\n
        # Specify `names`\n        pidx = pd.MultiIndex.from_frame(pdf, names=[\"state\", \"observation\"])\n        kidx = ks.MultiIndex.from_frame(kdf, names=[\"state\", \"observation\"])\n        self.assert_eq(pidx, kidx)\n\n
        # MultiIndex columns\n        pidx = pd.MultiIndex.from_tuples([(\"a\", \"w\"), (\"b\", \"x\")])\n        pdf.columns = pidx\n        kdf = ks.from_pandas(pdf)\n\n        pidx = pd.MultiIndex.from_frame(pdf)\n        kidx = ks.MultiIndex.from_frame(kdf)\n\n        self.assert_eq(pidx, kidx)\n\n
        # tuples for names\n        pidx = pd.MultiIndex.from_frame(pdf, names=[(\"a\", \"w\"), (\"b\", \"x\")])\n        kidx = ks.MultiIndex.from_frame(kdf, names=[(\"a\", \"w\"), (\"b\", \"x\")])\n\n        self.assert_eq(pidx, kidx)\n\n
        err_msg = \"Input must be a DataFrame\"\n        with self.assertRaisesRegex(TypeError, err_msg):\n            ks.MultiIndex.from_frame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n\n
    def test_is_type_compatible(self):\n        data_types = [\"integer\", \"floating\", \"string\", \"boolean\"]\n        # Integer\n        pidx = pd.Index([1, 2, 3])\n        kidx = ks.from_pandas(pidx)\n        for data_type in data_types:\n            self.assert_eq(pidx.is_type_compatible(data_type), kidx.is_type_compatible(data_type))\n\n
        # Floating\n        pidx = pd.Index([1.0, 2.0, 3.0])\n        kidx = ks.from_pandas(pidx)\n        for data_type in data_types:\n            self.assert_eq(pidx.is_type_compatible(data_type), kidx.is_type_compatible(data_type))\n\n
        # String\n        pidx = pd.Index([\"a\", \"b\", \"c\"])\n        kidx = ks.from_pandas(pidx)\n        for data_type in data_types:\n            self.assert_eq(pidx.is_type_compatible(data_type), kidx.is_type_compatible(data_type))\n\n
        # Boolean\n        pidx = pd.Index([True, False, True, False])\n
kidx = ks.from_pandas(pidx)\n for data_type in data_types:\n self.assert_eq(pidx.is_type_compatible(data_type), kidx.is_type_compatible(data_type))\n\n # MultiIndex\n pmidx = pd.MultiIndex.from_tuples([(\"a\", \"x\")])\n kmidx = ks.from_pandas(pmidx)\n for data_type in data_types:\n self.assert_eq(pmidx.is_type_compatible(data_type), kmidx.is_type_compatible(data_type))\n\n def test_asi8(self):\n # Integer\n pidx = pd.Index([1, 2, 3])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.asi8, kidx.asi8)\n self.assert_eq(pidx.astype(\"int\").asi8, kidx.astype(\"int\").asi8)\n self.assert_eq(pidx.astype(\"int16\").asi8, kidx.astype(\"int16\").asi8)\n self.assert_eq(pidx.astype(\"int8\").asi8, kidx.astype(\"int8\").asi8)\n\n # Integer with missing value\n pidx = pd.Index([1, 2, None, 4, 5])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.asi8, kidx.asi8)\n\n # Datetime\n pidx = pd.date_range(end=\"1/1/2018\", periods=3)\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.asi8, kidx.asi8)\n\n # Floating\n pidx = pd.Index([1.0, 2.0, 3.0])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.asi8, kidx.asi8)\n\n # String\n pidx = pd.Index([\"a\", \"b\", \"c\"])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.asi8, kidx.asi8)\n\n # Boolean\n pidx = pd.Index([True, False, True, False])\n kidx = ks.from_pandas(pidx)\n self.assert_eq(pidx.asi8, kidx.asi8)\n\n # MultiIndex\n pmidx = pd.MultiIndex.from_tuples([(1, 2)])\n kmidx = ks.from_pandas(pmidx)\n self.assert_eq(pmidx.asi8, kmidx.asi8)\n\n def test_index_is_unique(self):\n indexes = [(\"a\", \"b\", \"c\"), (\"a\", \"a\", \"c\"), (1, 3, 3), (1, 2, 3)]\n names = [None, \"ks\", \"ks\", None]\n is_uniq = [True, False, False, True]\n\n for idx, name, expected in zip(indexes, names, is_uniq):\n pdf = pd.DataFrame({\"a\": [1, 2, 3]}, index=pd.Index(idx, name=name))\n kdf = ks.from_pandas(pdf)\n\n self.assertEqual(kdf.index.is_unique, expected)\n\n def test_multiindex_is_unique(self):\n indexes = [\n [list(\"abc\"), list(\"edf\")],\n [list(\"aac\"), list(\"edf\")],\n [list(\"aac\"), list(\"eef\")],\n [[1, 4, 4], [4, 6, 6]],\n ]\n is_uniq = [True, True, False, False]\n\n for idx, expected in zip(indexes, is_uniq):\n pdf = pd.DataFrame({\"a\": [1, 2, 3]}, index=idx)\n kdf = ks.from_pandas(pdf)\n\n self.assertEqual(kdf.index.is_unique, expected)\n\n def test_view(self):\n pidx = pd.Index([1, 2, 3, 4], name=\"Koalas\")\n kidx = ks.from_pandas(pidx)\n\n self.assert_eq(pidx.view(), kidx.view())\n\n # MultiIndex\n pmidx = pd.MultiIndex.from_tuples([(\"a\", \"x\"), (\"b\", \"y\"), (\"c\", \"z\")])\n kmidx = ks.from_pandas(pmidx)\n\n self.assert_eq(pmidx.view(), kmidx.view())\n" ]
[ [ "pandas.MultiIndex.from_frame", "numpy.abs", "pandas.Series", "pandas.MultiIndex", "pandas.MultiIndex.from_tuples", "pandas.Index", "pandas.DataFrame", "pandas.MultiIndex.from_arrays", "numpy.random.randn", "pandas.MultiIndex.from_product", "pandas.date_range", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kshitij-Ambilduke/MedVQA
[ "e20f0d29638c5d05e3e0c385fe67a9bfeef0f921" ]
[ "mmbert/vqarad/train_vqarad.py" ]
[ "import argparse\nfrom utils_vqarad import seed_everything, Model, VQAMed, train_one_epoch, validate, test, load_data, LabelSmoothing\n# import wandb\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, Dataset\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torchvision import transforms, models\nfrom torch.cuda.amp import GradScaler\nfrom torchtoolbox.transform import Cutout\nimport albumentations as albu\nfrom albumentations.core.composition import OneOf\nfrom albumentations.pytorch.transforms import ToTensorV2\nimport os\nimport warnings\n\nwarnings.simplefilter(\"ignore\", UserWarning)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description = \"Finetune on VQARAD\")\n\n # parser.add_argument('--run_name', type = str, required = True, help = \"run name for wandb\")\n parser.add_argument('--data_dir', type = str, required = False, default = \"/home/roboticslab/Documents/MED-VQA/dataset/med-vqa-data/vqa_rad\", help = \"path for data\")\n parser.add_argument('--model_dir', type = str, required = False, default = \"/home/roboticslab/Documents/MED-VQA/github/Weights/roco_mlm/val_loss_3.pt\", help = \"path to load weights\")\n parser.add_argument('--save_dir', type = str, required = False, default = \"/home/roboticslab/Documents/MED-VQA/github/Weights/vqa-rad\", help = \"path to save weights\")\n parser.add_argument('--question_type', type = str, required = False, default = None, help = \"choose specific category if you want\")\n parser.add_argument('--use_pretrained', action = 'store_true', default = False, help = \"use pretrained weights or not\")\n parser.add_argument('--mixed_precision', action = 'store_true', default = False, help = \"use mixed precision or not\")\n parser.add_argument('--clip', action = 'store_true', default = False, help = \"clip the gradients or not\")\n\n parser.add_argument('--seed', type = int, required = False, default = 42, help = \"set seed for reproducibility\")\n parser.add_argument('--num_workers', type = int, required = False, default = 4, help = \"number of workers\")\n parser.add_argument('--epochs', type = int, required = False, default = 100, help = \"num epochs to train\")\n parser.add_argument('--train_pct', type = float, required = False, default = 1.0, help = \"fraction of train samples to select\")\n parser.add_argument('--valid_pct', type = float, required = False, default = 1.0, help = \"fraction of validation samples to select\")\n parser.add_argument('--test_pct', type = float, required = False, default = 1.0, help = \"fraction of test samples to select\")\n\n parser.add_argument('--max_position_embeddings', type = int, required = False, default = 28, help = \"max length of sequence\")\n parser.add_argument('--batch_size', type = int, required = False, default = 16, help = \"batch size\")\n parser.add_argument('--lr', type = float, required = False, default = 1e-4, help = \"learning rate'\")\n # parser.add_argument('--weight_decay', type = float, required = False, default = 1e-2, help = \" weight decay for gradients\")\n parser.add_argument('--factor', type = float, required = False, default = 0.1, help = \"factor for rlp\")\n parser.add_argument('--patience', type = int, required = False, default = 10, help = \"patience for rlp\")\n # parser.add_argument('--lr_min', type = float, required = False, default = 1e-6, help = \"minimum lr for Cosine Annealing\")\n parser.add_argument('--hidden_dropout_prob', type = float, 
required = False, default = 0.3, help = \"hidden dropout probability\")\n    parser.add_argument('--smoothing', type = float, required = False, default = None, help = \"label smoothing\")\n\n
    parser.add_argument('--image_size', type = int, required = False, default = 224, help = \"image size\")\n    parser.add_argument('--hidden_size', type = int, required = False, default = 768, help = \"hidden size\")\n    parser.add_argument('--vocab_size', type = int, required = False, default = 30522, help = \"vocab size\")\n    parser.add_argument('--type_vocab_size', type = int, required = False, default = 2, help = \"type vocab size\")\n    parser.add_argument('--heads', type = int, required = False, default = 12, help = \"heads\")\n    parser.add_argument('--n_layers', type = int, required = False, default = 4, help = \"num of layers\")\n\n
    # args.run_name must exist because the checkpoint file names below use it, even with wandb disabled; the default is an arbitrary placeholder\n    parser.add_argument('--run_name', type = str, required = False, default = 'vqarad', help = \"run name used in checkpoint file names\")\n\n
    args = parser.parse_args()\n\n    # wandb.init(project='imageclef20vqa', name = args.run_name, config = args)\n\n    seed_everything(args.seed)\n\n\n    train_df, test_df = load_data(args)\n\n
    if args.question_type:\n\n        # val_df does not exist yet at this point; rows with mode == 'val' travel\n        # inside train_df/test_df, so filtering these two frames also covers them\n        train_df = train_df[train_df['question_type']==args.question_type].reset_index(drop=True)\n        test_df = test_df[test_df['question_type']==args.question_type].reset_index(drop=True)\n\n\n
    df = pd.concat([train_df, test_df]).reset_index(drop=True)\n    df['answer'] = df['answer'].str.lower()\n    ans2idx = {ans:idx for idx,ans in enumerate(df['answer'].unique())}\n    idx2ans = {idx:ans for ans,idx in ans2idx.items()}\n    df['answer'] = df['answer'].map(ans2idx).astype(int)\n    # print(df[\"mode\"])\n    train_df = df[df['mode']=='train'].reset_index(drop=True)\n    val_df = df[df['mode']=='val'].reset_index(drop=True)\n    test_df = df[df['mode']=='test'].reset_index(drop=True)\n\n
    num_classes = len(ans2idx)\n\n    args.num_classes = num_classes\n\n\n    device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n    model = Model(args)\n\n
    def count_parameters(model):\n        return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n    print('The model has {} trainable parameters'.format(count_parameters(model)))\n\n
    if args.use_pretrained:\n        model.load_state_dict(torch.load(args.model_dir))\n\n
    model.classifier[2] = nn.Linear(args.hidden_size, num_classes)\n\n    model.to(device)\n\n    # wandb.watch(model, log='all')\n\n
    optimizer = optim.Adam(model.parameters(),lr=args.lr)\n    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, patience = args.patience, factor = args.factor, verbose = True)\n\n
    if args.smoothing:\n        criterion = LabelSmoothing(smoothing=args.smoothing)\n    else:\n        criterion = nn.CrossEntropyLoss()\n\n    scaler = GradScaler()\n\n
    train_tfm = transforms.Compose([transforms.Resize((224, 224)),\n                                    transforms.RandomResizedCrop(224,scale=(0.5,1.0),ratio=(0.75,1.333)),\n                                    transforms.RandomRotation(10),\n                                    # Cutout(),\n                                    transforms.ColorJitter(brightness=0.4,contrast=0.4,saturation=0.4,hue=0.4),\n                                    transforms.ToTensor(),\n                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n
    val_tfm = transforms.Compose([transforms.Resize((224, 224)),\n                                  transforms.ToTensor(),\n                                  transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n
    test_tfm = transforms.Compose([transforms.Resize((224, 224)),\n                                   transforms.ToTensor(),\n                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\n
    traindataset = VQAMed(train_df, imgsize = args.image_size, tfm = train_tfm, args = args)\n    valdataset = VQAMed(val_df, imgsize = args.image_size, tfm = val_tfm, args = args)\n    testdataset = VQAMed(test_df, imgsize = 
args.image_size, tfm = test_tfm, args = args)\n\n
    trainloader = DataLoader(traindataset, batch_size = args.batch_size, shuffle=True, num_workers = args.num_workers)\n    valloader = DataLoader(valdataset, batch_size = args.batch_size, shuffle=False, num_workers = args.num_workers)\n    testloader = DataLoader(testdataset, batch_size = args.batch_size, shuffle=False, num_workers = args.num_workers)\n\n
    val_best_acc = 0\n    test_best_acc = 0\n    best_loss = np.inf\n    counter = 0\n\n
    for epoch in range(args.epochs):\n\n        print('Epoch {}/{}'.format(epoch+1,args.epochs))\n\n
        train_loss, train_acc = train_one_epoch(trainloader, model, optimizer, criterion, device, scaler, args, train_df,idx2ans)\n        val_loss, val_predictions, val_acc, val_bleu = validate(valloader, model, criterion, device, scaler, args, val_df,idx2ans)\n        test_loss, test_predictions, test_acc = test(testloader, model, criterion, device, scaler, args, test_df,idx2ans)\n\n
        scheduler.step(train_loss)\n\n        # copy the accuracy dict so extending the log payload does not mutate test_acc in place\n        log_dict = dict(test_acc)\n\n        log_dict['train_loss'] = train_loss\n        log_dict['test_loss'] = test_loss\n        log_dict['learning_rate'] = optimizer.param_groups[0][\"lr\"]\n\n        # wandb.log(log_dict)\n\n
        content = f'Learning rate: {(optimizer.param_groups[0][\"lr\"]):.7f}, Train loss: {(train_loss):.4f}, Train acc: {train_acc}, Test loss: {(test_loss):.4f}, Test acc: {test_acc}'\n        print(content)\n\n
        if test_acc['total_acc'] > test_best_acc:\n            torch.save(model.state_dict(),os.path.join(args.save_dir, f'{args.run_name}_test_acc.pt'))\n            test_best_acc=test_acc['total_acc']\n" ]
[ [ "torch.nn.CrossEntropyLoss", "pandas.concat", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.load", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.cuda.amp.GradScaler", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
atbrandao/ross_f
[ "6a1b03b0594802bc5b916095937fef4866a735fa" ]
[ "ross/tests/test_stochastic_elements.py" ]
[ "\"\"\"Tests file.\nTests for:\n st_shaft_element.py\n st_disk_element.py\n st_bearing_seal_element.py\n st_point_mass.py\n\"\"\"\nimport numpy as np\nimport plotly.graph_objects as go\nimport pytest\n\nfrom ross.stochastic.st_bearing_seal_element import ST_BearingElement\nfrom ross.stochastic.st_disk_element import ST_DiskElement\nfrom ross.stochastic.st_materials import ST_Material\nfrom ross.stochastic.st_point_mass import ST_PointMass\nfrom ross.stochastic.st_shaft_element import ST_ShaftElement\n\n\[email protected]\ndef rand_shaft():\n E = [209e9, 211e9]\n st_steel = ST_Material(name=\"Steel\", rho=7810, E=E, G_s=81.2e9)\n elm = ST_ShaftElement(\n L=[1.0, 1.1],\n idl=[0.01, 0.02],\n odl=[0.1, 0.2],\n material=st_steel,\n is_random=[\"L\", \"idl\", \"odl\", \"material\"],\n )\n return elm\n\n\[email protected]\ndef rand_disk_from_inertia():\n elm = ST_DiskElement(\n n=1, m=[30, 40], Id=[0.2, 0.3], Ip=[0.5, 0.7], is_random=[\"m\", \"Id\", \"Ip\"],\n )\n return elm\n\n\[email protected]\ndef rand_disk_from_geometry():\n E = [209e9, 211e9]\n st_steel = ST_Material(name=\"Steel\", rho=7810, E=E, G_s=81.2e9)\n elm = ST_DiskElement.from_geometry(\n n=1,\n material=st_steel,\n width=[0.07, 0.08],\n i_d=[0.05, 0.06],\n o_d=[0.30, 0.40],\n is_random=[\"material\", \"width\", \"i_d\", \"o_d\"],\n )\n return elm\n\n\[email protected]\ndef rand_bearing_constant_coefs():\n kxx = [1e6, 2e6]\n cxx = [1e3, 2e3]\n elm = ST_BearingElement(n=1, kxx=kxx, cxx=cxx, is_random=[\"kxx\", \"cxx\"])\n\n return elm\n\n\[email protected]\ndef rand_bearing_varying_coefs():\n kxx = [[1e6, 1.1e6], [2e6, 2.1e6]]\n kxy = [[1.5e6, 1.6e6], [2.5e6, 2.6e6]]\n kyx = [[1.5e6, 1.6e6], [2.5e6, 2.6e6]]\n kyy = [[3e6, 3.1e6], [4e6, 4.1e6]]\n cxx = [[1e3, 1.1e3], [2e3, 2.1e3]]\n cxy = [[1.5e3, 1.6e3], [2.5e3, 2.6e3]]\n cyx = [[1.5e3, 1.6e3], [2.5e3, 2.6e3]]\n cyy = [[3e3, 3.1e3], [4e3, 4.1e3]]\n frequency = np.array([500, 800])\n elm = ST_BearingElement(\n n=1,\n kxx=kxx,\n kxy=kxy,\n kyx=kyx,\n kyy=kyy,\n cxx=cxx,\n cxy=cxy,\n cyx=cyx,\n cyy=cyy,\n frequency=frequency,\n is_random=[\"kxx\", \"kxy\", \"kyx\", \"kyy\", \"cxx\", \"cxy\", \"cyx\", \"cyy\"],\n )\n\n return elm\n\n\[email protected]\ndef rand_point_mass():\n mx = [2.0, 2.5]\n my = [3.0, 3.5]\n elm = ST_PointMass(n=1, mx=mx, my=my, is_random=[\"mx\", \"my\"])\n return elm\n\n\n###############################################################################\n# testing attributes and parameters\n###############################################################################\ndef test_st_shaft_element(rand_shaft):\n elm = list(iter(rand_shaft))\n assert [sh.L for sh in elm] == [1.0, 1.1]\n assert [sh.idl for sh in elm] == [0.01, 0.02]\n assert [sh.odl for sh in elm] == [0.1, 0.2]\n assert [sh.idr for sh in elm] == [0.01, 0.02]\n assert [sh.odr for sh in elm] == [0.1, 0.2]\n assert [sh.material.E for sh in elm] == [209000000000.0, 211000000000.0]\n\n\ndef test_st_disk_element_from_inertia(rand_disk_from_inertia):\n elm = list(iter(rand_disk_from_inertia))\n assert [dk.n for dk in elm] == [1, 1]\n assert [dk.m for dk in elm] == [30, 40]\n assert [dk.Id for dk in elm] == [0.2, 0.3]\n assert [dk.Ip for dk in elm] == [0.5, 0.7]\n\n\ndef test_st_disk_element_from_geometry(rand_disk_from_geometry):\n elm = list(iter(rand_disk_from_geometry))\n assert [dk.n for dk in elm] == [1, 1]\n assert [dk.m for dk in elm] == [37.570502893821185, 76.74810321754951]\n assert [dk.Id for dk in elm] == [0.2325457585365474, 0.8256816771154702]\n assert [dk.Ip for dk in elm] == 
[0.43440893970980754, 1.5694987107988876]\n\n\ndef test_st_bearing_element_constant_coef(rand_bearing_constant_coefs):\n elm = list(iter(rand_bearing_constant_coefs))\n assert [brg.n for brg in elm] == [1, 1]\n assert [brg.kxx.coefficient for brg in elm] == [[1000000.0], [2000000.0]]\n assert [brg.kyy.coefficient for brg in elm] == [[1000000.0], [2000000.0]]\n assert [brg.kxy.coefficient for brg in elm] == [[0], [0]]\n assert [brg.kyx.coefficient for brg in elm] == [[0], [0]]\n assert [brg.cxx.coefficient for brg in elm] == [[1000.0], [2000.0]]\n assert [brg.cyy.coefficient for brg in elm] == [[1000.0], [2000.0]]\n assert [brg.cxy.coefficient for brg in elm] == [[0], [0]]\n assert [brg.cyx.coefficient for brg in elm] == [[0], [0]]\n\n\ndef test_st_bearing_element_varying_coef(rand_bearing_varying_coefs):\n elm = list(iter(rand_bearing_varying_coefs))\n assert [brg.n for brg in elm] == [1, 1]\n assert [list(brg.kxx) for brg in elm] == [\n [1000000.0, 2000000.0],\n [1100000.0, 2100000.0],\n ]\n assert [list(brg.kyy) for brg in elm] == [\n [3000000.0, 4000000.0],\n [3100000.0, 4100000.0],\n ]\n assert [list(brg.kxy) for brg in elm] == [\n [1500000.0, 2500000.0],\n [1600000.0, 2600000.0],\n ]\n assert [list(brg.kyx) for brg in elm] == [\n [1500000.0, 2500000.0],\n [1600000.0, 2600000.0],\n ]\n assert [list(brg.cxx) for brg in elm] == [[1000.0, 2000.0], [1100.0, 2100.0]]\n assert [list(brg.cyy) for brg in elm] == [[3000.0, 4000.0], [3100.0, 4100.0]]\n assert [list(brg.cxy) for brg in elm] == [[1500.0, 2500.0], [1600.0, 2600.0]]\n assert [list(brg.cyx) for brg in elm] == [[1500.0, 2500.0], [1600.0, 2600.0]]\n\n\ndef test_st_point_mass(rand_point_mass):\n elm = list(iter(rand_point_mass))\n assert [pm.n for pm in elm] == [1, 1]\n assert [pm.mx for pm in elm] == [2.0, 2.5]\n assert [pm.my for pm in elm] == [3.0, 3.5]\n\n\n###############################################################################\n# testing error messages\n###############################################################################\ndef test_st_bearing_error_messages(rand_bearing_constant_coefs):\n kxx = [1e6, 2e6]\n cxx = [1e3, 2e3]\n freq = [500, 1000]\n with pytest.raises(ValueError) as ex:\n ST_BearingElement(\n n=1, kxx=kxx, cxx=cxx, frequency=freq, is_random=[\"kxx\", \"cxx\", \"frequency\"]\n )\n assert \"frequency can not be a random variable\" in str(ex.value)\n\n with pytest.raises(ValueError) as ex:\n rand_bearing_constant_coefs.plot_random_var([\"kxy\"])\n assert (\n \"Not random variable in var_list. Select variables from ['kxx', 'cxx', 'kyy', 'cyy']\"\n in str(ex.value)\n )\n\n with pytest.raises(KeyError) as ex:\n rand_bearing_constant_coefs[\"odd\"] = [1, 2]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n with pytest.raises(KeyError) as ex:\n rand_bearing_constant_coefs[\"odd\"]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n\ndef test_st_disk_error_messages(rand_disk_from_inertia):\n with pytest.raises(ValueError) as ex:\n rand_disk_from_inertia.plot_random_var([\"n\"])\n assert (\n \"Not random variable in var_list. 
Select variables from ['m', 'Id', 'Ip']\"\n in str(ex.value)\n )\n\n with pytest.raises(KeyError) as ex:\n rand_disk_from_inertia[\"odd\"] = [1, 2]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n with pytest.raises(KeyError) as ex:\n rand_disk_from_inertia[\"odd\"]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n\ndef test_st_shaft_error_messages(rand_shaft):\n with pytest.raises(ValueError) as ex:\n rand_shaft.plot_random_var([\"n\"])\n assert (\n \"Not random variable in var_list. Select variables from ['L', 'idl', 'odl', 'idr', 'odr']\"\n in str(ex.value)\n )\n\n with pytest.raises(KeyError) as ex:\n rand_shaft[\"odd\"] = [1, 2]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n with pytest.raises(KeyError) as ex:\n rand_shaft[\"odd\"]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n\ndef test_st_point_mass_error_messages(rand_point_mass):\n with pytest.raises(ValueError) as ex:\n rand_point_mass.plot_random_var([\"n\"])\n assert \"Not random variable in var_list. Select variables from ['mx', 'my']\" in str(\n ex.value\n )\n\n with pytest.raises(KeyError) as ex:\n rand_point_mass[\"odd\"] = [1, 2]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n with pytest.raises(KeyError) as ex:\n rand_point_mass[\"odd\"]\n assert \"Object does not have parameter: odd.\" in str(ex.value)\n\n\n###############################################################################\n# testing elements plot type\n###############################################################################\ndef test_element_stats_plot(\n rand_shaft, rand_disk_from_inertia, rand_bearing_constant_coefs, rand_point_mass\n):\n figure_type = type(go.Figure())\n fig = rand_shaft.plot_random_var([\"L\"])\n assert type(fig) == figure_type\n\n fig = rand_disk_from_inertia.plot_random_var([\"Id\"])\n assert type(fig) == figure_type\n\n fig = rand_bearing_constant_coefs.plot_random_var([\"kxx\"])\n assert type(fig) == figure_type\n\n fig = rand_point_mass.plot_random_var([\"mx\"])\n assert type(fig) == figure_type\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gargrohin/Visual-Recognition
[ "2db81526532a23c4cfe5f1824d09e19e2fa25911" ]
[ "Object Detection/train.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport copy\nimport time\nimport os\n\n\n# Data augmentation and normalization for training\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.Scale((224,224)),\n #transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Scale((224,224)),\n #transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\ndata_dir = 'cv3'\n\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']}\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\nclass_names = image_datasets['train'].classes\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\n# Get a batch of training data\ninputs, classes = next(iter(dataloaders['train']))\n\n# Make a grid from batch\nout = torchvision.utils.make_grid(inputs)\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n scheduler.step()\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\n\nmodel_ft = models.resnet18(pretrained=True)\n\n\nnum_ftrs = model_ft.fc.in_features\nmodel_ft.fc = nn.Linear(num_ftrs, 4) # output classes\n\nmodel_ft = model_ft.to(device)\n\ncriterion = nn.CrossEntropyLoss() # loss function\n\n# Observe that all parameters are being optimized\noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\n\n# Decay LR by a factor of 
0.1 every 7 epochs\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\nmodel_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\n num_epochs=15)\n\ntorch.save(model_ft , 'model1.pth')\n" ]
[ [ "torch.optim.lr_scheduler.StepLR", "torch.nn.CrossEntropyLoss", "torch.max", "torch.utils.data.DataLoader", "torch.sum", "torch.nn.Linear", "torch.set_grad_enabled", "torch.cuda.is_available", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cthoyt/ged4py
[ "ab7940dd5bcd9eadf35e670f2c5313cf23b3d4c4" ]
[ "src/ged4py/abstract_graph_edit_dist.py" ]
[ "# -*- coding: UTF-8 -*-\n\nfrom __future__ import print_function\n\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\nfrom networkx import __version__ as nxv\nfrom scipy.optimize import linear_sum_assignment\n\n\ndef _get_nodes(graph):\n if float(nxv) < 2:\n return graph.nodes()\n\n return list(graph.nodes())\n\n\nclass AbstractGraphEditDistance(ABC):\n def __init__(self, g, h):\n self.g = g\n self.h = h\n\n self.cost_matrix = self._create_cost_matrix()\n self.edit_costs = self._calculate_edit_costs()\n self.distance = sum(self.edit_costs)\n\n def normalized_distance(self):\n \"\"\"Return the graph edit distance between graph g1 & g2.\n\n The distance is normalized on the size of the two graphs.\n This is done to avoid favorisation towards smaller graphs\n \"\"\"\n return self.distance * 2 / (len(self.g) + len(self.h))\n\n def _calculate_edit_costs(self):\n row_ind, col_ind = linear_sum_assignment(self.cost_matrix)\n return [\n self.cost_matrix[i][j]\n for i, j in zip(row_ind, col_ind)\n ]\n\n def _create_cost_matrix(self):\n \"\"\"\n Creates a |N+M| X |N+M| cost matrix between all nodes in\n graphs g1 and g2\n Each cost represents the cost of substituting,\n deleting or inserting a node\n The cost matrix consists of four regions:\n\n substitute \t| insert costs\n -------------------------------\n delete \t\t| delete -> delete\n\n The delete -> delete region is filled with zeros\n \"\"\"\n n = len(self.g)\n m = len(self.h)\n cost_matrix = np.zeros((n + m, n + m))\n\n nodes_1 = _get_nodes(self.g)\n nodes_2 = _get_nodes(self.h)\n\n for i in range(n):\n for j in range(m):\n cost_matrix[i, j] = self.substitute_cost(nodes_1[i], nodes_2[j])\n\n for i in range(m):\n for j in range(m):\n cost_matrix[i + n, j] = self.insert_cost(i, j)\n\n for i in range(n):\n for j in range(n):\n cost_matrix[j, i + m] = self.delete_cost(i, j)\n\n return cost_matrix\n\n @abstractmethod\n def insert_cost(self, i, j):\n raise NotImplementedError\n\n @abstractmethod\n def delete_cost(self, i, j):\n raise NotImplementedError\n\n @abstractmethod\n def substitute_cost(self, nodes1, nodes2):\n raise NotImplementedError\n\n def __str__(self):\n return str(self._create_cost_matrix())\n\n def print_matrix(self):\n print(self)\n\n @classmethod\n def compare(cls, g, h):\n ged = cls(g, h)\n return ged.normalized_distance()\n" ]
[ [ "scipy.optimize.linear_sum_assignment", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
ai-pest/shapemask
[ "9a8b08f9a57f2bd1790761497d37ef9144463993" ]
[ "modeling/architecture/nasfpn.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n# Copyright 2022 Northern System Service Co., Ltd. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"NAS-FPN.\n\nGolnaz Ghiasi, Tsung-Yi Lin, Ruoming Pang, Quoc V. Le.\nNAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection.\nhttps://arxiv.org/abs/1904.07392. CVPR 2019.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom absl import logging\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nfrom modeling.architecture import nn_blocks\nfrom modeling.architecture import nn_ops\nfrom modeling.architecture import resnet\nfrom ops import spatial_transform_ops\n\n\n# The fixed NAS-FPN architecture discovered by NAS.\n# Each element represents a specification of a building block:\n# (block_level, combine_fn, (input_offset0, input_offset1), is_output).\nNASFPN_BLOCK_SPECS = [\n (4, 'attention', (1, 3), False),\n (4, 'sum', (1, 5), False),\n (3, 'sum', (0, 6), True),\n (4, 'sum', (6, 7), True),\n (5, 'attention', (7, 8), True),\n (7, 'attention', (6, 9), True),\n (6, 'attention', (9, 10), True),\n]\n\n\nclass BlockSpec(object):\n \"\"\"A container class that specifies the block configuration for NAS-FPN.\"\"\"\n\n def __init__(self, level, combine_fn, input_offsets, is_output):\n self.level = level\n self.combine_fn = combine_fn\n self.input_offsets = input_offsets\n self.is_output = is_output\n\n\ndef build_block_specs(block_specs=None):\n \"\"\"Builds the list of BlockSpec objects for NAS-FPN.\"\"\"\n if not block_specs:\n block_specs = NASFPN_BLOCK_SPECS\n logging.info('Building NAS-FPN block specs: %s', block_specs)\n return [BlockSpec(*b) for b in block_specs]\n\n\ndef block_group(inputs,\n filters,\n strides,\n block_fn,\n block_repeats,\n conv2d_op=None,\n activation=tf.nn.swish,\n batch_norm_activation=nn_ops.BatchNormActivation(),\n dropblock=nn_ops.Dropblock(),\n drop_connect_rate=None,\n data_format='channels_last',\n name=None,\n is_training=False):\n \"\"\"Creates one group of blocks for NAS-FPN.\"\"\"\n if block_fn == 'conv':\n inputs = conv2d_op(\n inputs,\n filters=filters,\n kernel_size=(3, 3),\n padding='same',\n data_format=data_format,\n name='conv')\n inputs = batch_norm_activation(\n inputs, is_training=is_training, relu=False, name='bn')\n inputs = dropblock(inputs, is_training=is_training)\n return inputs\n\n if block_fn != 'bottleneck':\n raise ValueError('Block function {} not implemented.'.format(block_fn))\n _, _, _, num_filters = inputs.get_shape().as_list()\n block_fn = nn_blocks.bottleneck_block\n use_projection = not (num_filters == (filters * 4) and strides == 1)\n\n return resnet.block_group(\n inputs=inputs,\n filters=filters,\n strides=strides,\n use_projection=use_projection,\n block_fn=block_fn,\n 
block_repeats=block_repeats,\n activation=activation,\n batch_norm_activation=batch_norm_activation,\n dropblock=dropblock,\n drop_connect_rate=drop_connect_rate,\n data_format=data_format,\n name=name,\n is_training=is_training)\n\n\ndef resample_feature_map(feat,\n level,\n target_level,\n is_training,\n target_feat_dims=256,\n conv2d_op=tf.layers.conv2d,\n batch_norm_activation=nn_ops.BatchNormActivation(),\n data_format='channels_last',\n name=None):\n \"\"\"Resample input feature map to have target number of channels and width.\"\"\"\n feat_dims = feat.get_shape().as_list()[3]\n with tf.variable_scope('resample_{}'.format(name)):\n if feat_dims != target_feat_dims:\n feat = conv2d_op(\n feat,\n filters=target_feat_dims,\n kernel_size=(1, 1),\n padding='same',\n data_format=data_format)\n feat = batch_norm_activation(\n feat,\n is_training=is_training,\n relu=False,\n name='bn')\n if level < target_level:\n stride = int(2**(target_level-level))\n feat = tf.layers.max_pooling2d(\n inputs=feat,\n pool_size=stride,\n strides=[stride, stride],\n padding='SAME')\n elif level > target_level:\n scale = int(2**(level - target_level))\n feat = spatial_transform_ops.nearest_upsampling(feat, scale=scale)\n return feat\n\n\ndef global_attention(feat0, feat1):\n with tf.variable_scope('global_attention'):\n m = tf.reduce_max(feat0, axis=[1, 2], keepdims=True)\n m = tf.sigmoid(m)\n return feat0 + feat1 * m\n\n\nclass Nasfpn(object):\n \"\"\"Feature pyramid networks.\"\"\"\n\n def __init__(self,\n min_level=3,\n max_level=7,\n block_specs=build_block_specs(),\n fpn_feat_dims=256,\n num_repeats=7,\n use_separable_conv=False,\n dropblock=nn_ops.Dropblock(),\n block_fn='conv',\n block_repeats=1,\n activation='relu',\n batch_norm_activation=nn_ops.BatchNormActivation(\n activation='relu'),\n init_drop_connect_rate=None,\n data_format='channels_last'):\n \"\"\"NAS-FPN initialization function.\n\n Args:\n min_level: `int` minimum level in NAS-FPN output feature maps.\n max_level: `int` maximum level in NAS-FPN output feature maps.\n block_specs: a list of BlockSpec objects that specifies the SpineNet\n network topology. By default, the previously discovered architecture is\n used.\n fpn_feat_dims: `int` number of filters in FPN layers.\n num_repeats: number of repeats for feature pyramid network.\n use_separable_conv: `bool`, if True use separable convolution for\n convolution in NAS-FPN layers.\n dropblock: a Dropblock layer.\n block_fn: `string` representing types of block group support: conv,\n bottleneck.\n block_repeats: `int` representing the number of repeats per block group\n when block group is bottleneck.\n activation: activation function. Support 'relu' and 'swish'.\n batch_norm_activation: an operation that includes a batch normalization\n layer followed by an optional activation layer.\n init_drop_connect_rate: a 'float' number that specifies the initial drop\n connection rate. 
Note that the default `None` means no drop connection\n is applied.\n data_format: An optional string from: \"channels_last\", \"channels_first\".\n Defaults to \"channels_last\".\n \"\"\"\n self._min_level = min_level\n self._max_level = max_level\n self._block_specs = block_specs\n self._fpn_feat_dims = fpn_feat_dims\n self._num_repeats = num_repeats\n self._block_fn = block_fn\n self._block_repeats = block_repeats\n if use_separable_conv:\n self._conv2d_op = functools.partial(\n tf.layers.separable_conv2d, depth_multiplier=1)\n else:\n self._conv2d_op = tf.layers.conv2d\n self._dropblock = dropblock\n if activation == 'relu':\n self._activation = tf.nn.relu\n elif activation == 'swish':\n self._activation = tf.nn.swish\n else:\n raise ValueError('Activation {} not implemented.'.format(activation))\n self._batch_norm_activation = batch_norm_activation\n self._init_drop_connect_rate = init_drop_connect_rate\n self._data_format = data_format\n self._resample_feature_map = functools.partial(\n resample_feature_map,\n target_feat_dims=fpn_feat_dims,\n conv2d_op=self._conv2d_op,\n batch_norm_activation=batch_norm_activation,\n data_format=self._data_format)\n\n def __call__(self, multilevel_features, is_training=False):\n \"\"\"Returns the FPN features for a given multilevel features.\n\n Args:\n multilevel_features: a `dict` containing `int` keys for continuous feature\n levels, e.g., [2, 3, 4, 5]. The values are corresponding features with\n shape [batch_size, height_l, width_l, num_filters].\n is_training: `bool` if True, the model is in training mode.\n\n Returns:\n a `dict` containing `int` keys for continuous feature levels\n [min_level, min_level + 1, ..., max_level]. The values are corresponding\n FPN features with shape [batch_size, height_l, width_l, fpn_feat_dims].\n \"\"\"\n feats = []\n for level in range(self._min_level, self._max_level + 1):\n if level in list(multilevel_features.keys()):\n # TODO(tsungyi): The original impl. 
does't downsample the backbone feat.\n feats.append(self._resample_feature_map(\n multilevel_features[level], level, level, is_training,\n name='l%d' % level))\n else:\n # Adds a coarser level by downsampling the last feature map.\n feats.append(self._resample_feature_map(\n feats[-1], level - 1, level, is_training,\n name='p%d' % level))\n\n with tf.variable_scope('fpn_cells'):\n for i in range(self._num_repeats):\n with tf.variable_scope('cell_{}'.format(i)):\n logging.info('building cell %s', i)\n feats_dict = self._build_feature_pyramid(feats, is_training)\n feats = [feats_dict[level] for level in range(\n self._min_level, self._max_level + 1)]\n return feats_dict\n\n def _build_feature_pyramid(self, feats, is_training):\n \"\"\"Function to build a feature pyramid network.\"\"\"\n # Number of output connections from each feat.\n num_output_connections = [0] * len(feats)\n num_output_levels = self._max_level - self._min_level + 1\n feat_levels = list(range(self._min_level, self._max_level + 1))\n\n for i, sub_policy in enumerate(self._block_specs):\n with tf.variable_scope('sub_policy{}'.format(i)):\n logging.info('sub_policy %d : %s', i, sub_policy)\n new_level = sub_policy.level\n\n # Checks the range of input_offsets.\n for input_offset in sub_policy.input_offsets:\n if input_offset >= len(feats):\n raise ValueError(\n 'input_offset ({}) is larger than num feats({})'.format(\n input_offset, len(feats)))\n input0 = sub_policy.input_offsets[0]\n input1 = sub_policy.input_offsets[1]\n\n # Update graph with inputs.\n node0 = feats[input0]\n node0_level = feat_levels[input0]\n num_output_connections[input0] += 1\n node0 = self._resample_feature_map(\n node0, node0_level, new_level, is_training,\n name='0_{}_{}'.format(input0, len(feats)))\n node1 = feats[input1]\n node1_level = feat_levels[input1]\n num_output_connections[input1] += 1\n node1 = self._resample_feature_map(\n node1, node1_level, new_level, is_training,\n name='1_{}_{}'.format(input1, len(feats)))\n\n # Combine node0 and node1 to create new feat.\n if sub_policy.combine_fn == 'sum':\n new_node = node0 + node1\n elif sub_policy.combine_fn == 'attention':\n if node0_level >= node1_level:\n new_node = global_attention(node0, node1)\n else:\n new_node = global_attention(node1, node0)\n else:\n raise ValueError('unknown combine_fn `{}`.'\n .format(sub_policy.combine_fn))\n\n # Add intermediate nodes that do not have any connections to output.\n if sub_policy.is_output:\n for j, (feat, feat_level, num_output) in enumerate(\n zip(feats, feat_levels, num_output_connections)):\n if num_output == 0 and feat_level == new_level:\n num_output_connections[j] += 1\n\n feat_ = self._resample_feature_map(\n feat, feat_level, new_level, is_training,\n name='fa_{}_{}'.format(i, j))\n new_node += feat_\n\n with tf.variable_scope('op_after_combine{}'.format(len(feats))):\n new_node = self._activation(new_node)\n new_node = block_group(\n inputs=new_node,\n filters=self._fpn_feat_dims,\n strides=1,\n block_fn=self._block_fn,\n block_repeats=self._block_repeats,\n conv2d_op=self._conv2d_op,\n activation=self._activation,\n batch_norm_activation=self._batch_norm_activation,\n dropblock=self._dropblock,\n drop_connect_rate=self._init_drop_connect_rate,\n data_format=self._data_format,\n name='block_{}'.format(i),\n is_training=is_training)\n feats.append(new_node)\n feat_levels.append(new_level)\n num_output_connections.append(0)\n\n output_feats = {}\n for i in range(len(feats) - num_output_levels, len(feats)):\n level = feat_levels[i]\n 
output_feats[level] = feats[i]\n logging.info('Output feature pyramid: %s', output_feats)\n return output_feats\n" ]
[ [ "tensorflow.compat.v1.layers.max_pooling2d", "tensorflow.compat.v1.reduce_max", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aggle/webbpsf_ext
[ "b4e50d02d6fe0e89421403f214a7cd3142f3437c" ]
[ "webbpsf_ext/psfs.py" ]
[ "# Import libraries\nimport numpy as np\nimport multiprocessing as mp\n\nfrom . import conf\nfrom .utils import poppy, S\nfrom .maths import jl_poly\nfrom .image_manip import krebin, fshift\nfrom .bandpasses import nircam_grism_res, niriss_grism_res\n\nimport logging\n_log = logging.getLogger('webbpsf_ext')\n\nfrom scipy.interpolate import griddata, RegularGridInterpolator\n\n__epsilon = np.finfo(float).eps\n\ndef nproc_use(fov_pix, oversample, nwavelengths, coron=False):\n \"\"\"Estimate Number of Processors\n\n Attempt to estimate a reasonable number of processors to use\n for a multi-wavelength calculation. One really does not want\n to end up swapping to disk with huge arrays.\n\n NOTE: Requires ``psutil`` package. Otherwise defaults to ``mp.cpu_count() / 2``\n\n Parameters\n -----------\n fov_pix : int\n Square size in detector-sampled pixels of final PSF image.\n oversample : int\n The optical system that we will be calculating for.\n nwavelengths : int\n Number of wavelengths.\n coron : bool\n Is the nproc recommendation for coronagraphic imaging?\n If so, the total RAM usage is different than for direct imaging.\n \"\"\"\n\n try:\n import psutil\n except ImportError:\n nproc = int(mp.cpu_count() // 2)\n if nproc < 1: nproc = 1\n\n _log.info(\"No psutil package available, cannot estimate optimal nprocesses.\")\n _log.info(\"Returning nproc=ncpu/2={}.\".format(nproc))\n return nproc\n\n mem = psutil.virtual_memory()\n avail_GB = mem.available / 1024**3\n # Leave 10% for other things\n avail_GB *= 0.9\n\n fov_pix_over = fov_pix * oversample\n\n # For multiprocessing, memory accumulates into the main process\n # so we have to subtract the total from the available amount\n reserve_GB = nwavelengths * fov_pix_over**2 * 8 / 1024**3\n # If not enough available memory, then just return nproc=1\n if avail_GB < reserve_GB:\n _log.warn('Not enough available memory ({} GB) to \\\n to hold resulting PSF info ({} GB)!'.\\\n format(avail_GB,reserve_GB))\n return 1\n\n avail_GB -= reserve_GB\n\n # Memory formulas are based on fits to memory usage stats for:\n # fov_arr = np.array([16,32,128,160,256,320,512,640,1024,2048])\n # os_arr = np.array([1,2,4,8])\n if coron: # Coronagraphic Imaging (in MB)\n mem_total = (oversample*1024*2.4)**2 * 16 / (1024**2) + 500\n if fov_pix > 1024: mem_total *= 1.6\n else: # Direct Imaging (also spectral imaging)\n mem_total = 5*(fov_pix_over)**2 * 8 / (1024**2) + 300.\n\n # Convert to GB\n mem_total /= 1024\n\n # How many processors to split into?\n nproc = int(avail_GB / mem_total)\n nproc = np.min([nproc, mp.cpu_count(), poppy.conf.n_processes])\n\n # Each PSF calculation will constantly use multiple processors\n # when not oversampled, so let's divide by 2 for some time\n # and memory savings on those large calculations\n if oversample==1:\n nproc = np.ceil(nproc / 2)\n\n _log.debug('avail mem {}; mem tot: {}; nproc_init: {:.0f}'.\\\n format(avail_GB, mem_total, nproc))\n\n nproc = np.min([nproc, nwavelengths])\n # Resource optimization:\n # Split iterations evenly over processors to free up minimally used processors.\n # For example, if there are 5 processes only doing 1 iteration, but a single\n #\tprocessor doing 2 iterations, those 5 processors (and their memory) will not\n # \tget freed until the final processor is finished. 
So, to minimize the number\n #\tof idle resources, take the total iterations and divide by two (round up),\n #\tand that should be the final number of processors to use.\n np_max = np.ceil(nwavelengths / nproc)\n nproc = int(np.ceil(nwavelengths / np_max))\n\n if nproc < 1: nproc = 1\n\n # Multiprocessing can only swap up to 2GB of data from the child\n # process to the master process. Return nproc=1 if too much data.\n im_size = (fov_pix_over)**2 * 8 / (1024**3)\n nproc = 1 if (im_size * np_max) >=2 else nproc\n\n _log.debug('avail mem {}; mem tot: {}; nproc_fin: {:.0f}'.\\\n format(avail_GB, mem_total, nproc))\n\n return int(nproc)\n\ndef gen_image_from_coeff(inst, coeff, coeff_hdr, sp_norm=None, nwaves=None, \n use_sp_waveset=False, return_oversample=False):\n \n \"\"\"Generate PSF\n\n Create an image (direct, coronagraphic, grism, or DHS) based on a set of\n instrument parameters and PSF coefficients. The image is noiseless and\n doesn't take into account any non-linearity or saturation effects, but is\n convolved with the instrument throughput. Pixel values are in counts/sec.\n The result is effectively an idealized slope image.\n\n If there are no spectral dispersers, then this returns a single image, or \n a list of images if sp_norm is a list of spectra.\n\n Parameters\n ----------\n coeff : ndarray\n A cube of polynomial coefficients for generating PSFs. This is\n generally oversampled with a shape (fov_pix*oversamp, fov_pix*oversamp, deg).\n coeff_hdr : FITS header\n Header information saved while generating coefficients.\n sp_norm : :mod:`pysynphot.spectrum`\n A normalized Pysynphot spectrum to generate image. If not specified,\n the default is flat in phot lam (equal number of photons per spectral bin).\n The default is normalized to produce 1 count/sec within that bandpass,\n assuming the telescope collecting area. Coronagraphic PSFs will further\n decrease this flux.\n nwaves : int\n Option to specify the number of evenly spaced wavelength bins to\n generate and sum over to make final PSF. Useful for wide band filters\n with large PSFs over continuum source.\n use_sp_waveset : bool\n Set this option to use `sp_norm` waveset instead of bandpass waveset.\n Useful if the user inputs a high-resolution spectrum with line emission,\n so one may want to keep a grism PSF (for instance) at native resolution\n rather than blurred with the bandpass waveset. TODO: Test. 
\n return_oversample: bool\n If True, then instead returns the oversampled version of the PSF.\n\n Keyword Args\n ------------\n grism_order : int\n Grism spectral order (default=1).\n ND_acq : bool\n ND acquisition square in coronagraphic mask.\n \"\"\"\n\n # Sort out any spectroscopic modes\n if (inst.name=='NIRCam') or (inst.name=='NIRISS'):\n is_grism = inst.is_grism\n else:\n is_grism = False\n is_dhs = False\n\n if (inst.name=='MIRI') or (inst.name=='NIRSpec'):\n is_slitspec = inst.is_slitspec\n else:\n is_slitspec = False\n\n # Get Bandpass\n bp = inst.bandpass\n\n # Get wavelength range\n npix = coeff.shape[-1]\n # waveset = create_waveset(bp, npix, nwaves=nwaves, is_grism=is_grism)\n\n # List of sp observation converted to count rate\n obs_list = create_obslist(bp, npix, nwaves=nwaves, is_grism=is_grism,\n sp_norm=sp_norm, use_sp_waveset=use_sp_waveset)\n nspec = len(obs_list)\n\n # Get wavelength range\n waveset = obs_list[0].binwave\n wgood = waveset / 1e4\n w1 = wgood.min()\n w2 = wgood.max()\n wrange = w2 - w1\n\n # Create a PSF for each wgood wavelength\n use_legendre = True if coeff_hdr['LEGNDR'] else False\n lxmap = [coeff_hdr['WAVE1'], coeff_hdr['WAVE2']]\n psf_fit = jl_poly(wgood, coeff, use_legendre=use_legendre, lxmap=lxmap)\n\n # Multiply each monochromatic PSFs by the binned e/sec at each wavelength\n # Array broadcasting: [nx,ny,nwave] x [1,1,nwave]\n # Do this for each spectrum/observation\n if nspec==1:\n psf_fit *= obs_list[0].binflux.reshape([-1,1,1])\n psf_list = [psf_fit]\n else:\n psf_list = [psf_fit*obs.binflux.reshape([-1,1,1]) for obs in obs_list]\n del psf_fit\n\n # The number of pixels to span spatially\n fov_pix = int(coeff_hdr['FOVPIX'])\n oversample = int(coeff_hdr['OSAMP'])\n fov_pix_over = int(fov_pix * oversample)\n\n # Grism spectroscopy\n if is_grism:\n pupil = inst.pupil_mask\n if 'GRISM0' in pupil:\n pupil = 'GRISMR'\n elif 'GRISM90' in pupil:\n pupil = 'GRISMC'\n\n # spectral resolution in um/pixel\n # res is in pixels per um and dw is inverse\n grism_order = inst._grism_order\n if inst.name=='NIRCam':\n res, dw = nircam_grism_res(pupil, inst.module, grism_order)\n elif inst.name=='NIRISS':\n res, dw = niriss_grism_res(grism_order)\n\n # Number of real pixels that spectra will span\n npix_spec = int(wrange // dw + 1 + fov_pix)\n npix_spec_over = int(npix_spec * oversample)\n\n spec_list = []\n spec_list_over = []\n for psf_fit in psf_list:\n # If GRISMC (along columns) rotate image by 90 deg CW \n if 'GRISMC' in pupil:\n psf_fit = np.rot90(psf_fit, k=1) \n elif (inst.name=='NIRCam') and (inst.module=='B'): \n # Flip right to left to disperse in correct orientation\n psf_fit = psf_fit[:,:,::-1]\n\n # Create oversampled spectral image\n spec_over = np.zeros([fov_pix_over, npix_spec_over])\n # Place each PSF at its dispersed location\n for i, w in enumerate(wgood):\n # Separate shift into an integer and fractional shift\n delx = oversample * (w-w1) / dw # Number of oversampled pixels to shift\n intx = int(delx)\n fracx = delx - intx\n if fracx < 0:\n fracx = fracx + 1\n intx = intx - 1\n\n # TODO: Benchmark and compare these two different methods\n # spec_over[:,intx:intx+fov_pix_over] += fshift(psf_fit[i], delx=fracx, interp='cubic')\n im = psf_fit[i]\n spec_over[:,intx:intx+fov_pix_over] += im*(1.-fracx) + np.roll(im,1,axis=1)*fracx\n\n spec_over[spec_over<__epsilon] = 0 #__epsilon\n\n # Rotate spectrum to its V2/V3 coordinates\n spec_bin = krebin(spec_over, (fov_pix,npix_spec))\n if 'GRISMC' in pupil: # Rotate image 90 deg CCW\n spec_over = 
np.rot90(spec_over, k=-1)\n spec_bin = np.rot90(spec_bin, k=-1)\n elif (inst.name=='NIRCam') and (inst.module=='B'): \n # Flip right to left for sci coords\n spec_over = spec_over[:,::-1]\n spec_bin = spec_bin[:,::-1]\n\n # Rebin oversampled spectral image to real pixels\n spec_list.append(spec_bin)\n spec_list_over.append(spec_over)\n\n # Wavelength solutions\n dw_over = dw/oversample\n w1_spec = w1 - dw_over*fov_pix_over/2\n wspec_over = np.arange(npix_spec_over)*dw_over + w1_spec\n wspec = wspec_over.reshape((npix_spec,-1)).mean(axis=1)\n if (inst.name=='NIRCam') and ('GRISMR' in pupil) and (inst.module=='B'): \n # Flip wavelength for sci coords\n wspec = wspec[::-1]\n\n if nspec == 1: \n spec_list = spec_list[0]\n spec_list_over = spec_list_over[0]\n\n # _log.debug('jl_poly: {:.2f} sec; binflux: {:.2f} sec; disperse: {:.2f} sec'.format(t5-t4, t6-t5, t7-t6))\n # Return list of wavelengths for each horizontal pixel as well as spectral image\n if return_oversample:\n return (wspec_over, spec_list_over)\n else:\n return (wspec, spec_list)\n\n # DHS spectroscopy\n elif is_dhs:\n raise NotImplementedError('DHS has yet to be fully included')\n\n # Imaging\n else:\n # Create source image slopes (no noise)\n data_list = []\n data_list_over = []\n eps = np.finfo(float).eps\n for psf_fit in psf_list:\n data_over = psf_fit.sum(axis=0)\n data_over[data_over<=eps] = data_over[data_over>eps].min() / 10\n data_list_over.append(data_over)\n data_list.append(krebin(data_over, (fov_pix,fov_pix)))\n\n if nspec == 1: \n data_list = data_list[0]\n data_list_over = data_list_over[0]\n\n #_log.debug('jl_poly: {:.2f} sec; binflux: {:.2f} sec; PSF sum: {:.2f} sec'.format(t5-t4, t6-t5, t7-t6))\n if return_oversample:\n return data_list_over\n else:\n return data_list\n\n\ndef create_waveset(bp, npix, nwaves=None, is_grism=False):\n\n waveset = np.copy(bp.wave)\n if nwaves is not None:\n # Evenly spaced wavelengths\n waveset = np.linspace(waveset.min(), waveset.max(), nwaves)\n elif is_grism:\n waveset = waveset\n else:\n # For generating the PSF, let's save some time and memory by not using\n # every single wavelength in the bandpass.\n # Do NOT do this for dispersed modes.\n binsize = 1\n if npix>2000:\n binsize = 7\n elif npix>1000:\n binsize = 5\n elif npix>700:\n binsize = 3\n\n if binsize>1:\n excess = waveset.size % binsize\n waveset = waveset[:waveset.size-excess]\n waveset = waveset.reshape(-1,binsize) # Reshape\n waveset = waveset[:,binsize//2] # Use the middle values\n waveset = np.concatenate(([bp.wave[0]],waveset,[bp.wave[-1]]))\n \n return waveset\n\ndef create_obslist(bp, npix, nwaves=None, is_grism=False,\n sp_norm=None, use_sp_waveset=False):\n\n waveset = create_waveset(bp, npix, nwaves=nwaves, is_grism=is_grism)\n wgood = waveset / 1e4\n w1 = wgood.min()\n w2 = wgood.max()\n\n # Flat spectrum with equal photon flux in each spectral bin\n if sp_norm is None:\n sp_flat = S.ArraySpectrum(waveset, 0*waveset + 10.)\n sp_flat.name = 'Flat spectrum in flam'\n\n # Bandpass unit response is the flux (in flam) of a star that\n # produces a response of one count per second in that bandpass\n sp_norm = sp_flat.renorm(bp.unit_response(), 'flam', bp)\n\n # Make sp_norm a list of spectral objects if it already isn't\n if not isinstance(sp_norm, list): \n sp_norm = [sp_norm]\n nspec = len(sp_norm)\n\n # Set up an observation of the spectrum using the specified bandpass\n if use_sp_waveset:\n if nspec>1:\n raise AttributeError(\"Only 1 spectrum allowed when use_sp_waveset=True.\")\n # Modify waveset if 
use_sp_waveset=True\n obs_list = []\n for sp in sp_norm:\n # Select only wavelengths within bandpass\n waveset = sp.wave\n # Elementwise '&' is required here; 'and' on numpy arrays raises ValueError\n waveset = waveset[(waveset>=w1*1e4) & (waveset<=w2*1e4)]\n obs_list.append(S.Observation(sp, bp, binset=waveset))\n else:\n # Use the bandpass wavelength set to bin the fluxes\n obs_list = [S.Observation(sp, bp, binset=waveset) for sp in sp_norm]\n\n # Convert to count rate\n for obs in obs_list: \n obs.convert('counts')\n\n return obs_list\n\n\ndef make_coeff_resid_grid(xin, yin, cf_resid, xgrid, ygrid):\n\n # Create 2D grid arrays of coordinates\n xnew, ynew = np.meshgrid(xgrid,ygrid)\n nx, ny = len(xgrid), len(ygrid)\n\n _log.warn(\"Interpolating coefficient residuals onto regular grid...\")\n\n sh = cf_resid.shape\n cf_resid_grid = np.zeros([ny,nx,sh[1],sh[2],sh[3]])\n\n # Cycle through each coefficient to interpolate onto V2/V3 grid\n for i in range(sh[1]):\n cf_resid_grid[:,:,i,:,:] = griddata((xin, yin), cf_resid[:,i,:,:], (xnew, ynew), method='cubic')\n\n return cf_resid_grid\n\n\ndef field_coeff_func(v2grid, v3grid, cf_fields, v2_new, v3_new, method='linear'):\n \"\"\"Interpolation function for PSF coefficient residuals\n\n Uses `RegularGridInterpolator` to quickly determine new coefficient\n residuals at specified points.\n\n Parameters\n ----------\n v2grid : ndarray\n V2 values corresponding to `cf_fields`.\n v3grid : ndarray\n V3 values corresponding to `cf_fields`.\n cf_fields : ndarray\n Coefficient residuals at different field points\n Shape is (nV3, nV2, ncoeff, ypix, xpix)\n v2_new : ndarray\n New V2 point(s) to interpolate on. Same units as v2grid.\n v3_new : ndarray\n New V3 point(s) to interpolate on. Same units as v3grid.\n \"\"\"\n\n func = RegularGridInterpolator((v3grid, v2grid), cf_fields, method=method, \n bounds_error=False, fill_value=None)\n\n pts = np.array([v3_new,v2_new]).transpose()\n \n if np.size(v2_new)>1:\n res = np.asarray([func(pt).squeeze() for pt in pts])\n else:\n res = func(pts)\n\n # If only 1 point, remove the first axis\n res = res.squeeze() if res.shape[0]==1 else res\n return res\n\n" ]
[ [ "numpy.rot90", "numpy.min", "numpy.arange", "scipy.interpolate.RegularGridInterpolator", "numpy.finfo", "numpy.concatenate", "numpy.ceil", "numpy.copy", "numpy.size", "scipy.interpolate.griddata", "numpy.array", "numpy.meshgrid", "numpy.zeros", "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
linjieyangsc/video_seg
[ "b956142691660f02bd72fad936879fc156ee5b47" ]
[ "mobilenet_v1.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"MobileNet v1.\n\nMobileNet is a general architecture and can be used for multiple use cases.\nDepending on the use case, it can use different input layer size and different\nhead (for example: embeddings, localization and classification).\n\nAs described in https://arxiv.org/abs/1704.04861.\n\n MobileNets: Efficient Convolutional Neural Networks for\n Mobile Vision Applications\n Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,\n Tobias Weyand, Marco Andreetto, Hartwig Adam\n\n100% Mobilenet V1 (base) with input size 224x224:\n\nLayer params macs\n--------------------------------------------------------------------------------\nMobilenetV1/Conv2d_0/Conv2D: 864 10,838,016\nMobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672\nMobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112\nMobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336\nMobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112\nMobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672\nMobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224\nMobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168\nMobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112\nMobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336\nMobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224\nMobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584\nMobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112\nMobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792\nMobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112\nMobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584\nMobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224\n--------------------------------------------------------------------------------\nTotal: 3,185,088 567,716,352\n\n\n75% Mobilenet V1 (base) with input size 128x128:\n\nLayer params macs\n--------------------------------------------------------------------------------\nMobilenetV1/Conv2d_0/Conv2D: 648 2,654,208\nMobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736\nMobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592\nMobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368\nMobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592\nMobilenetV1/Conv2d_3_depthwise/depthwise: 864 
884,736\nMobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184\nMobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184\nMobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592\nMobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368\nMobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184\nMobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592\nMobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592\nMobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296\nMobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592\nMobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592\nMobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184\n--------------------------------------------------------------------------------\nTotal: 1,800,144 106,002,432\n\n\"\"\"\n\n# Tensorflow mandates these.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\n\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture\n# Conv defines 3x3 convolution layers\n# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.\n# stride is the stride of the convolution\n# depth is the number of channels or filters in a layer\nConv = namedtuple('Conv', ['kernel', 'stride', 'depth'])\nDepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])\n\n# _CONV_DEFS specifies the MobileNet body\n_CONV_DEFS = [\n Conv(kernel=[3, 3], stride=2, depth=32),\n DepthSepConv(kernel=[3, 3], stride=1, depth=64),\n DepthSepConv(kernel=[3, 3], stride=2, depth=128),\n DepthSepConv(kernel=[3, 3], stride=1, depth=128),\n DepthSepConv(kernel=[3, 3], stride=2, depth=256),\n DepthSepConv(kernel=[3, 3], stride=1, depth=256),\n DepthSepConv(kernel=[3, 3], stride=2, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=2, depth=1024),\n DepthSepConv(kernel=[3, 3], stride=1, depth=1024)\n]\n\n\ndef mobilenet_v1_base(inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=8,\n depth_multiplier=1.0,\n vis_mod_params=None,\n sp_mod_params=None,\n mod_layer_ids=[],\n conv_defs=None,\n output_stride=None,\n scope=None):\n \"\"\"Mobilenet v1.\n\n Constructs a Mobilenet v1 network from inputs to the given final endpoint.\n\n Args:\n inputs: a tensor of shape [batch_size, height, width, channels].\n final_endpoint: specifies the endpoint to construct the network up to. 
It\n can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise,\n 'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',\n 'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',\n 'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n conv_defs: A list of ConvDef namedtuples specifying the net architecture.\n output_stride: An integer that specifies the requested ratio of input to\n output spatial resolution. If not None, then we invoke atrous convolution\n if necessary to prevent the network from reducing the spatial resolution\n of the activation maps. Allowed values are 8 (accurate fully convolutional\n mode), 16 (fast fully convolutional mode), 32 (classification mode).\n scope: Optional variable_scope.\n\n Returns:\n tensor_out: output tensor corresponding to the final_endpoint.\n end_points: a set of activations for external use, for example summaries or\n losses.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values,\n or depth_multiplier <= 0, or the target output_stride is not\n allowed.\n \"\"\"\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\n end_points = {}\n if vis_mod_params is not None:\n vis_mod_idx = 0\n sp_mod_idx = 0\n # Used to find thinned depths for each layer.\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n\n if conv_defs is None:\n conv_defs = _CONV_DEFS\n\n if output_stride is not None and output_stride not in [8, 16, 32]:\n raise ValueError('Only allowed output_stride values are 8, 16, 32.')\n\n with tf.variable_scope(scope, 'MobilenetV1', [inputs]):\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME'):\n # The current_stride variable keeps track of the output stride of the\n # activations, i.e., the running product of convolution strides up to the\n # current network layer. 
This allows us to invoke atrous convolution\n # whenever applying the next convolution would result in the activations\n # having output stride larger than the target output_stride.\n current_stride = 1\n\n # The atrous convolution rate parameter.\n rate = 1\n\n net = inputs\n for i, conv_def in enumerate(conv_defs):\n end_point_base = 'Conv2d_%d' % i\n\n if output_stride is not None and current_stride == output_stride:\n # If we have reached the target output_stride, then we need to employ\n # atrous convolution with stride=1 and multiply the atrous rate by the\n # current unit's stride for use in subsequent layers.\n layer_stride = 1\n layer_rate = rate\n rate *= conv_def.stride\n else:\n layer_stride = conv_def.stride\n layer_rate = 1\n current_stride *= conv_def.stride\n\n if isinstance(conv_def, Conv):\n end_point = end_point_base\n net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,\n stride=conv_def.stride,\n normalizer_fn=slim.batch_norm,\n scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n\n elif isinstance(conv_def, DepthSepConv):\n end_point = end_point_base + '_depthwise'\n\n # By passing filters=None\n # separable_conv2d produces only a depthwise convolution layer\n net = slim.separable_conv2d(net, None, conv_def.kernel,\n depth_multiplier=1,\n stride=layer_stride,\n rate=layer_rate,\n normalizer_fn=slim.batch_norm,\n scope=end_point)\n\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n\n end_point = end_point_base + '_pointwise'\n\n net = slim.conv2d(net, depth(conv_def.depth), [1, 1],\n stride=1,\n normalizer_fn=slim.batch_norm,\n scope=end_point)\n\n if i in mod_layer_ids and vis_mod_params is not None:\n ch = depth(conv_def.depth)\n vis_mod_params_cur = vis_mod_params[vis_mod_idx]\n sp_mod_params_cur = sp_mod_params[sp_mod_idx]\n vis_mod_idx += 1\n sp_mod_idx += 1\n net = net * vis_mod_params_cur + sp_mod_params_cur\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n else:\n raise ValueError('Unknown convolution type %s for layer %d'\n % (conv_def.ltype, i))\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\n\n\ndef mobilenet_v1(inputs,\n num_classes=1000,\n dropout_keep_prob=0.999,\n is_training=True,\n min_depth=8,\n depth_multiplier=1.0,\n conv_defs=None,\n prediction_fn=tf.contrib.layers.softmax,\n spatial_squeeze=True,\n reuse=None,\n scope='MobilenetV1'):\n \"\"\"Mobilenet v1 model for classification.\n\n Args:\n inputs: a tensor of shape [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n dropout_keep_prob: the percentage of activation values that are retained.\n is_training: whether is training or not.\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. 
Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n conv_defs: A list of ConvDef namedtuples specifying the net architecture.\n prediction_fn: a function to get predictions out of logits.\n spatial_squeeze: if True, logits is of shape is [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, num_classes]\n end_points: a dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: Input rank is invalid.\n \"\"\"\n input_shape = inputs.get_shape().as_list()\n if len(input_shape) != 4:\n raise ValueError('Invalid input tensor rank, expected 4, was: %d' %\n len(input_shape))\n\n with tf.variable_scope(scope, 'MobilenetV1', [inputs, num_classes],\n reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n net, end_points = mobilenet_v1_base(inputs, scope=scope,\n min_depth=min_depth,\n depth_multiplier=depth_multiplier,\n conv_defs=conv_defs)\n with tf.variable_scope('Logits'):\n kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])\n net = slim.avg_pool2d(net, kernel_size, padding='VALID',\n scope='AvgPool_1a')\n end_points['AvgPool_1a'] = net\n # 1 x 1 x 1024\n #net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')\n logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n weights_initializer=tf.zeros_initializer(),\n biases_initializer = tf.ones_initializer(),\n normalizer_fn=None, scope='Conv2d_1c_1x1')\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n return logits, end_points\n\nmobilenet_v1.default_image_size = 224\n\n\ndef _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n \"\"\"Define kernel size which is automatically reduced for small input.\n\n If the shape of the input images is unknown at graph construction time this\n function assumes that the input images are large enough.\n\n Args:\n input_tensor: input tensor of size [batch_size, height, width, channels].\n kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]\n\n Returns:\n a tensor with the kernel size.\n \"\"\"\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size_out = kernel_size\n else:\n kernel_size_out = [min(shape[1], kernel_size[0]),\n min(shape[2], kernel_size[1])]\n return kernel_size_out\n\n\ndef mobilenet_v1_arg_scope(is_training=True,\n weight_decay=0.00004,\n stddev=0.09,\n regularize_depthwise=False):\n \"\"\"Defines the default MobilenetV1 arg scope.\n\n Args:\n is_training: Whether or not we're training the model.\n weight_decay: The weight decay to use for regularizing the model.\n stddev: The standard deviation of the trunctated normal weight initializer.\n regularize_depthwise: Whether or not apply regularization on depthwise.\n\n Returns:\n An `arg_scope` to use for the mobilenet v1 model.\n \"\"\"\n batch_norm_params = {\n 'is_training': is_training,\n 'center': True,\n 'scale': True,\n 'decay': 0.997,\n 'epsilon': 0.001,\n 'updates_collections': None\n }\n\n # Set weight_decay for weights in Conv and DepthSepConv layers.\n weights_init = tf.truncated_normal_initializer(stddev=stddev)\n regularizer = 
tf.contrib.layers.l2_regularizer(weight_decay)\n if regularize_depthwise:\n depthwise_regularizer = regularizer\n else:\n depthwise_regularizer = None\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n weights_initializer=weights_init,\n activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):\n with slim.arg_scope([slim.separable_conv2d],\n weights_regularizer=depthwise_regularizer) as sc:\n return sc\n" ]
[ [ "tensorflow.zeros_initializer", "tensorflow.squeeze", "tensorflow.truncated_normal_initializer", "tensorflow.ones_initializer", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
steinate/a-simple-implement-of-FasterRCNN
[ "2279f40b4b1180317db35412c8dd9e8eb216f1e7" ]
[ "Faster RCNN/vis_tools.py" ]
[ "import time\r\n\r\nimport numpy as np\r\nimport matplotlib\r\nimport torch as t\r\nimport visdom\r\n\r\nmatplotlib.use('Agg')\r\nfrom matplotlib import pyplot as plot\r\n\r\nVOC_BBOX_LABEL_NAMES = (\r\n 'fly',\r\n 'bike',\r\n 'bird',\r\n 'boat',\r\n 'pin',\r\n 'bus',\r\n 'c',\r\n 'cat',\r\n 'chair',\r\n 'cow',\r\n 'table',\r\n 'dog',\r\n 'horse',\r\n 'moto',\r\n 'p',\r\n 'plant',\r\n 'shep',\r\n 'sofa',\r\n 'train',\r\n 'tv',\r\n)\r\n\r\ndef vis_image(img, ax=None):\r\n if ax is None:\r\n fig = plot.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n img = img.transpose(1, 2, 0)\r\n ax.imshow(img.astype(np.uint8))\r\n return ax\r\n\r\ndef vis_bbox(img, bbox, label=None, score=None, ax=None):\r\n label_names = list(VOC_BBOX_LABEL_NAMES) + ['bg']\r\n ax = vis_image(img, ax=ax)\r\n\r\n # 没有框\r\n if len(bbox) == 0:\r\n return ax\r\n\r\n # 绘制矩形框\r\n for i, b in enumerate(bbox):\r\n x_y = (b[1], b[0])\r\n height = b[2] - b[0]\r\n width = b[3] - b[1]\r\n ax.add_patch(plot.Rectangle(\r\n x_y, width, height, fill=False, edgecolor='red', linewidth=2\r\n ))\r\n\r\n # 标scores\r\n caption = list()\r\n if label is not None and label_names is not None:\r\n lb = label[i]\r\n if not (-1 <= lb < len(label_names)):\r\n raise ValueError('没有对应的物体类别')\r\n caption.append(label_names[lb])\r\n if score is not None:\r\n sc = score[i]\r\n caption.append('{:.2f}'.format(sc))\r\n if len(caption) > 0:\r\n ax.text(b[1], b[0], ': '.join(caption), style='italic',\r\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 0})\r\n return ax\r\n\r\ndef fig2data(fig):\r\n fig.canvas.draw()\r\n w, h = fig.canvas.get_width_height()\r\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.int8)\r\n buf.shape = (w, h, 4)\r\n\r\n buf = np.roll(buf, 3, axis=2)\r\n return buf.reshape(h, w, 4)\r\n\r\n\r\ndef fig4vis(fig):\r\n ax = fig.get_figure()\r\n img_data = fig2data(ax).astype(np.int32)\r\n plot.close()\r\n return img_data[:, :, :3].transpose((2, 0, 1)) / 255\r\n\r\ndef visdom_bbox(*arg, **kwargs):\r\n fig = vis_bbox(*arg, **kwargs)\r\n data = fig4vis(fig)\r\n return data\r\n\r\nclass Visualizer(object):\r\n\r\n def __init__(self, env='default', **kwargs):\r\n self.vis = visdom.Visdom('localhost', env=env, use_incoming_socket=False, **kwargs)\r\n self._vis_kw = kwargs\r\n\r\n self.index = {}\r\n self.log_text = ''\r\n\r\n def reinit(self, env='default', **kwargs):\r\n self.vis = visdom.Visdom(env=env, **kwargs)\r\n\r\n def plot_many(self, d):\r\n for k, v in d.items():\r\n if v is not None:\r\n self.plot(k, v)\r\n\r\n def img_many(self, d):\r\n for k, v in d.items():\r\n self.plot(k, v)\r\n\r\n def plot(self, name, y, **kwargs):\r\n x = self.index.get(name, 0)\r\n self.vis.line(Y=np.array([y]), X=np.array([x]),\r\n win=name, opts=dict(title=name),\r\n update=None if x == 0 else 'append',\r\n **kwargs\r\n )\r\n self.index[name] = x + 1\r\n\r\n def img(self, name, img_, **kwargs):\r\n self.vis.images(\r\n t.Tensor(img_).cpu().numpy(),\r\n win=name,\r\n opts=dict(title=name),\r\n **kwargs\r\n )\r\n\r\n def log(self, info, win='log_text'):\r\n self.log_text += ('[{time}]{info} <br>'.format(\r\n time=time.strftime('%m%d_%H%M%S'),\r\n info=info\r\n ))\r\n self.vis.text(self.log_text, win)\r\n\r\n def __getattr__(self, name):\r\n return getattr(self.vis, name)\r\n\r\n def state_dict(self):\r\n return {\r\n 'index': self.index,\r\n 'vis_kw': self._vis_kw,\r\n 'log_text': self.log_text,\r\n 'env': self.vis.env\r\n }\r\n\r\n def load_state_dict(self, d):\r\n self.vis = visdom.Visdom(env=d.get('env', self.vis.env), 
**(d.get('vis_kw')))\r\n        self.log_text = d.get('log_text', '')\r\n        self.index = d.get('index', dict())\r\n        return self" ]
[ [ "matplotlib.pyplot.Rectangle", "torch.Tensor", "matplotlib.use", "matplotlib.pyplot.close", "numpy.array", "numpy.roll", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gtg4059/PPO
[ "599780026b12247383e5edfb889eebbe73a4f647" ]
[ "scripts/test.py" ]
[ "#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import PoseStamped #4 Angle Data to 3\nfrom sensor_msgs.msg import LaserScan #20 LAser Data\nimport time\nfrom PPO import PPO, Memory\nfrom PIL import Image\nimport torch\n\ndef call_Dist(laser): \n print(laser)\n #print(data.pose.orientation.x)\n #print(c.avg_resultx, c.avg_resulty)\n\n# def call_Pose(data): \n #print(\"{}, {}, {}, {}\".format(data.pose.orientation.x))\n \n# def callback(data): \n# print(\"{}, {}, {}, {}\".format(data.pose.orientation.x))\n\ndef listener(): \n rospy.init_node('listener', anonymous=True)\n data = PoseStamped()\n laser = LaserScan()\n laser.ranges\n data.pose.orientation\n rospy.Subscriber(\"/scan\", laser, call_Dist) \n rospy.Subscriber(\"/mavros/local_position/pose\", PoseStamped, call_Pose) \n rospy.spin()\n\ndef test():\n ############## Hyperparameters ##############\n # env_name = \"LunarLander-v3\"\n # # creating environment\n # env = gym.make(env_name)\n # state_dim = env.observation_space.shape[0]\n # action_dim = 4\n # creating environment\n state_dim = 29\n action_dim = 2\n render = False\n max_timesteps = 300\n n_latent_var = 64 # number of variables in hidden layer\n lr = 0.0007\n betas = (0.9, 0.999)\n gamma = 0.99 # discount factor\n K_epochs = 4 # update policy for K epochs\n eps_clip = 0.2 # clip parameter for PPO\n #############################################\n\n n_episodes = 1\n max_timesteps = 300\n render = True\n save_gif = False\n\n filename = \"PPO_{}.pth\".format(env_name)\n directory = \"./preTrained/\"\n \n memory = Memory()\n ppo = PPO(state_dim, action_dim, n_latent_var, lr, betas, gamma, K_epochs, eps_clip)\n \n ppo.policy_old.load_state_dict(torch.load(directory+filename))\n \n for ep in range(1, n_episodes+1):\n ep_reward = 0\n state = env.reset()\n for t in range(max_timesteps):\n action = ppo.policy_old.act(state, memory)\n state, reward, done, _ = env.step(action)\n ep_reward += reward\n if render:\n env.render()\n if save_gif:\n img = env.render(mode = 'rgb_array')\n img = Image.fromarray(img)\n img.save('./gif/{}.jpg'.format(t)) \n if done:\n break\n \n print('Episode: {}\\tReward: {}'.format(ep, int(ep_reward)))\n ep_reward = 0\n env.close()\n \nif __name__ == '__main__':\n listener()\n #test()\n \n \n" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yegmor/Final_Project
[ "70241b18f2cfe374e75d78e21be78170a0649956" ]
[ "target_models.py" ]
[ "# Modified from https://github.com/mathcbc/advGAN_pytorch/blob/master/models.py\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MNIST_target_net(nn.Module):\n def __init__(self):\n super(MNIST_target_net, self).__init__()\n\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3)\n self.conv2 = nn.Conv2d(32, 32, kernel_size=3)\n self.conv3 = nn.Conv2d(32, 64, kernel_size=3)\n self.conv4 = nn.Conv2d(64, 64, kernel_size=3)\n\n self.fc1 = nn.Linear(64*4*4, 200)\n self.fc2 = nn.Linear(200, 200)\n self.logits = nn.Linear(200, 10)\n\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2)\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = F.max_pool2d(x, 2)\n\n x = x.view(-1, 64*4*4)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, 0.5)\n x = F.relu(self.fc2(x))\n x = self.logits(x)\n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.functional.max_pool2d", "torch.nn.functional.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
btashton/puppyface
[ "180ed35eebb5f532e3e2c0f642994d78fcd1db4b" ]
[ "eigensave.py" ]
[ "'''\nThis code was inspired by the code at:\nhttps://github.com/edent/Tate-Hack/blob/master/eigensave.py\n'''\n\n\nimport argparse\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport json\n\n\ndef read_images(path, sz=None):\n img_meta = {}\n X,y = [], []\n count = 0\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n try:\n print(subject_path,filename)\n im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)\n # resize to given size (if given)\n if (sz is not None):\n im = cv2.resize(im, sz)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(count)\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n continue\n img_meta[str(count)] = {'subject':subdirname} \n count = count+1\n return (img_meta, [X,y])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Train faces')\n parser.add_argument('img_dir',\n help='Directory with subject training images')\n parser.add_argument('--model_out', default='eigenModel.xml')\n parser.add_argument('--meta_out', default='imgmeta.json')\n\n args = parser.parse_args()\n (img_meta, [X,y]) = read_images(args.img_dir, (256,256))\n \n \n # Convert labels to 32bit integers. This is a workaround for 64bit machines,\n y = np.asarray(y, dtype=np.int32)\n\n # Create the Eigenfaces model.\n model = cv2.createEigenFaceRecognizer(num_components=40)\n # Learn the model. Remember our function returns Python lists,\n # so we use np.asarray to turn them into NumPy lists to make\n # the OpenCV wrapper happy:\n model.train(np.asarray(X), np.asarray(y))\n\n # Save the model for later use\n model.save(args.model_out)\n\n with open(args.meta_out, 'w') as outfile:\n json.dump(img_meta, outfile)\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AxelTchaikovsky/MachineLearningFinalProj
[ "ca488683a6276c09a49fe5390ae2e91b1fdd86c0" ]
[ "model/VoxNet.py" ]
[ "\nimport torch\nfrom collections import OrderedDict\n\n\nclass MVVoxNet(torch.nn.Module):\n\n def __init__(self, num_classes, input_shape=(100, 100, 100)):\n #weights_path=None,\n #load_body_weights=True,\n #load_head_weights=True):\n \"\"\"\n VoxNet: A 3D Convolutional Neural Network for Real-Time Object Recognition.\n Modified in order to accept different input shapes.\n Parameters\n ----------\n num_classes: int, optional\n Default: 10\n input_shape: (x, y, z) tuple, optional\n Default: (32, 32, 32)\n weights_path: str or None, optional\n Default: None\n load_body_weights: bool, optional\n Default: True\n load_head_weights: bool, optional\n Default: True\n Notes\n -----\n Weights available at: url to be added\n If you want to finetune with custom classes, set load_head_weights to False.\n Default head weights are pretrained with ModelNet10.\n \"\"\"\n super(MVVoxNet, self).__init__()\n self.body = torch.nn.Sequential(OrderedDict([\n ('conv1', torch.nn.Conv3d(in_channels=1,\n out_channels=32, kernel_size=3, stride=2)),#kernel 3\n ('lkrelu1', torch.nn.LeakyReLU()),\n ('drop1', torch.nn.Dropout(p=0.1)),\n ('conv2', torch.nn.Conv3d(in_channels=32, out_channels=32, kernel_size=3)),\n ('lkrelu2', torch.nn.LeakyReLU()),\n ('pool2', torch.nn.MaxPool3d(2)),\n ('drop2', torch.nn.Dropout(p=0.2))#0.3\n ]))\n\n # Trick to accept different input shapes\n x = self.body(torch.autograd.Variable(\n torch.rand((1, 1) + input_shape)))\n first_fc_in_features = 1\n for n in x.size()[1:]:\n first_fc_in_features *= n\n\n self.head = torch.nn.Sequential(OrderedDict([\n ('fc1', torch.nn.Linear(first_fc_in_features, 128)),\n ('relu1', torch.nn.ReLU()),\n ('drop3', torch.nn.Dropout(p=0.4)),\n ('fc2', torch.nn.Linear(128, num_classes))\n ]))\n\n #if weights_path is not None:\n # weights = torch.load(weights_path)\n # if load_body_weights:\n # self.body.load_state_dict(weights[\"body\"])\n # elif load_head_weights:\n # self.head.load_state_dict(weights[\"head\"])\n\n # def forward(self, x):\n # # shape x: BxVx1xDxDxD\n # view_pool = []\n # for v_idx in range(x.size(1)):\n # v = x[:, v_idx]\n # v = self.body(v)\n # v = v.view(v.size(0), -1)\n # view_pool.append(v)\n \n # pooled_view = view_pool[0]\n # for i in range(1, len(view_pool)):\n # pooled_view = torch.max(pooled_view, view_pool[i])\n \n # pooled_view = self.head(pooled_view)\n # return pooled_view \n\n def forward(self, x):\n x = self.body(x)\n x = x.view(x.size(0), -1)\n x = self.head(x)\n return x" ]
[ [ "torch.nn.Dropout", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.rand", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jbjaveed/transformers
[ "01c1e4b8fe8a275540831acdad6747c51e9a24de" ]
[ "src/transformers/optimization_tf.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\n\n\nimport re\n\nimport tensorflow as tf\n\n\nclass WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Applies a warmup schedule on a given learning rate decay schedule.\"\"\"\n\n def __init__(\n self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None,\n ):\n super().__init__()\n self.initial_learning_rate = initial_learning_rate\n self.warmup_steps = warmup_steps\n self.power = power\n self.decay_schedule_fn = decay_schedule_fn\n self.name = name\n\n def __call__(self, step):\n with tf.name_scope(self.name or \"WarmUp\") as name:\n # Implements polynomial warmup. i.e., if global_step < warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n global_step_float = tf.cast(step, tf.float32)\n warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)\n warmup_percent_done = global_step_float / warmup_steps_float\n warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)\n return tf.cond(\n global_step_float < warmup_steps_float,\n lambda: warmup_learning_rate,\n lambda: self.decay_schedule_fn(step),\n name=name,\n )\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_schedule_fn\": self.decay_schedule_fn,\n \"warmup_steps\": self.warmup_steps,\n \"power\": self.power,\n \"name\": self.name,\n }\n\n\ndef create_optimizer(init_lr, num_train_steps, num_warmup_steps, end_lr=0.0, optimizer_type=\"adamw\"):\n \"\"\"Creates an optimizer with learning rate schedule.\"\"\"\n # Implements linear decay of the learning rate.\n lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=init_lr, decay_steps=num_train_steps, end_learning_rate=end_lr,\n )\n if num_warmup_steps:\n lr_schedule = WarmUp(\n initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,\n )\n\n optimizer = AdamWeightDecay(\n learning_rate=lr_schedule,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"layer_norm\", \"bias\"],\n )\n\n return optimizer\n\n\nclass AdamWeightDecay(tf.keras.optimizers.Adam):\n \"\"\"Adam enables L2 weight decay and clip_by_global_norm on gradients.\n Just adding the square of the weights to the loss function is *not* the\n correct way of using L2 regularization/weight decay with Adam, since that will\n interact with the m and v parameters in strange ways.\n Instead we want ot decay the weights in a manner that doesn't interact with\n the m/v parameters. 
This is equivalent to adding the square of the weights to\n the loss with plain (non-momentum) SGD.\n \"\"\"\n\n def __init__(\n self,\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-7,\n amsgrad=False,\n weight_decay_rate=0.0,\n include_in_weight_decay=None,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecay\",\n **kwargs\n ):\n super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)\n self.weight_decay_rate = weight_decay_rate\n self._include_in_weight_decay = include_in_weight_decay\n self._exclude_from_weight_decay = exclude_from_weight_decay\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates an optimizer from its config with WarmUp custom object.\"\"\"\n custom_objects = {\"WarmUp\": WarmUp}\n return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)\n apply_state[(var_device, var_dtype)][\"weight_decay_rate\"] = tf.constant(\n self.weight_decay_rate, name=\"adam_weight_decay_rate\"\n )\n\n def _decay_weights_op(self, var, learning_rate, apply_state):\n do_decay = self._do_use_weight_decay(var.name)\n if do_decay:\n return var.assign_sub(\n learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)][\"weight_decay_rate\"],\n use_locking=self._use_locking,\n )\n return tf.no_op()\n\n def apply_gradients(self, grads_and_vars, name=None):\n grads, tvars = list(zip(*grads_and_vars))\n return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name,)\n\n def _get_lr(self, var_device, var_dtype, apply_state):\n \"\"\"Retrieves the learning rate with the given state.\"\"\"\n if apply_state is None:\n return self._decayed_lr_t[var_dtype], {}\n\n apply_state = apply_state or {}\n coefficients = apply_state.get((var_device, var_dtype))\n if coefficients is None:\n coefficients = self._fallback_apply_state(var_device, var_dtype)\n apply_state[(var_device, var_dtype)] = coefficients\n\n return coefficients[\"lr_t\"], dict(apply_state=apply_state)\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)\n\n def get_config(self):\n config = super().get_config()\n config.update({\"weight_decay_rate\": self.weight_decay_rate})\n return config\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if self.weight_decay_rate == 0:\n return False\n\n if self._include_in_weight_decay:\n for r in self._include_in_weight_decay:\n if re.search(r, param_name) is not None:\n return True\n\n if self._exclude_from_weight_decay:\n for r in self._exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n\n# Extracted from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py\nclass GradientAccumulator(object):\n \"\"\"Gradient 
accumulation utility.\n When used with a distribution strategy, the accumulator should be called in a\n replica context. Gradients will be accumulated locally on each replica and\n without synchronization. Users should then call ``.gradients``, scale the\n gradients if required, and pass the result to ``apply_gradients``.\n \"\"\"\n\n # We use the ON_READ synchronization policy so that no synchronization is\n # performed on assignment. To get the value, we call .value() which returns the\n # value on the current replica without synchronization.\n\n def __init__(self):\n \"\"\"Initializes the accumulator.\"\"\"\n self._gradients = []\n self._accum_steps = None\n\n @property\n def step(self):\n \"\"\"Number of accumulated steps.\"\"\"\n if self._accum_steps is None:\n self._accum_steps = tf.Variable(\n tf.constant(0, dtype=tf.int64),\n trainable=False,\n synchronization=tf.VariableSynchronization.ON_READ,\n aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,\n )\n\n return self._accum_steps.value()\n\n @property\n def gradients(self):\n \"\"\"The accumulated gradients on the current replica.\"\"\"\n if not self._gradients:\n raise ValueError(\"The accumulator should be called first to initialize the gradients\")\n return list(gradient.value() if gradient is not None else gradient for gradient in self._gradients)\n\n def __call__(self, gradients):\n \"\"\"Accumulates :obj:`gradients` on the current replica.\"\"\"\n if not self._gradients:\n _ = self.step # Create the step variable.\n self._gradients.extend(\n [\n tf.Variable(\n tf.zeros_like(gradient),\n trainable=False,\n synchronization=tf.VariableSynchronization.ON_READ,\n aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,\n )\n if gradient is not None\n else gradient\n for gradient in gradients\n ]\n )\n if len(gradients) != len(self._gradients):\n raise ValueError(\"Expected %s gradients, but got %d\" % (len(self._gradients), len(gradients)))\n\n for accum_gradient, gradient in zip(self._gradients, gradients):\n if accum_gradient is not None and gradient is not None:\n accum_gradient.assign_add(gradient)\n\n self._accum_steps.assign_add(1)\n\n def reset(self):\n \"\"\"Resets the accumulated gradients on the current replica.\"\"\"\n if not self._gradients:\n return\n self._accum_steps.assign(0)\n for gradient in self._gradients:\n if gradient is not None:\n gradient.assign(tf.zeros_like(gradient))\n" ]
[ [ "tensorflow.keras.optimizers.schedules.PolynomialDecay", "tensorflow.constant", "tensorflow.control_dependencies", "tensorflow.cast", "tensorflow.zeros_like", "tensorflow.no_op", "tensorflow.name_scope", "tensorflow.math.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SamerSaber/CarND-Capstone-Solution
[ "dbff0c722cd9c9b85f14ef5887fc6d594f7b5861" ]
[ "ros/src/waypoint_updater/waypoint_updater.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom scipy.spatial import KDTree\nfrom std_msgs.msg import Int32\nfrom styx_msgs.msg import Lane, Waypoint\n\nimport math\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number\nMAX_DECEL = .5\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n \n # TODO: Add other member variables you need below\n self.base_lane = None\n self.stopline_wp_idx = -1\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoints_tree = None\n\n\n #Topic: current_pose\n #Message: PoseStamped\n #Callback: pose_cb\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n\n #Topic: base_waypoints\n #Message: Lane\n #Callback: waypoints_cb\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n self.loop()\n\n #[BlockingCall] until a shutdown request is received by the node\n #rospy.spin()\n\n \n def loop(self):\n #it could be 30Hz\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints:\n #if self.pose and self.base_lane:\n #Get closest waypoint\n closest_waypoint_idx = self.get_closest_waypoint_idx()\n #Publish closest way point index\n self.publish_waypoints(closest_waypoint_idx)\n \n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n #Get Coordinates of our car\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n \n #Query on waypoints tree giving back closest point index in our KB Tree\n closest_idx = self.waypoints_tree.query([x, y], 1)[1]\n \n # Check if closest is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n \n #Equation for hyperplane through closest_coords\n #2D coordinate of the WayPoint\n cl_vect = np.array(closest_coord)\n #2D coordinate of the car's previous waypoint\n prev_vect = np.array(prev_coord)\n #2D coordinate of the car's current waypoint\n pos_vect = np.array([x, y])\n \n #Dot Prodcut to see positive(way point is behind the car) or negative(way point is in front of the car)\n val = np.dot(cl_vect-prev_vect, pos_vect-cl_vect)\n \n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n \n def publish_waypoints(self, closest_idx):\n #Create a new lane message\n\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n \n \n def generate_lane(self):\n lane = Lane()\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n #base_waypoints = 
self.base_lane.waypoints[closest_idx:farthest_idx]\n        base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]\n\n        #We didn't find any traffic lights\n        if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):\n            #publish waypoints directly\n            lane.waypoints = base_waypoints\n        else:\n            lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)\n\n        return lane\n\n    def decelerate_waypoints(self, waypoints, closest_idx):\n        #Creating new waypoints messages \n        temp = []\n        for i, wp in enumerate(waypoints):\n            #Create new waypoint message\n            p = Waypoint()\n            #Set the pose to the base waypoint pose\n            p.pose = wp.pose\n            #Two waypoints back from line so front of the car stops at line\n            stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0)\n            #calculate the distance to stop at (zero if it's after the stop index)\n            dist = self.distance(waypoints, i, stop_idx)\n\n            #Truncate to 0 if the velocity gets too small\n            vel = math.sqrt(2 * MAX_DECEL * dist)\n            if vel < 1:\n                vel = 0\n            \n            #Cap at the waypoint's base velocity if it gets larger (speed limit)\n            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n\n            #Add to the newly created list\n            temp.append(p)\n\n        return temp\n    \n\n    def pose_cb(self, msg):\n        # TODO: Implement\n        self.pose = msg\n        pass\n\n    def waypoints_cb(self, waypoints):\n        # TODO: Implement\n        self.waypoints = waypoints\n        self.base_waypoints = waypoints\n        if not self.waypoints_2d:\n            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n            self.waypoints_tree = KDTree(self.waypoints_2d)\n        \n        pass\n\n    def traffic_cb(self, msg):\n        # TODO: Callback for /traffic_waypoint message. Implement\n        self.stopline_wp_idx = msg.data\n\n    def obstacle_cb(self, msg):\n        # TODO: Callback for /obstacle_waypoint message. We will implement it later\n        pass\n\n    #gets the linear velocity (x-direction) for a single waypoint.\n    def get_waypoint_velocity(self, waypoint):\n        return waypoint.twist.twist.linear.x\n\n    #sets the linear velocity (x-direction) for a single waypoint in a list of waypoints\n    def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n        waypoints[waypoint].twist.twist.linear.x = velocity\n\n    #Computes the distance between two waypoints in a list along the piecewise linear arc connecting all waypoints between the two\n    #helpful in determining the velocities for a sequence of waypoints leading up to a red light \n    #the velocities should gradually decrease to zero starting some distance from the light\n    def distance(self, waypoints, wp1, wp2):\n        dist = 0\n        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n        for i in range(wp1, wp2+1):\n            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n            wp1 = i\n        return dist\n\n\nif __name__ == '__main__':\n    try:\n        WaypointUpdater()\n    except rospy.ROSInterruptException:\n        rospy.logerr('Could not start waypoint updater node.')\n" ]
[ [ "numpy.dot", "numpy.array", "scipy.spatial.KDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
PeaBrane/lava-dl
[ "b205b4e0466788c5232ff20497ac0fc433cbccca" ]
[ "tests/lava/lib/dl/slayer/neuron/test_rf_iz.py" ]
[ "# Copyright (C) 2021 Intel Corporation\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport sys\nimport os\nimport unittest\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nfrom lava.lib.dl.slayer.neuron import rf_iz\n\nverbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False\n\nseed = np.random.randint(1000)\n# seed = 133\nnp.random.seed(seed)\nif verbose:\n print(f'{seed=}')\n\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n if verbose:\n print(\n 'CUDA is not available in the system. '\n 'Testing for CPU version only.'\n )\n device = torch.device('cpu')\n\n# neuron parameters\nthreshold = 1\ndecay = np.random.random() * 0.1\nperiod = np.random.randint(4, 50)\n\n# create input\ntime = torch.FloatTensor(np.arange(200)).to(device)\n# expand to (batch, neuron, time) tensor\nspike_input = torch.autograd.Variable(\n torch.zeros([5, 4, len(time)]), requires_grad=True\n).to(device)\nspike_input.data[..., np.random.randint(spike_input.shape[-1], size=5)] = 1\n\nreal_weight = torch.FloatTensor(\n 5 * np.random.random(size=spike_input.shape[-1]) - 0.5\n).reshape(\n [1, 1, spike_input.shape[-1]]\n).to(device)\n\nimag_weight = torch.FloatTensor(\n 5 * np.random.random(size=spike_input.shape[-1]) - 0.5\n).reshape(\n [1, 1, spike_input.shape[-1]]\n).to(device)\n\n# initialize neuron\nneuron = rf_iz.Neuron(\n threshold, period, decay, persistent_state=True\n).to(device)\nquantized_real_weight = neuron.quantize_8bit(real_weight)\nquantized_imag_weight = neuron.quantize_8bit(imag_weight)\nneuron.debug = True\n\nreal, imag = neuron.dynamics((\n quantized_real_weight * spike_input,\n quantized_imag_weight * spike_input\n))\nspike = neuron.spike(real, imag)\n\n\nclass TestRF(unittest.TestCase):\n def test_input_range(self):\n # not doing it for output spikes because RF neurons\n # tend to spike sparsely.\n if verbose:\n print(spike.sum(), spike.flatten())\n\n self.assertTrue(\n spike_input.sum().item() > 0,\n 'There was zero input spike. Check the test setting.'\n )\n\n def test_properties(self):\n _ = neuron.lam\n _ = neuron.decay\n _ = neuron.period\n _ = neuron.frequency\n _ = neuron.cx_sin_decay\n _ = neuron.cx_cos_decay\n _ = neuron.scale\n _ = neuron.shape\n _ = neuron.device\n\n # just looking for errors\n self.assertTrue(True, 'Encountered errors.')\n\n def test_batch_consistency(self):\n spike_var = torch.norm(torch.var(spike, dim=0)).item()\n real_var = torch.norm(torch.var(real, dim=0)).item()\n imag_var = torch.norm(torch.var(imag, dim=0)).item()\n self.assertTrue(\n spike_var < 1e-5,\n f'Spike variation across batch dimension is inconsistent. '\n f'Variance was {spike_var}. Expected 0.'\n )\n self.assertTrue(\n real_var < 1e-5,\n f'Real state variation across batch dimension is inconsistent. '\n f'Variance was {real_var}. Expected 0.'\n )\n self.assertTrue(\n imag_var < 1e-5,\n f'Voltage variation across batch dimension is inconsistent. '\n f'Variance was {imag_var}. Expected 0.'\n )\n\n def test_integer_states(self):\n # there should be no quantization error when\n # states are scaled with s_scale\n real_error = torch.norm(\n torch.floor(real * neuron.s_scale) - real * neuron.s_scale\n )\n imag_error = torch.norm(\n torch.floor(imag * neuron.s_scale) - imag * neuron.s_scale\n )\n\n self.assertTrue(\n real_error < 1e-5,\n f'Real calculation has issues with scaling. '\n f'De-Scaling must result in integer states. 
'\n f'Error was {real_error}'\n )\n self.assertTrue(\n imag_error < 1e-5,\n f'Imag calculation has issues with scaling. '\n f'De-Scaling must result in integer states. '\n f'Error was {imag_error}'\n )\n\n def test_persistent_state(self):\n # clear previous persistent state\n neuron.real_state *= 0\n neuron.imag_state *= 0\n\n # break the calculation into two parts: before ind and after ind\n ind = int(np.random.random() * spike_input.shape[-1])\n # ind = 57\n real0, imag0 = neuron.dynamics((\n quantized_real_weight[..., :ind] * spike_input[..., :ind],\n quantized_imag_weight[..., :ind] * spike_input[..., :ind]\n ))\n spike0 = neuron.spike(real0, imag0)\n real1, imag1 = neuron.dynamics((\n quantized_real_weight[..., ind:] * spike_input[..., ind:],\n quantized_imag_weight[..., ind:] * spike_input[..., ind:]\n ))\n spike1 = neuron.spike(real1, imag1)\n\n spike_error = (\n torch.norm(spike[..., :ind] - spike0)\n + torch.norm(spike[..., ind:] - spike1)\n ).item()\n real_error = (\n torch.norm(real[..., :ind] - real0)\n + torch.norm(real[..., ind:] - real1)\n ).item()\n imag_error = (\n torch.norm(imag[..., :ind] - imag0)\n + torch.norm(imag[..., ind:] - imag1)\n ).item()\n\n if verbose:\n print(ind)\n if spike_error >= 1e-5:\n print('Persistent spike states')\n print(\n spike[0, 0, ind - 10:ind + 10].cpu().data.numpy().tolist()\n )\n print(spike0[0, 0, -10:].cpu().data.numpy().tolist())\n print(spike1[0, 0, :10].cpu().data.numpy().tolist())\n if real_error >= 1e-5:\n print('Persistent real states')\n print((\n neuron.s_scale * real[0, 0, ind - 10:ind + 10]\n ).cpu().data.numpy().astype(int).tolist())\n print((\n neuron.s_scale * real0[0, 0, -10:]\n ).cpu().data.numpy().astype(int).tolist())\n print((\n neuron.s_scale * real1[0, 0, :10]\n ).cpu().data.numpy().astype(int).tolist())\n if imag_error >= 1e-5:\n print('Persistent imag states')\n print((\n neuron.s_scale * imag[0, 0, ind - 10:ind + 10]\n ).cpu().data.numpy().astype(int).tolist())\n print((\n neuron.s_scale * imag0[0, 0, -10:]\n ).cpu().data.numpy().astype(int).tolist())\n print((\n neuron.s_scale * imag1[0, 0, :10]\n ).cpu().data.numpy().astype(int).tolist())\n\n if verbose:\n if bool(os.environ.get('DISPLAY', None)):\n plt.figure()\n plt.plot(\n time.cpu().data.numpy(),\n imag[0, 0].cpu().data.numpy(),\n label='imag'\n )\n plt.plot(\n time[:ind].cpu().data.numpy(),\n imag0[0, 0].cpu().data.numpy(),\n label=':ind'\n )\n plt.plot(\n time[ind:].cpu().data.numpy(),\n imag1[0, 0].cpu().data.numpy(),\n label='ind:'\n )\n plt.xlabel('time')\n plt.legend()\n\n plt.figure()\n plt.plot(\n time.cpu().data.numpy(),\n real[0, 0].cpu().data.numpy(),\n label='real'\n )\n plt.plot(\n time[:ind].cpu().data.numpy(),\n real0[0, 0].cpu().data.numpy(),\n label=':ind'\n )\n plt.plot(\n time[ind:].cpu().data.numpy(),\n real1[0, 0].cpu().data.numpy(),\n label='ind:'\n )\n\n plt.plot(\n time[spike[0, 0] > 0].cpu().data.numpy(),\n 0 * spike[0, 0][spike[0, 0] > 0].cpu().data.numpy(),\n '.', markersize=12, label='spike'\n )\n plt.plot(\n time[:ind][spike0[0, 0] > 0].cpu().data.numpy(),\n 0 * spike0[0, 0][spike0[0, 0] > 0].cpu().data.numpy(),\n '.', label=':ind'\n )\n plt.plot(\n time[ind:][spike1[0, 0] > 0].cpu().data.numpy(),\n 0 * spike1[0, 0][spike1[0, 0] > 0].cpu().data.numpy(),\n '.', label='ind:'\n )\n plt.xlabel('time')\n plt.legend()\n plt.show()\n\n self.assertTrue(\n spike_error < 1e-5,\n f'Persistent state has errors in spike calculation. 
'\n f'Error was {spike_error}.'\n f'{seed=}'\n )\n self.assertTrue(\n real_error < 1e-5,\n f'Persistent state has errors in real calculation. '\n f'Error was {real_error}.'\n f'{seed=}'\n )\n self.assertTrue(\n imag_error < 1e-5,\n f'Persistent state has errors in imag calculation. '\n f'Error was {imag_error}.'\n f'{seed=}'\n )\n\n def test_backward(self):\n spike_target = spike.clone().detach()\n real_target = real.clone().detach()\n imag_target = imag.clone().detach()\n\n spike_target[\n ..., np.random.randint(spike_input.shape[-1], size=5)\n ] = 1\n real_target[\n ..., np.random.randint(spike_input.shape[-1], size=5)\n ] -= 1\n imag_target[\n ..., np.random.randint(spike_input.shape[-1], size=5)\n ] -= -1\n\n loss = F.mse_loss(spike, spike_target) \\\n + F.mse_loss(real, real_target) \\\n + F.mse_loss(imag, imag_target)\n loss.backward()\n\n # just looking for errors\n self.assertTrue(True, 'Encountered errors.')\n\n def test_graded_spikes(self):\n # TODO: after further study of network behavior with graded spikes.\n pass\n" ]
[ [ "matplotlib.pyplot.legend", "torch.norm", "numpy.random.random", "torch.floor", "numpy.random.seed", "torch.var", "numpy.arange", "matplotlib.pyplot.figure", "torch.nn.functional.mse_loss", "torch.cuda.is_available", "torch.device", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ctrl-gaurav/Mark-IT
[ "96b3f49b35ecc0c1c746961f41e7eafb1edeebe9" ]
[ "Attendance.py" ]
[ "import cv2\nimport numpy as np\nimport face_recognition\nimport os\nimport datetime\nimport pyrebase\n\nconfig = {\n \"apiKey\": \"AIzaSyBttgLVbtWWdtRhos39BzbqvQZDIJaIe5U\",\n \"authDomain\": \"mark-it-ec28b.firebaseapp.com\",\n \"projectId\": \"mark-it-ec28b\",\n \"storageBucket\": \"mark-it-ec28b.appspot.com\",\n \"messagingSenderId\": \"187768173767\",\n \"appId\": \"1:187768173767:web:e9ba36b17e9112fc6cfae2\",\n \"measurementId\": \"G-42HYDHB4Q1\",\n \"databaseURL\": \"gs://mark-it-ec28b.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\nstorage = firebase.storage()\n\npath_on_cloud = \"Attendance/Attendance.csv\"\npath_local = \"Attendance.csv\"\n\npath = 'Image Database'\nimages = []\nclassNames = []\nmyList = os.listdir(path)\nprint(myList)\n\nfor cls in myList:\n curImg = cv2.imread(f'{path}/{cls}')\n images.append(curImg)\n classNames.append(os.path.splitext(cls)[0])\n\nprint(classNames)\n\n\ndef findEncodings(images):\n encodeList = []\n for img in images:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n encode = face_recognition.face_encodings(img)[0]\n encodeList.append(encode)\n return encodeList\n\n\ndef markAttendance(name):\n\n with open('Attendance.csv', 'r+') as f:\n myDataList = f.readlines()\n nameList = []\n for line in myDataList:\n entry = line.split(',')\n nameList.append(entry[0])\n\n if name not in nameList:\n now = datetime.datetime.now()\n dtString = now.strftime('%H:%M:%S')\n f.writelines(f'\\n{name}, {dtString}')\n # upload csv to firebase\n storage.child(path_on_cloud).put(path_local)\n\n\nencodeListKnown = findEncodings(images)\nprint('Encoding Complete')\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n success, img = cap.read()\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\n\n facesCurFrame = face_recognition.face_locations(imgS)\n encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)\n\n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\n matches = face_recognition.compare_faces(encodeListKnown, encodeFace)\n faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)\n # print(faceDis)\n matchIndex = np.argmin(faceDis)\n\n if matches[matchIndex]:\n name = classNames[matchIndex].upper()\n # print(name)\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n markAttendance(name)\n storage.child(path_on_cloud).put(path_local)\n\n cv2.imshow('Webcam', img)\n cv2.waitKey(1)\n" ]
[ [ "numpy.argmin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
logarithm27/Improved_Gans_PyTorch
[ "743d836bf1b3e072221e04cd4d7234084d4f0648" ]
[ "Generator.py" ]
[ "import torch\nfrom torch import device\nfrom torch.nn import *\nimport torch.nn.init as weight_initialization\nfrom utilities import weight_normalization\n\n# Make computations over CPU OR GPU\nGPU = 'cuda'\nCPU = 'cpu'\n\n\nclass Generator(Module):\n def __init__(self, noise_dimension, output_dimension=28 * 28):\n super(Generator, self).__init__()\n self.noise_dimension = noise_dimension\n # first hidden layer with noise dimension as input and 500 neurons in output\n self.dense_layer_1 = Sequential(Linear(noise_dimension, 500, bias=False),\n BatchNorm1d(500, affine=False,eps=1e-6, momentum=0.5),\n Softplus())\n # second hidden layer with 500 neurons in input (output of previous hidden layer) and 500 in output\n self.dense_layer_2 = Sequential(Linear(500,500,bias=False),\n BatchNorm1d(500, affine=False, eps=1e-6, momentum=0.5),\n Softplus())\n # apply weight norm at output of the model\n self.weight_normalization_ = Sequential(weight_normalization(500,output_dimension),Softplus())\n # weight initialization using kaiming uniform distribution for the first and second dense layers (fully connected layers)\n weight_initialization.kaiming_uniform_(self.dense_layer_1[0].weight)\n weight_initialization.kaiming_uniform_(self.dense_layer_2[0].weight)\n\n def forward(self,batch_size):\n x = torch.rand(batch_size, self.noise_dimension).to(device=GPU)\n x = self.dense_layer_1(x)\n x = self.dense_layer_2(x)\n x = self.weight_normalization_(x)\n return x\n\n" ]
[ [ "torch.nn.init.kaiming_uniform_", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Arijit-hydrated/pgmpy
[ "66164e08a21267cb844c0b26fd6a657eecff57bb" ]
[ "pgmpy/tests/test_models/test_MarkovModel.py" ]
[ "import unittest\n\nimport networkx as nx\nimport numpy as np\n\nfrom pgmpy.factors.discrete import DiscreteFactor\nfrom pgmpy.factors import factor_product\nfrom pgmpy.independencies import Independencies\nfrom pgmpy.extern import six\nfrom pgmpy.extern.six.moves import range\nfrom pgmpy.models import BayesianModel, MarkovModel, FactorGraph\nfrom pgmpy.tests import help_functions as hf\n\n\nclass TestMarkovModelCreation(unittest.TestCase):\n def setUp(self):\n self.graph = MarkovModel()\n\n def test_class_init_without_data(self):\n self.assertIsInstance(self.graph, MarkovModel)\n\n def test_class_init_with_data_string(self):\n self.g = MarkovModel([('a', 'b'), ('b', 'c')])\n self.assertListEqual(sorted(self.g.nodes()), ['a', 'b', 'c'])\n self.assertListEqual(hf.recursive_sorted(self.g.edges()),\n [['a', 'b'], ['b', 'c']])\n\n def test_class_init_with_data_nonstring(self):\n self.g = MarkovModel([(1, 2), (2, 3)])\n\n def test_add_node_string(self):\n self.graph.add_node('a')\n self.assertListEqual(self.graph.nodes(), ['a'])\n\n def test_add_node_nonstring(self):\n self.graph.add_node(1)\n\n def test_add_nodes_from_string(self):\n self.graph.add_nodes_from(['a', 'b', 'c', 'd'])\n self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'c', 'd'])\n\n def test_add_nodes_from_non_string(self):\n self.graph.add_nodes_from([1, 2, 3, 4])\n\n def test_add_edge_string(self):\n self.graph.add_edge('d', 'e')\n self.assertListEqual(sorted(self.graph.nodes()), ['d', 'e'])\n self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n [['d', 'e']])\n self.graph.add_nodes_from(['a', 'b', 'c'])\n self.graph.add_edge('a', 'b')\n self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n [['a', 'b'], ['d', 'e']])\n\n def test_add_edge_nonstring(self):\n self.graph.add_edge(1, 2)\n\n def test_add_edge_selfloop(self):\n self.assertRaises(ValueError, self.graph.add_edge, 'a', 'a')\n\n def test_add_edges_from_string(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c')])\n self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'c'])\n self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n [['a', 'b'], ['b', 'c']])\n self.graph.add_nodes_from(['d', 'e', 'f'])\n self.graph.add_edges_from([('d', 'e'), ('e', 'f')])\n self.assertListEqual(sorted(self.graph.nodes()),\n ['a', 'b', 'c', 'd', 'e', 'f'])\n self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n hf.recursive_sorted([('a', 'b'), ('b', 'c'),\n ('d', 'e'), ('e', 'f')]))\n\n def test_add_edges_from_nonstring(self):\n self.graph.add_edges_from([(1, 2), (2, 3)])\n\n def test_add_edges_from_self_loop(self):\n self.assertRaises(ValueError, self.graph.add_edges_from,\n [('a', 'a')])\n\n def test_number_of_neighbors(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c')])\n self.assertEqual(len(self.graph.neighbors('b')), 2)\n\n def tearDown(self):\n del self.graph\n\n\nclass TestMarkovModelMethods(unittest.TestCase):\n def setUp(self):\n self.graph = MarkovModel()\n\n def test_get_cardinality(self):\n\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n\n self.assertDictEqual(self.graph.get_cardinality(), {})\n\n phi1 = DiscreteFactor(['a', 'b'], [1, 2], np.random.rand(2))\n self.graph.add_factors(phi1)\n self.assertDictEqual(self.graph.get_cardinality(), {'a': 1, 'b': 2})\n self.graph.remove_factors(phi1)\n self.assertDictEqual(self.graph.get_cardinality(), {})\n\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))\n phi2 = DiscreteFactor(['c', 'd'], [1, 2], np.random.rand(2))\n 
self.graph.add_factors(phi1, phi2)\n self.assertDictEqual(self.graph.get_cardinality(), {'d': 2, 'a': 2, 'b': 2, 'c': 1})\n\n phi3 = DiscreteFactor(['d', 'a'], [1, 2], np.random.rand(2))\n self.graph.add_factors(phi3)\n self.assertDictEqual(self.graph.get_cardinality(), {'d': 1, 'c': 1, 'b': 2, 'a': 2})\n\n self.graph.remove_factors(phi1, phi2, phi3)\n self.assertDictEqual(self.graph.get_cardinality(), {})\n\n def test_get_cardinality_with_node(self):\n\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))\n phi2 = DiscreteFactor(['c', 'd'], [1, 2], np.random.rand(2))\n self.graph.add_factors(phi1, phi2)\n self.assertEqual(self.graph.get_cardinality('a'), 2)\n self.assertEqual(self.graph.get_cardinality('b'), 2)\n self.assertEqual(self.graph.get_cardinality('c'), 1)\n self.assertEqual(self.graph.get_cardinality('d'), 2)\n\n def test_check_model(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n\n phi1 = DiscreteFactor(['a', 'b'], [1, 2], np.random.rand(2))\n self.graph.add_factors(phi1)\n self.assertRaises(ValueError, self.graph.check_model)\n\n phi2 = DiscreteFactor(['a', 'c'], [1, 2], np.random.rand(2))\n self.graph.add_factors(phi2)\n self.assertRaises(ValueError, self.graph.check_model)\n\n def test_check_model1(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n phi1 = DiscreteFactor(['a', 'b'], [1, 2], np.random.rand(2))\n phi2 = DiscreteFactor(['c', 'b'], [3, 2], np.random.rand(6))\n phi3 = DiscreteFactor(['c', 'd'], [3, 4], np.random.rand(12))\n phi4 = DiscreteFactor(['d', 'a'], [4, 1], np.random.rand(4))\n\n self.graph.add_factors(phi1, phi2, phi3, phi4)\n self.assertTrue(self.graph.check_model())\n\n self.graph.remove_factors(phi1, phi4)\n phi1 = DiscreteFactor(['a', 'b'], [4, 2], np.random.rand(8))\n self.graph.add_factors(phi1)\n self.assertTrue(self.graph.check_model())\n\n def test_check_model2(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n\n phi1 = DiscreteFactor(['a', 'b'], [1, 2], np.random.rand(2))\n\n phi2 = DiscreteFactor(['b', 'c'], [3, 3], np.random.rand(9))\n self.graph.add_factors(phi1, phi2)\n self.assertRaises(ValueError, self.graph.check_model)\n self.graph.remove_factors(phi2)\n\n phi3 = DiscreteFactor(['c', 'a'], [4, 4], np.random.rand(16))\n self.graph.add_factors(phi3)\n self.assertRaises(ValueError, self.graph.check_model)\n self.graph.remove_factors(phi3)\n\n phi2 = DiscreteFactor(['b', 'c'], [2, 3], np.random.rand(6))\n phi3 = DiscreteFactor(['c', 'd'], [3, 4], np.random.rand(12))\n phi4 = DiscreteFactor(['d', 'a'], [4, 3], np.random.rand(12))\n self.graph.add_factors(phi2, phi3, phi4)\n self.assertRaises(ValueError, self.graph.check_model)\n self.graph.remove_factors(phi2, phi3, phi4)\n\n phi2 = DiscreteFactor(['a', 'b'], [1, 3], np.random.rand(3))\n self.graph.add_factors(phi1, phi2)\n self.assertRaises(ValueError, self.graph.check_model)\n self.graph.remove_factors(phi2)\n\n def test_check_model3(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n\n phi1 = DiscreteFactor(['a', 'c'], [1, 2], np.random.rand(2))\n self.graph.add_factors(phi1)\n self.assertRaises(ValueError, self.graph.check_model)\n self.graph.remove_factors(phi1)\n\n phi1 = DiscreteFactor(['a', 'b'], [1, 2], np.random.rand(2))\n phi2 = DiscreteFactor(['a', 'c'], [1, 2], np.random.rand(2))\n self.graph.add_factors(phi1, phi2)\n 
self.assertRaises(ValueError, self.graph.check_model)\n self.graph.remove_factors(phi1, phi2)\n\n phi1 = DiscreteFactor(['a', 'b'], [1, 2], np.random.rand(2))\n phi2 = DiscreteFactor(['b', 'c'], [2, 3], np.random.rand(6))\n phi3 = DiscreteFactor(['c', 'd'], [3, 4], np.random.rand(12))\n phi4 = DiscreteFactor(['d', 'a'], [4, 1], np.random.rand(4))\n phi5 = DiscreteFactor(['d', 'b'], [4, 2], np.random.rand(8))\n self.graph.add_factors(phi1, phi2, phi3, phi4, phi5)\n self.assertRaises(ValueError, self.graph.check_model)\n self.graph.remove_factors(phi1, phi2, phi3, phi4, phi5)\n\n def test_factor_graph(self):\n phi1 = DiscreteFactor(['Alice', 'Bob'], [3, 2], np.random.rand(6))\n phi2 = DiscreteFactor(['Bob', 'Charles'], [2, 2], np.random.rand(4))\n self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])\n self.graph.add_factors(phi1, phi2)\n\n factor_graph = self.graph.to_factor_graph()\n self.assertIsInstance(factor_graph, FactorGraph)\n self.assertListEqual(sorted(factor_graph.nodes()),\n ['Alice', 'Bob', 'Charles', 'phi_Alice_Bob',\n 'phi_Bob_Charles'])\n self.assertListEqual(hf.recursive_sorted(factor_graph.edges()),\n [['Alice', 'phi_Alice_Bob'], ['Bob', 'phi_Alice_Bob'],\n ['Bob', 'phi_Bob_Charles'], ['Charles', 'phi_Bob_Charles']])\n self.assertListEqual(factor_graph.get_factors(), [phi1, phi2])\n\n def test_factor_graph_raises_error(self):\n self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])\n self.assertRaises(ValueError, self.graph.to_factor_graph)\n\n def test_junction_tree(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n self.graph.add_factors(phi1, phi2, phi3, phi4)\n\n junction_tree = self.graph.to_junction_tree()\n self.assertListEqual(hf.recursive_sorted(junction_tree.nodes()),\n [['a', 'b', 'd'], ['b', 'c', 'd']])\n self.assertEqual(len(junction_tree.edges()), 1)\n\n def test_junction_tree_single_clique(self):\n\n self.graph.add_edges_from([('x1', 'x2'), ('x2', 'x3'), ('x1', 'x3')])\n phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in self.graph.edges()]\n self.graph.add_factors(*phi)\n\n junction_tree = self.graph.to_junction_tree()\n self.assertListEqual(hf.recursive_sorted(junction_tree.nodes()),\n [['x1', 'x2', 'x3']])\n factors = junction_tree.get_factors()\n self.assertEqual(factors[0], factor_product(*phi))\n\n def test_markov_blanket(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c')])\n self.assertListEqual(self.graph.markov_blanket('a'), ['b'])\n self.assertListEqual(sorted(self.graph.markov_blanket('b')),\n ['a', 'c'])\n\n def test_local_independencies(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c')])\n independencies = self.graph.get_local_independencies()\n self.assertIsInstance(independencies, Independencies)\n self.assertEqual(independencies, Independencies(['a', 'c', 'b']))\n\n def test_bayesian_model(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n self.graph.add_factors(phi1, phi2, phi3, phi4)\n\n bm = 
self.graph.to_bayesian_model()\n self.assertIsInstance(bm, BayesianModel)\n self.assertListEqual(sorted(bm.nodes()), ['a', 'b', 'c', 'd'])\n self.assertTrue(nx.is_chordal(bm.to_undirected()))\n\n def tearDown(self):\n del self.graph\n\n\nclass TestUndirectedGraphFactorOperations(unittest.TestCase):\n def setUp(self):\n self.graph = MarkovModel()\n\n def test_add_factor_raises_error(self):\n self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles'),\n ('Charles', 'Debbie'), ('Debbie', 'Alice')])\n factor = DiscreteFactor(['Alice', 'Bob', 'John'], [2, 2, 2], np.random.rand(8))\n self.assertRaises(ValueError, self.graph.add_factors, factor)\n\n def test_add_single_factor(self):\n self.graph.add_nodes_from(['a', 'b', 'c'])\n phi = DiscreteFactor(['a', 'b'], [2, 2], range(4))\n self.graph.add_factors(phi)\n six.assertCountEqual(self, self.graph.factors, [phi])\n\n def test_add_multiple_factors(self):\n self.graph.add_nodes_from(['a', 'b', 'c'])\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], range(4))\n phi2 = DiscreteFactor(['b', 'c'], [2, 2], range(4))\n self.graph.add_factors(phi1, phi2)\n six.assertCountEqual(self, self.graph.factors, [phi1, phi2])\n\n def test_get_factors(self):\n self.graph.add_nodes_from(['a', 'b', 'c'])\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], range(4))\n phi2 = DiscreteFactor(['b', 'c'], [2, 2], range(4))\n six.assertCountEqual(self, self.graph.get_factors(), [])\n self.graph.add_factors(phi1, phi2)\n six.assertCountEqual(self, self.graph.get_factors(), [phi1, phi2])\n six.assertCountEqual(self, self.graph.get_factors('a'), [phi1])\n\n def test_remove_single_factor(self):\n self.graph.add_nodes_from(['a', 'b', 'c'])\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], range(4))\n phi2 = DiscreteFactor(['b', 'c'], [2, 2], range(4))\n self.graph.add_factors(phi1, phi2)\n self.graph.remove_factors(phi1)\n six.assertCountEqual(self, self.graph.factors, [phi2])\n\n def test_remove_multiple_factors(self):\n self.graph.add_nodes_from(['a', 'b', 'c'])\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], range(4))\n phi2 = DiscreteFactor(['b', 'c'], [2, 2], range(4))\n self.graph.add_factors(phi1, phi2)\n self.graph.remove_factors(phi1, phi2)\n six.assertCountEqual(self, self.graph.factors, [])\n\n def test_partition_function(self):\n self.graph.add_nodes_from(['a', 'b', 'c'])\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], range(4))\n phi2 = DiscreteFactor(['b', 'c'], [2, 2], range(4))\n self.graph.add_factors(phi1, phi2)\n self.graph.add_edges_from([('a', 'b'), ('b', 'c')])\n self.assertEqual(self.graph.get_partition_function(), 22.0)\n\n def test_partition_function_raises_error(self):\n self.graph.add_nodes_from(['a', 'b', 'c', 'd'])\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], range(4))\n phi2 = DiscreteFactor(['b', 'c'], [2, 2], range(4))\n self.graph.add_factors(phi1, phi2)\n self.assertRaises(ValueError,\n self.graph.get_partition_function)\n\n def tearDown(self):\n del self.graph\n\n\nclass TestUndirectedGraphTriangulation(unittest.TestCase):\n def setUp(self):\n self.graph = MarkovModel()\n\n def test_check_clique(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a')])\n self.assertTrue(self.graph.is_clique(['a', 'b', 'c']))\n\n def test_is_triangulated(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a')])\n self.assertTrue(self.graph.is_triangulated())\n\n def test_triangulation_h1_inplace(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n phi2 = 
DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        self.graph.triangulate(heuristic='H1', inplace=True)\n        self.assertTrue(self.graph.is_triangulated())\n        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n                             [['a', 'b'], ['a', 'c'], ['a', 'd'],\n                              ['b', 'c'], ['c', 'd']])\n\n    def test_triangulation_h2_inplace(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        self.graph.triangulate(heuristic='H2', inplace=True)\n        self.assertTrue(self.graph.is_triangulated())\n        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n                             [['a', 'b'], ['a', 'c'], ['a', 'd'],\n                              ['b', 'c'], ['c', 'd']])\n\n    def test_triangulation_h3_inplace(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        self.graph.triangulate(heuristic='H3', inplace=True)\n        self.assertTrue(self.graph.is_triangulated())\n        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n                             [['a', 'b'], ['a', 'd'], ['b', 'c'],\n                              ['b', 'd'], ['c', 'd']])\n\n    def test_triangulation_h4_inplace(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        self.graph.triangulate(heuristic='H4', inplace=True)\n        self.assertTrue(self.graph.is_triangulated())\n        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n                             [['a', 'b'], ['a', 'd'], ['b', 'c'],\n                              ['b', 'd'], ['c', 'd']])\n\n    def test_triangulation_h5_inplace(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        self.graph.triangulate(heuristic='H5', inplace=True)\n        self.assertTrue(self.graph.is_triangulated())\n        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n                             [['a', 'b'], ['a', 'd'], ['b', 'c'],\n                              ['b', 'd'], ['c', 'd']])\n\n    def test_triangulation_h6_inplace(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, 
phi4)\n        self.graph.triangulate(heuristic='H6', inplace=True)\n        self.assertTrue(self.graph.is_triangulated())\n        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),\n                             [['a', 'b'], ['a', 'd'], ['b', 'c'],\n                              ['b', 'd'], ['c', 'd']])\n\n    def test_cardinality_mismatch_raises_error(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        factor_list = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in\n                       self.graph.edges()]\n        self.graph.add_factors(*factor_list)\n        self.graph.add_factors(DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6)))\n        self.assertRaises(ValueError, self.graph.triangulate)\n\n    def test_triangulation_h1_create_new(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        H = self.graph.triangulate(heuristic='H1', inplace=True)\n        self.assertListEqual(hf.recursive_sorted(H.edges()),\n                             [['a', 'b'], ['a', 'c'], ['a', 'd'],\n                              ['b', 'c'], ['c', 'd']])\n\n    def test_triangulation_h2_create_new(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        H = self.graph.triangulate(heuristic='H2', inplace=True)\n        self.assertListEqual(hf.recursive_sorted(H.edges()),\n                             [['a', 'b'], ['a', 'c'], ['a', 'd'],\n                              ['b', 'c'], ['c', 'd']])\n\n    def test_triangulation_h3_create_new(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        H = self.graph.triangulate(heuristic='H3', inplace=True)\n        self.assertListEqual(hf.recursive_sorted(H.edges()),\n                             [['a', 'b'], ['a', 'd'], ['b', 'c'],\n                              ['b', 'd'], ['c', 'd']])\n\n    def test_triangulation_h4_create_new(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        H = self.graph.triangulate(heuristic='H4', inplace=True)\n        self.assertListEqual(hf.recursive_sorted(H.edges()),\n                             [['a', 'b'], ['a', 'd'], ['b', 'c'],\n                              ['b', 'd'], ['c', 'd']])\n\n    def test_triangulation_h5_create_new(self):\n        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n                                   ('d', 'a')])\n        phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n        phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n        phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n        phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n        self.graph.add_factors(phi1, phi2, phi3, phi4)\n        H = 
self.graph.triangulate(heuristic='H5', inplace=True)\n self.assertListEqual(hf.recursive_sorted(H.edges()),\n [['a', 'b'], ['a', 'd'], ['b', 'c'],\n ['b', 'd'], ['c', 'd']])\n\n def test_triangulation_h6_create_new(self):\n self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),\n ('d', 'a')])\n phi1 = DiscreteFactor(['a', 'b'], [2, 3], np.random.rand(6))\n phi2 = DiscreteFactor(['b', 'c'], [3, 4], np.random.rand(12))\n phi3 = DiscreteFactor(['c', 'd'], [4, 5], np.random.rand(20))\n phi4 = DiscreteFactor(['d', 'a'], [5, 2], np.random.random(10))\n self.graph.add_factors(phi1, phi2, phi3, phi4)\n H = self.graph.triangulate(heuristic='H6', inplace=True)\n self.assertListEqual(hf.recursive_sorted(H.edges()),\n [['a', 'b'], ['a', 'd'], ['b', 'c'],\n ['b', 'd'], ['c', 'd']])\n\n def test_copy(self):\n # Setup the original graph\n self.graph.add_nodes_from(['a', 'b'])\n self.graph.add_edges_from([('a', 'b')])\n\n # Generate the copy\n copy = self.graph.copy()\n\n # Ensure the copied model is correct\n self.assertTrue(copy.check_model())\n\n # Basic sanity checks to ensure the graph was copied correctly\n self.assertEqual(len(copy.nodes()), 2)\n self.assertListEqual(copy.neighbors('a'), ['b'])\n self.assertListEqual(copy.neighbors('b'), ['a'])\n\n # Modify the original graph ...\n self.graph.add_nodes_from(['c'])\n self.graph.add_edges_from([('c', 'b')])\n\n # ... and ensure none of those changes get propagated\n self.assertEqual(len(copy.nodes()), 2)\n self.assertListEqual(copy.neighbors('a'), ['b'])\n self.assertListEqual(copy.neighbors('b'), ['a'])\n with self.assertRaises(nx.NetworkXError):\n copy.neighbors('c')\n\n # Ensure the copy has no factors at this point\n self.assertEqual(len(copy.get_factors()), 0)\n\n # Add factors to the original graph\n phi1 = DiscreteFactor(['a', 'b'], [2, 2], [[0.3, 0.7], [0.9, 0.1]])\n self.graph.add_factors(phi1)\n\n # The factors should not get copied over\n with self.assertRaises(AssertionError):\n self.assertListEqual(copy.get_factors(), self.graph.get_factors())\n\n # Create a fresh copy\n del copy\n copy = self.graph.copy()\n self.assertListEqual(copy.get_factors(), self.graph.get_factors())\n\n # If we change factors in the original, it should not be passed to the clone\n phi1.values = np.array([[0.5, 0.5], [0.5, 0.5]])\n self.assertNotEqual(self.graph.get_factors(), copy.get_factors())\n\n # Start with a fresh copy\n del copy\n self.graph.add_nodes_from(['d'])\n copy = self.graph.copy()\n\n # Ensure an unconnected node gets copied over as well\n self.assertEqual(len(copy.nodes()), 4)\n self.assertListEqual(self.graph.neighbors('a'), ['b'])\n self.assertTrue('a' in self.graph.neighbors('b'))\n self.assertTrue('c' in self.graph.neighbors('b'))\n self.assertListEqual(self.graph.neighbors('c'), ['b'])\n self.assertListEqual(self.graph.neighbors('d'), [])\n\n # Verify that changing the copied model should not update the original\n copy.add_nodes_from(['e'])\n self.assertListEqual(copy.neighbors('e'), [])\n with self.assertRaises(nx.NetworkXError):\n self.graph.neighbors('e')\n\n # Verify that changing edges in the copy doesn't create edges in the original\n copy.add_edges_from([('d', 'b')])\n\n self.assertTrue('a' in copy.neighbors('b'))\n self.assertTrue('c' in copy.neighbors('b'))\n self.assertTrue('d' in copy.neighbors('b'))\n\n self.assertTrue('a' in self.graph.neighbors('b'))\n self.assertTrue('c' in self.graph.neighbors('b'))\n self.assertFalse('d' in self.graph.neighbors('b'))\n\n # If we remove factors from the copied model, it should not 
reflect in the original\n copy.remove_factors(phi1)\n self.assertEqual(len(self.graph.get_factors()), 1)\n self.assertEqual(len(copy.get_factors()), 0)\n\n def tearDown(self):\n del self.graph\n" ]
[ [ "numpy.array", "numpy.random.random", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DKJJ/Swin-Transformer-Semantic-Segmentation
[ "c8707951ddabdc0189451bcbd25c145f1f6cc041" ]
[ "mmseg/models/backbones/resnet.py" ]
[ "import torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,\n constant_init, kaiming_init)\nfrom mmcv.runner import load_checkpoint\nfrom mmcv.utils.parrots_wrapper import _BatchNorm\n\nfrom mmseg.utils import get_root_logger\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\nimport torch.utils.checkpoint as checkpoint\n\n\nclass BasicBlock(nn.Module):\n \"\"\"Basic block for ResNet.\"\"\"\n\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n dcn=None,\n plugins=None):\n super(BasicBlock, self).__init__()\n assert dcn is None, 'Not implemented yet.'\n assert plugins is None, 'Not implemented yet.'\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n 3,\n stride=stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = build_conv_layer(\n conv_cfg, planes, planes, 3, padding=1, bias=False)\n self.add_module(self.norm2_name, norm2)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.with_cp = with_cp\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n \"\"\"Bottleneck block for ResNet.\n\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if it is\n \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n dcn=None,\n plugins=None):\n super(Bottleneck, self).__init__()\n assert style in ['pytorch', 'caffe']\n assert dcn is None or isinstance(dcn, dict)\n assert plugins is None or isinstance(plugins, list)\n if plugins is not None:\n allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']\n assert all(p['position'] in allowed_position for p in plugins)\n\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.dcn = dcn\n self.with_dcn = dcn is not None\n self.plugins = plugins\n self.with_plugins = plugins is not None\n\n if self.with_plugins:\n # collect plugins for conv1/conv2/conv3\n self.after_conv1_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 'after_conv1'\n ]\n self.after_conv2_plugins = [\n 
plugin['cfg'] for plugin in plugins\n if plugin['position'] == 'after_conv2'\n ]\n self.after_conv3_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 'after_conv3'\n ]\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n norm_cfg, planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n fallback_on_stride = False\n if self.with_dcn:\n fallback_on_stride = dcn.pop('fallback_on_stride', False)\n if not self.with_dcn or fallback_on_stride:\n self.conv2 = build_conv_layer(\n conv_cfg,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n else:\n assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n self.conv2 = build_conv_layer(\n dcn,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n conv_cfg,\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n\n if self.with_plugins:\n self.after_conv1_plugin_names = self.make_block_plugins(\n planes, self.after_conv1_plugins)\n self.after_conv2_plugin_names = self.make_block_plugins(\n planes, self.after_conv2_plugins)\n self.after_conv3_plugin_names = self.make_block_plugins(\n planes * self.expansion, self.after_conv3_plugins)\n\n def make_block_plugins(self, in_channels, plugins):\n \"\"\"make plugins for block.\n\n Args:\n in_channels (int): Input channels of plugin.\n plugins (list[dict]): List of plugins cfg to build.\n\n Returns:\n list[str]: List of the names of plugin.\n \"\"\"\n assert isinstance(plugins, list)\n plugin_names = []\n for plugin in plugins:\n plugin = plugin.copy()\n name, layer = build_plugin_layer(\n plugin,\n in_channels=in_channels,\n postfix=plugin.pop('postfix', ''))\n assert not hasattr(self, name), f'duplicate plugin {name}'\n self.add_module(name, layer)\n plugin_names.append(name)\n return plugin_names\n\n def forward_plugin(self, x, plugin_names):\n \"\"\"Forward function for plugins.\"\"\"\n out = x\n for name in plugin_names:\n out = getattr(self, name)(x)\n return out\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n @property\n def norm3(self):\n \"\"\"nn.Module: normalization layer after the third convolution layer\"\"\"\n return getattr(self, self.norm3_name)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = self.relu(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, 
self.after_conv2_plugin_names)\n\n out = self.conv3(out)\n out = self.norm3(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\[email protected]_module()\nclass ResNet(nn.Module):\n \"\"\"ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Default\" 3.\n stem_channels (int): Number of stem channels. Default: 64.\n base_channels (int): Number of base channels of res layer. Default: 64.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv\n avg_down (bool): Use AvgPool instead of stride conv when\n downsampling in the bottleneck.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n plugins (list[dict]): List of plugins for stages, each dict contains:\n\n - cfg (dict, required): Cfg dict to build plugin.\n\n - position (str, required): Position inside block to insert plugin,\n options: 'after_conv1', 'after_conv2', 'after_conv3'.\n\n - stages (tuple[bool], optional): Stages to apply plugin, length\n should be same as 'num_stages'\n multi_grid (Sequence[int]|None): Multi grid dilation rates of last\n stage. Default: None\n contract_dilation (bool): Whether contract first dilation of each layer\n Default: False\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): Whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n\n Example:\n >>> from mmseg.models import ResNet\n >>> import torch\n >>> self = ResNet(depth=18)\n >>> self.eval()\n >>> inputs = torch.rand(1, 3, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... 
print(tuple(level_out.shape))\n (1, 64, 8, 8)\n (1, 128, 4, 4)\n (1, 256, 2, 2)\n (1, 512, 1, 1)\n \"\"\"\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self,\n depth,\n in_channels=3,\n stem_channels=64,\n base_channels=64,\n num_stages=4,\n strides=(1, 2, 2, 2),\n dilations=(1, 1, 1, 1),\n out_indices=(0, 1, 2, 3),\n style='pytorch',\n deep_stem=False,\n avg_down=False,\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=False,\n dcn=None,\n stage_with_dcn=(False, False, False, False),\n plugins=None,\n multi_grid=None,\n contract_dilation=False,\n with_cp=False,\n zero_init_residual=True):\n super(ResNet, self).__init__()\n if depth not in self.arch_settings:\n raise KeyError(f'invalid depth {depth} for resnet')\n self.depth = depth\n self.stem_channels = stem_channels\n self.base_channels = base_channels\n self.num_stages = num_stages\n assert num_stages >= 1 and num_stages <= 4\n self.strides = strides\n self.dilations = dilations\n assert len(strides) == len(dilations) == num_stages\n self.out_indices = out_indices\n assert max(out_indices) < num_stages\n self.style = style\n self.deep_stem = deep_stem\n self.avg_down = avg_down\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.with_cp = with_cp\n self.norm_eval = norm_eval\n self.dcn = dcn\n self.stage_with_dcn = stage_with_dcn\n if dcn is not None:\n assert len(stage_with_dcn) == num_stages\n self.plugins = plugins\n self.multi_grid = multi_grid\n self.contract_dilation = contract_dilation\n self.zero_init_residual = zero_init_residual\n self.block, stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n self.inplanes = stem_channels\n\n self._make_stem_layer(in_channels, stem_channels)\n\n self.res_layers = []\n for i, num_blocks in enumerate(self.stage_blocks):\n stride = strides[i]\n dilation = dilations[i]\n dcn = self.dcn if self.stage_with_dcn[i] else None\n if plugins is not None:\n stage_plugins = self.make_stage_plugins(plugins, i)\n else:\n stage_plugins = None\n # multi grid is applied to last layer only\n stage_multi_grid = multi_grid if i == len(\n self.stage_blocks) - 1 else None\n planes = base_channels * 2**i\n res_layer = self.make_res_layer(\n block=self.block,\n inplanes=self.inplanes,\n planes=planes,\n num_blocks=num_blocks,\n stride=stride,\n dilation=dilation,\n style=self.style,\n avg_down=self.avg_down,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n dcn=dcn,\n plugins=stage_plugins,\n multi_grid=stage_multi_grid,\n contract_dilation=contract_dilation)\n self.inplanes = planes * self.block.expansion\n layer_name = f'layer{i+1}'\n self.add_module(layer_name, res_layer)\n self.res_layers.append(layer_name)\n\n self._freeze_stages()\n\n self.feat_dim = self.block.expansion * base_channels * 2**(\n len(self.stage_blocks) - 1)\n\n def make_stage_plugins(self, plugins, stage_idx):\n \"\"\"make plugins for ResNet 'stage_idx'th stage .\n\n Currently we support to insert 'context_block',\n 'empirical_attention_block', 'nonlocal_block' into the backbone like\n ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of\n Bottleneck.\n\n An example of plugins format could be :\n >>> plugins=[\n ... dict(cfg=dict(type='xxx', arg1='xxx'),\n ... stages=(False, True, True, True),\n ... 
position='after_conv2'),\n ... dict(cfg=dict(type='yyy'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='1'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='2'),\n ... stages=(True, True, True, True),\n ... position='after_conv3')\n ... ]\n >>> self = ResNet(depth=18)\n >>> stage_plugins = self.make_stage_plugins(plugins, 0)\n >>> assert len(stage_plugins) == 3\n\n Suppose 'stage_idx=0', the structure of blocks in the stage would be:\n conv1-> conv2->conv3->yyy->zzz1->zzz2\n Suppose 'stage_idx=1', the structure of blocks in the stage would be:\n conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2\n\n If stages is missing, the plugin would be applied to all stages.\n\n Args:\n plugins (list[dict]): List of plugins cfg to build. The postfix is\n required if multiple same type plugins are inserted.\n stage_idx (int): Index of stage to build\n\n Returns:\n list[dict]: Plugins for current stage\n \"\"\"\n stage_plugins = []\n for plugin in plugins:\n plugin = plugin.copy()\n stages = plugin.pop('stages', None)\n assert stages is None or len(stages) == self.num_stages\n # whether to insert plugin into current stage\n if stages is None or stages[stage_idx]:\n stage_plugins.append(plugin)\n\n return stage_plugins\n\n def make_res_layer(self, **kwargs):\n \"\"\"Pack all blocks in a stage into a ``ResLayer``.\"\"\"\n return ResLayer(**kwargs)\n\n @property\n def norm1(self):\n \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n return getattr(self, self.norm1_name)\n\n def _make_stem_layer(self, in_channels, stem_channels):\n \"\"\"Make stem layer for ResNet.\"\"\"\n if self.deep_stem:\n self.stem = nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n in_channels,\n stem_channels // 2,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n nn.ReLU(inplace=True),\n build_conv_layer(\n self.conv_cfg,\n stem_channels // 2,\n stem_channels // 2,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n nn.ReLU(inplace=True),\n build_conv_layer(\n self.conv_cfg,\n stem_channels // 2,\n stem_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels)[1],\n nn.ReLU(inplace=True))\n else:\n self.conv1 = build_conv_layer(\n self.conv_cfg,\n in_channels,\n stem_channels,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=False)\n self.norm1_name, norm1 = build_norm_layer(\n self.norm_cfg, stem_channels, postfix=1)\n self.add_module(self.norm1_name, norm1)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def _freeze_stages(self):\n \"\"\"Freeze stages param and norm stats.\"\"\"\n if self.frozen_stages >= 0:\n if self.deep_stem:\n self.stem.eval()\n for param in self.stem.parameters():\n param.requires_grad = False\n else:\n self.norm1.eval()\n for m in [self.conv1, self.norm1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n if isinstance(pretrained, str):\n logger = get_root_logger()\n 
load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n\n if self.dcn is not None:\n for m in self.modules():\n if isinstance(m, Bottleneck) and hasattr(\n m, 'conv2_offset'):\n constant_init(m.conv2_offset, 0)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n if self.deep_stem:\n x = self.stem(x)\n else:\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n outs = []\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n x = res_layer(x)\n if i in self.out_indices:\n outs.append(x)\n return tuple(outs)\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keep normalization layer\n freezed.\"\"\"\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()\n\n\[email protected]_module()\nclass ResNetV1c(ResNet):\n \"\"\"ResNetV1c variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv\n in the input stem with three 3x3 convs.\n\n References:\n .. [1] https://arxiv.org/pdf/1812.01187.pdf\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1c, self).__init__(\n deep_stem=True, avg_down=False, **kwargs)\n\n\[email protected]_module()\nclass ResNetV1d(ResNet):\n \"\"\"ResNetV1d variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in\n the input stem with three 3x3 convs. And in the downsampling block, a 2x2\n avg_pool with stride 2 is added before conv, whose stride is changed to 1.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1d, self).__init__(\n deep_stem=True, avg_down=True, **kwargs)\n\[email protected]_module()\nclass ResNet_sp(ResNet):\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n # input x: (B, S, H, W)\n _B, _S, _H, _W = x.shape\n x = x.reshape((_B*_S, 1, _H, _W))\n # reshape x: (B*S, 1, H, W)\n x = x.repeat_interleave(3, dim=1)\n # repeat x: (B*S, 3, H, W)\n if self.deep_stem:\n x = self.stem(x)\n else:\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n outs = []\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n x = checkpoint.checkpoint(res_layer, x)\n if i in self.out_indices:\n # now x: (B*S, C, H, W)\n _, _, _H, _W = x.shape\n outs.append(x.reshape((_B, _S, -1, _H, _W)))\n return tuple(outs)\n\[email protected]_module()\nclass ResNetV1c_sp(ResNet_sp):\n \"\"\"ResNetV1c variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv\n in the input stem with three 3x3 convs.\n\n References:\n .. 
[1] https://arxiv.org/pdf/1812.01187.pdf\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1c_sp, self).__init__(\n deep_stem=True, avg_down=False, **kwargs)\n\n\[email protected]_module()\nclass ResNetV1d_sp(ResNet_sp):\n \"\"\"ResNetV1d variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in\n the input stem with three 3x3 convs. And in the downsampling block, a 2x2\n avg_pool with stride 2 is added before conv, whose stride is changed to 1.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1d_sp, self).__init__(\n deep_stem=True, avg_down=True, **kwargs)\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.utils.checkpoint.checkpoint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shreyagummadi/Traffic-Sign-Detection-and-Recognition
[ "a92d1386a123d27c31324a1e02d6be83e0cf4ce7" ]
[ "trainClassifier.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 16 09:42:36 2019\r\n\r\n@author: Sneha\r\n\"\"\"\r\nfrom PIL import Image\r\nimport glob\r\nimport os\r\nimport cv2\r\nfrom skimage import exposure\r\nfrom skimage import feature\r\nfrom sklearn import svm\r\nimport cv2\r\n\r\n\r\n\r\ndef trainClassifier():\r\n print(\"Here\")\r\n for name in glob.glob('training_set/*.*'):\r\n print(name)\r\n winSize = (64,64)\r\n blockSize = (8,8)\r\n blockStride = (4,4)\r\n cellSize = (4,4)\r\n nbins = 9\r\n derivAperture = 1\r\n winSigma = -1.\r\n histogramNormType = 0\r\n L2HysThreshold = 0.2\r\n gammaCorrection = 1\r\n nlevels = 64\r\n signedGradient = True\r\n\r\n hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,histogramNormType,L2HysThreshold,gammaCorrection,nlevels, signedGradient)\r\n x=''\r\n trainingFeaturesSK,trainingFeaturesCV = dict(),dict()\r\n features,labels=[],[]\r\n for root, dirs, files in os.walk('training_selected/'):\r\n# print(root,dirs,files)\r\n for name in files:\r\n# print(name)\r\n if (name.endswith((\".ppm\")) or name.endswith((\".jpg\")) ):\r\n# print(root)\r\n if(x is not root):\r\n \r\n x=root\r\n print(x)\r\n arr=[]\r\n arr1=[]\r\n img = cv2.imread(root+'/'+name)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n median = cv2.medianBlur(gray,3)\r\n img=cv2.resize(median,(64,64))\r\n# print(img.shape)\r\n# cv2.imshow('result',img)\r\n# cv2.waitKey(0)\r\n img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)\r\n\r\n# ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)\r\n# cv2.imshow('result',img)\r\n# cv2.waitKey(0)\r\n# print(root.split('/')[1])\r\n# trainingFeatures[root.split('/')[1]]=(hog.compute(img))\r\n #-------Through skimage------------\r\n (H, hogImage) = feature.hog(img, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), transform_sqrt=True, block_norm=\"L1\",visualize=True)\r\n# hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255))\r\n# hogImage = hogImage.astype(\"uint8\")\r\n \r\n# cv2.imshow(\"HOG Image\", hogImage)\r\n features.append(H)\r\n labels.append(root.split('/')[1])\r\n arr1.append(H)\r\n trainingFeaturesSK[root.split('/')[1]]=arr1\r\n \r\n #-----------------------------------\r\n \r\n \r\n #----------Through opencv-------\r\n h=hog.compute(img)\r\n# h=h.ravel()\r\n# features.append(h)\r\n# labels.append(root.split('/')[1])\r\n# cv2.imshow('result',h)\r\n# cv2.waitKey(0)\r\n arr.append(h)\r\n trainingFeaturesCV[root.split('/')[1]]=arr\r\n # ------------------\r\n \r\n for root, dirs, files in os.walk('neg'):\r\n# print(root,dirs,files)\r\n print('neg')\r\n for name in files:\r\n \r\n if name.endswith((\".jpg\")):\r\n# print(root)\r\n# if(x is not root):\r\n \r\n# x=root\r\n# print(root+'/'+name)\r\n arr=[]\r\n arr1=[]\r\n img = cv2.imread(root+'/'+name)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n median = cv2.medianBlur(gray,3)\r\n img=cv2.resize(median,(64,64))\r\n# print(img.shape)\r\n# cv2.imshow('result',img)\r\n# cv2.waitKey(0)\r\n img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)\r\n\r\n# ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)\r\n# cv2.imshow('result',img)\r\n# cv2.waitKey(0)\r\n# print(root.split('/')[1])\r\n# trainingFeatures[root.split('/')[1]]=(hog.compute(img))\r\n #-------Through skimage------------\r\n (H, hogImage) = feature.hog(img, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), transform_sqrt=True, 
block_norm=\"L1\",visualize=True)\r\n# hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255))\r\n# hogImage = hogImage.astype(\"uint8\")\r\n \r\n# cv2.imshow(\"HOG Image\", hogImage)\r\n features.append(H)\r\n labels.append('negatives')\r\n# print(trainingFeaturesSK.keys())\r\n clf = svm.SVC(kernel='linear', probability=True, decision_function_shape='ovo')\r\n# clf.probability='True'\r\n clf.fit(features,labels) \r\n \r\n# print(clf)\r\n#cv.destroyAllWindows()\r\n# trainingSet = imageDatastore('../training_selected', 'IncludeSubfolders', true, 'LabelSource', 'foldernames');\r\n# testSet = imageDatastore('../testing_selected', 'IncludeSubfolders', true, 'LabelSource', 'foldernames');\r\n# \r\n# \r\n# numImages = numel(trainingSet.Files);\r\n# % trainingFeatures = zeros(numImages, hogFeatureSize, 'single');\r\n# \r\n# for i = 1:numImages\r\n# img = readimage(trainingSet, i);\r\n# img = rgb2gray(img);\r\n# img = medfilt2(img, [3 3]);\r\n# img = imresize(img, [64 64]); \r\n# % Apply pre-processing steps\r\n# img = imbinarize(img);\r\n# \r\n# trainingFeatures(i, :) = extractHOGFeatures(img, 'CellSize', [4 4]); \r\n# end\r\n# % Get labels for each image.\r\n# trainingLabels = trainingSet.Labels;\r\n# classifier = fitcecoc(trainingFeatures, trainingLabels);\r\n return features, labels,clf" ]
[ [ "sklearn.svm.SVC" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PacktPublishing/Automated-Machine-Learning-on-AWS
[ "e232aee0d89c066c8f6f95522a45f2d25495db1c" ]
[ "Chapter09/Files/airflow/dags/model/model_training.py" ]
[ "import argparse\nimport os\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam\nfrom sklearn import preprocessing\n\ntf.get_logger().setLevel(\"ERROR\")\n\nif __name__ == \"__main__\":\n print(f\"Tensorflow Version: {tf.__version__}\")\n column_names = [\"rings\", \"sex\", \"length\", \"diameter\", \"height\", \"whole_weight\", \"shucked_weight\", \"viscera_weight\", \"shell_weight\"]\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epochs\", type=int, default=2)\n parser.add_argument(\"--batch-size\", type=int, default=8)\n parser.add_argument(\"--model-dir\", type=str, default=os.environ[\"SM_MODEL_DIR\"])\n parser.add_argument(\"--training\", type=str, default=os.environ[\"SM_CHANNEL_TRAINING\"])\n args, _ = parser.parse_known_args()\n epochs = args.epochs\n batch_size = args.batch_size\n training_path = args.training\n model_path = args.model_dir\n train_data = pd.read_csv(os.path.join(training_path, \"training.csv\"), sep=\",\", names=column_names)\n val_data = pd.read_csv(os.path.join(training_path, \"validation.csv\"), sep=\",\", names=column_names)\n train_y = train_data[\"rings\"].to_numpy()\n train_X = train_data.drop([\"rings\"], axis=1).to_numpy()\n val_y = val_data[\"rings\"].to_numpy()\n val_X = val_data.drop([\"rings\"], axis=1).to_numpy()\n train_X = preprocessing.normalize(train_X)\n val_X = preprocessing.normalize(val_X)\n network_layers = [\n Dense(64, activation=\"relu\", kernel_initializer=\"normal\", input_dim=8),\n Dense(64, activation=\"relu\"),\n Dense(1, activation=\"linear\")\n ]\n model = Sequential(network_layers)\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=[\"mae\", \"accuracy\"])\n model.summary()\n model.fit(\n train_X,\n train_y,\n validation_data=(val_X, val_y),\n batch_size=batch_size,\n epochs=epochs,\n shuffle=True,\n verbose=1\n )\n \n model.save(os.path.join(model_path, \"model.h5\"))\n model_version = 1\n export_path = os.path.join(model_path, str(model_version))\n tf.keras.models.save_model(\n model,\n export_path,\n overwrite=True,\n include_optimizer=True,\n save_format=None,\n signatures=None,\n options=None\n )" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.models.save_model", "tensorflow.get_logger", "sklearn.preprocessing.normalize", "tensorflow.keras.models.Sequential" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
danbider/lightning-pose
[ "23dc5f22e4b40fa8b71193322f11fca703fd8ec9" ]
[ "lightning_pose/models/base.py" ]
[ "\"\"\"Base class for resnet backbone that acts as a feature extractor.\"\"\"\n\nfrom pytorch_lightning.core.lightning import LightningModule\nimport torch\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR, MultiStepLR\nfrom torchtyping import TensorType, patch_typeguard\nimport torchvision.models as models\nfrom typeguard import typechecked\nfrom typing import Any, Callable, Dict, List, Literal, Optional, Tuple, TypedDict, Union\n\npatch_typeguard() # use before @typechecked\n\nMULTISTEPLR_MILESTONES_DEFAULT = [100, 200, 300]\nMULTISTEPLR_GAMMA_DEFAULT = 0.5\n\n\n@typechecked\ndef grab_resnet_backbone(\n resnet_version: Literal[18, 34, 50, 101, 152] = 18,\n pretrained: bool = True,\n) -> models.resnet.ResNet:\n \"\"\"Load resnet architecture from torchvision.\n\n Args:\n resnet_version: choose network depth\n pretrained: True to load weights pretrained on imagenet\n\n Returns:\n selected resnet architecture as a model object\n\n \"\"\"\n resnets = {\n 18: models.resnet18,\n 34: models.resnet34,\n 50: models.resnet50,\n 101: models.resnet101,\n 152: models.resnet152,\n }\n return resnets[resnet_version](pretrained)\n\n\n@typechecked\ndef grab_layers_sequential(\n model: models.resnet.ResNet, last_layer_ind: Optional[int] = None\n) -> torch.nn.modules.container.Sequential:\n \"\"\"Package selected number of layers into a nn.Sequential object.\n\n Args:\n model: original resnet model\n last_layer_ind: final layer to pass data through\n\n Returns:\n potentially reduced backbone model\n\n \"\"\"\n layers = list(model.children())[: last_layer_ind + 1]\n return nn.Sequential(*layers)\n\n\nclass BaseBatchDict(TypedDict):\n \"\"\"Class for finer control over typechecking.\"\"\"\n\n images: TensorType[\"batch\", \"RGB\":3, \"image_height\", \"image_width\", float]\n keypoints: TensorType[\"batch\", \"num_targets\", float]\n idxs: TensorType[\"batch\", int]\n\n\nclass HeatmapBatchDict(BaseBatchDict):\n \"\"\"Class for finer control over typechecking.\"\"\"\n\n heatmaps: TensorType[\n \"batch\", \"num_keypoints\", \"heatmap_height\", \"heatmap_width\", float\n ]\n\n\nclass SemiSupervisedBatchDict(TypedDict):\n \"\"\"Class for finer control over typechecking.\"\"\"\n\n labeled: BaseBatchDict\n unlabeled: TensorType[\n \"sequence_length\", \"RGB\":3, \"image_height\", \"image_width\", float\n ]\n\n\nclass SemiSupervisedHeatmapBatchDict(TypedDict):\n \"\"\"Class for finer control over typechecking.\"\"\"\n\n labeled: HeatmapBatchDict\n unlabeled: TensorType[\n \"sequence_length\", \"RGB\":3, \"image_height\", \"image_width\", float\n ]\n\n\nclass BaseFeatureExtractor(LightningModule):\n \"\"\"Object that contains the base resnet feature extractor.\"\"\"\n\n def __init__(\n self,\n resnet_version: Literal[18, 34, 50, 101, 152] = 18,\n pretrained: bool = True,\n last_resnet_layer_to_get: int = -2,\n lr_scheduler: str = \"multisteplr\",\n lr_scheduler_params: Optional[dict] = None,\n ) -> None:\n \"\"\"A ResNet model that takes in images and generates features.\n\n ResNets will be loaded from torchvision and can be either pre-trained\n on ImageNet or randomly initialized. 
These were originally used for\n classification tasks, so we truncate their final fully connected layer.\n\n Args:\n resnet_version: which ResNet version to use; defaults to 18\n pretrained: True to load weights pretrained on imagenet\n last_resnet_layer_to_get: Defaults to -2.\n lr_scheduler: how to schedule learning rate\n lr_scheduler_params: params for specific learning rate schedulers\n\n \"\"\"\n super().__init__()\n print(\"\\n Initializing a {} instance.\".format(self._get_name()))\n\n self.resnet_version = resnet_version\n base = grab_resnet_backbone(\n resnet_version=self.resnet_version, pretrained=pretrained\n )\n self.num_fc_input_features = base.fc.in_features\n self.backbone = grab_layers_sequential(\n model=base,\n last_layer_ind=last_resnet_layer_to_get,\n )\n\n self.lr_scheduler = lr_scheduler\n self.lr_scheduler_params = lr_scheduler_params\n\n def get_representations(\n self,\n images: TensorType[\"batch\", \"channels\":3, \"image_height\", \"image_width\", float],\n ) -> TensorType[\"batch\", \"features\", \"rep_height\", \"rep_width\", float]:\n \"\"\"Forward pass from images to feature maps.\n\n Wrapper around the backbone's feature_extractor() method for typechecking\n purposes.\n See tests/models/test_base.py for example shapes.\n\n Args:\n images: a batch of images\n\n Returns:\n a representation of the images; features differ as a function of resnet\n version. Representation height and width differ as a function of image\n dimensions, and are not necessarily equal.\n\n \"\"\"\n return self.backbone(images)\n\n def forward(self, images):\n \"\"\"Forward pass from images to representations.\n\n Wrapper around self.get_representations().\n Fancier childern models will use get_representations() in their forward\n methods.\n\n Args:\n images (torch.tensor(float)): a batch of images.\n\n Returns:\n torch.tensor(float): a representation of the images.\n \"\"\"\n return self.get_representations(images)\n\n def configure_optimizers(self):\n \"\"\"Select optimizer, lr scheduler, and metric for monitoring.\"\"\"\n\n # standard adam optimizer\n optimizer = Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=1e-3)\n\n # define a scheduler that reduces the base learning rate\n if self.lr_scheduler == \"multisteplr\" or self.lr_scheduler == \"multistep_lr\":\n\n if self.lr_scheduler_params is None:\n milestones = MULTISTEPLR_MILESTONES_DEFAULT\n gamma = MULTISTEPLR_GAMMA_DEFAULT\n else:\n milestones = self.lr_scheduler_params.get(\n \"milestones\", MULTISTEPLR_MILESTONES_DEFAULT)\n gamma = self.lr_scheduler_params.get(\n \"gamma\", MULTISTEPLR_GAMMA_DEFAULT)\n\n scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=gamma)\n\n else:\n raise NotImplementedError(\n \"'%s' is an invalid LR scheduler\" % self.lr_scheduler\n )\n\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": scheduler,\n \"monitor\": \"val_supervised_loss\",\n }\n\n\nclass BaseSupervisedTracker(BaseFeatureExtractor):\n \"\"\"Base class for supervised trackers.\"\"\"\n\n @typechecked\n def get_loss_inputs_labeled(\n self,\n batch_dict: Union[BaseBatchDict, HeatmapBatchDict],\n ) -> dict:\n \"\"\"Return predicted coordinates for a batch of data.\"\"\"\n raise NotImplementedError\n\n @typechecked\n def evaluate_labeled(\n self,\n batch_dict: Union[BaseBatchDict, HeatmapBatchDict],\n stage: Optional[Literal[\"train\", \"val\", \"test\"]] = None,\n ) -> TensorType[(), float]:\n \"\"\"Compute and log the losses on a batch of labeled data.\"\"\"\n\n # forward pass; collected true and predicted 
heatmaps, keypoints\n data_dict = self.get_loss_inputs_labeled(batch_dict=batch_dict)\n\n # compute and log loss on labeled data\n loss, log_list = self.loss_factory(stage=stage, **data_dict)\n\n # compute and log rmse loss on labeled data\n loss_rmse, _ = self.rmse_loss(stage=stage, **data_dict)\n\n if stage:\n # log overall supervised loss\n self.log(f\"{stage}_supervised_loss\", loss, prog_bar=True)\n # log supervised rmse\n self.log(f\"{stage}_supervised_rmse\", loss_rmse)\n # log individual supervised losses\n for log_dict in log_list:\n self.log(**log_dict)\n\n return loss\n\n @typechecked\n def training_step(\n self,\n train_batch: Union[BaseBatchDict, HeatmapBatchDict],\n batch_idx: int,\n ) -> Dict[str, TensorType[(), float]]:\n \"\"\"Base training step, a wrapper around the `evaluate_labeled` method.\"\"\"\n loss = self.evaluate_labeled(train_batch, \"train\")\n return {\"loss\": loss}\n\n @typechecked\n def validation_step(\n self,\n val_batch: Union[BaseBatchDict, HeatmapBatchDict],\n batch_idx: int,\n ) -> None:\n \"\"\"Base validation step, a wrapper around the `evaluate_labeled` method.\"\"\"\n self.evaluate_labeled(val_batch, \"val\")\n\n @typechecked\n def test_step(\n self,\n test_batch: Union[BaseBatchDict, HeatmapBatchDict],\n batch_idx: int,\n ) -> None:\n \"\"\"Base test step, a wrapper around the `evaluate_labeled` method.\"\"\"\n self.evaluate_labeled(test_batch, \"test\")\n\n @typechecked\n def configure_optimizers(self) -> dict:\n \"\"\"Select optimizer, lr scheduler, and metric for monitoring.\"\"\"\n\n if getattr(self, \"upsampling_layers\", None) is not None:\n\n # single optimizer with single learning rate\n params = [\n # {\"params\": self.backbone.parameters()},\n # don't uncomment above line; the BackboneFinetuning callback should\n # add backbone to the params.\n {\n \"params\": self.upsampling_layers.parameters()\n }, # important this is the 0th element, for BackboneFinetuning callback\n ]\n\n else:\n # standard adam optimizer\n params = filter(lambda p: p.requires_grad, self.parameters())\n\n optimizer = Adam(params, lr=1e-3)\n\n # define a scheduler that reduces the base learning rate\n if self.lr_scheduler == \"multisteplr\" or self.lr_scheduler == \"multistep_lr\":\n\n if self.lr_scheduler_params is None:\n milestones = MULTISTEPLR_MILESTONES_DEFAULT\n gamma = MULTISTEPLR_GAMMA_DEFAULT\n else:\n milestones = self.lr_scheduler_params.get(\n \"milestones\", MULTISTEPLR_MILESTONES_DEFAULT)\n gamma = self.lr_scheduler_params.get(\n \"gamma\", MULTISTEPLR_GAMMA_DEFAULT)\n\n scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=gamma)\n\n else:\n raise NotImplementedError(\n \"'%s' is an invalid LR scheduler\" % self.lr_scheduler\n )\n\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": scheduler,\n \"monitor\": \"val_supervised_loss\",\n }\n\n\nclass SemiSupervisedTrackerMixin(object):\n \"\"\"Mixin class providing training step function for semi-supervised models.\"\"\"\n\n @typechecked\n def get_loss_inputs_unlabeled(self, batch: torch.Tensor) -> dict:\n \"\"\"Return predicted heatmaps and their softmaxes (estimated keypoints).\"\"\"\n raise NotImplementedError\n\n @typechecked\n def evaluate_unlabeled(\n self,\n batch: TensorType[\"batch\", \"channels\":3, \"image_height\", \"image_width\", float],\n stage: Optional[Literal[\"train\", \"val\", \"test\"]] = None,\n anneal_weight: Union[float, torch.Tensor] = 1.0,\n ) -> TensorType[(), float]:\n \"\"\"Compute and log the losses on a batch of unlabeled data (frames only).\"\"\"\n\n # 
forward pass: collect predicted heatmaps and keypoints\n data_dict = self.get_loss_inputs_unlabeled(batch=batch)\n\n # compute loss on unlabeled data\n loss, log_list = self.loss_factory_unsup(\n stage=stage,\n anneal_weight=anneal_weight,\n **data_dict,\n )\n\n if stage:\n # log individual unsupervised losses\n for log_dict in log_list:\n self.log(**log_dict)\n\n return loss\n\n @typechecked\n def training_step(\n self,\n train_batch: Union[SemiSupervisedBatchDict, SemiSupervisedHeatmapBatchDict],\n batch_idx: int,\n ) -> Dict[str, TensorType[(), float]]:\n \"\"\"Training step computes and logs both supervised and unsupervised losses.\"\"\"\n\n # on each epoch, self.total_unsupervised_importance is modified by the\n # AnnealWeight callback\n self.log(\n \"total_unsupervised_importance\",\n self.total_unsupervised_importance,\n prog_bar=True,\n )\n\n # computes and logs supervised losses\n # train_batch[\"labeled\"] contains:\n # - images\n # - keypoints\n # - heatmaps\n loss_super = self.evaluate_labeled(\n batch_dict=train_batch[\"labeled\"],\n stage=\"train\",\n )\n\n # computes and logs unsupervised losses\n # train_batch[\"unlabeled\"] contains:\n # - images\n loss_unsuper = self.evaluate_unlabeled(\n batch=train_batch[\"unlabeled\"],\n stage=\"train\",\n anneal_weight=self.total_unsupervised_importance,\n )\n\n # log total loss\n total_loss = loss_super + loss_unsuper\n self.log(\"total_loss\", total_loss, prog_bar=True)\n\n return {\"loss\": total_loss}\n\n def configure_optimizers(self):\n \"\"\"Single optimizer with different learning rates.\"\"\"\n\n if getattr(self, \"upsampling_layers\", None) is not None:\n # check if heatmap\n params = [\n # {\"params\": self.backbone.parameters()},\n # don't uncomment above line; the BackboneFinetuning callback should\n # add backbone to the params.\n {\n \"params\": self.upsampling_layers.parameters()\n }, # important this is the 0th element, for BackboneFinetuning callback\n ]\n\n else:\n # standard adam optimizer\n params = filter(lambda p: p.requires_grad, self.parameters())\n\n # define different learning rate for weights in front of unsupervised losses\n if len(self.loss_factory_unsup.loss_weights_parameter_dict) > 0:\n params.append(\n {\n \"params\": self.loss_factory_unsup.loss_weights_parameter_dict.parameters(),\n \"lr\": 1e-2,\n }\n )\n\n optimizer = Adam(params, lr=1e-3)\n\n # define a scheduler that reduces the base learning rate\n if self.lr_scheduler == \"multisteplr\" or self.lr_scheduler == \"multistep_lr\":\n\n if self.lr_scheduler_params is None:\n milestones = MULTISTEPLR_MILESTONES_DEFAULT\n gamma = MULTISTEPLR_GAMMA_DEFAULT\n else:\n milestones = self.lr_scheduler_params.get(\n \"milestones\", MULTISTEPLR_MILESTONES_DEFAULT)\n gamma = self.lr_scheduler_params.get(\n \"gamma\", MULTISTEPLR_GAMMA_DEFAULT)\n\n scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=gamma)\n\n else:\n raise NotImplementedError(\n \"'%s' is an invalid LR scheduler\" % self.lr_scheduler\n )\n\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": scheduler,\n \"monitor\": \"val_supervised_loss\",\n }\n\n # # single optimizer with different learning rates\n # def configure_optimizers(self):\n # params_net = [\n # # {\"params\": self.backbone.parameters()},\n # # don't uncomment above line; the BackboneFinetuning callback should add\n # # backbone to the params.\n # {\n # \"params\": self.upsampling_layers.parameters()\n # }, # important that this is the 0th element, for BackboneFineTuning\n # ]\n # optimizer = Adam(params_net, 
lr=1e-3)\n # scheduler = MultiStepLR(optimizer, milestones=[100, 200, 300], gamma=0.5)\n #\n # optimizers = [optimizer]\n # lr_schedulers = [scheduler]\n #\n # if self.learn_weights:\n # params_weights = [{\"params\": self.loss_weights_dict.parameters()}]\n # optimizer_weights = Adam(params_weights, lr=1e-3)\n # optimizers.append(optimizer_weights)\n # scheduler_weights = MultiStepLR(\n # optimizer, milestones=[100, 200, 300], gamma=0.5\n # )\n # lr_schedulers.append(scheduler_weights)\n #\n # return optimizers, lr_schedulers\n" ]
[ [ "torch.optim.Adam", "torch.nn.Sequential", "torch.optim.lr_scheduler.MultiStepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fofore/new-4d2-8-16u2
[ "4acffcc5763f77f202894ece9b39030a76a2e8ed" ]
[ "demo_with_time_cal.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport thread\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\nimport cv2\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dset\nfrom scipy.misc import imread, imresize\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections, vis_kiktech_detections\nfrom model.utils.blob import im_list_to_blob\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\n\nfrom model.faster_rcnn.mobilenet import mobilenet\nfrom model.faster_rcnn.shufflenet import shufflenet\n\nfrom scipy import misc\nimport Metrics\n\nimport pdb\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/vgg16.yml', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models',\n default=\"/srv/share/jyang375/models\")\n parser.add_argument('--image_dir', dest='image_dir',\n help='directory to load images for demo',\n default=\"images\")\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--bs', dest='batch_size',\n help='batch_size',\n default=1, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n parser.add_argument('--webcam_num', 
dest='webcam_num',\n help='webcam ID number',\n default=-1, type=int)\n # Start add by Minming, add the gt into the visualization\n parser.add_argument('--data_path', dest='data_path',\n help='the ground truth of the data set',\n default=\"./data/kiktech/kiktech2018joint10\", )\n # End add\n\n args = parser.parse_args()\n return args\n\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\n\ndef _get_image_blob(im):\n \"\"\"Converts an image into a network input.\n Arguments:\n im (ndarray): a color image in BGR order\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale_factors (list): list of image scales (relative to im) used\n in the image pyramid\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, np.array(im_scale_factors)\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n cfg.USE_GPU_NMS = args.cuda\n\n print('Using config:')\n pprint.pprint(cfg)\n np.random.seed(cfg.RNG_SEED)\n\n # train set\n # -- Note: Use validation set and disable the flipped to enable faster loading.\n\n input_dir = args.load_dir + \"/\" + args.net + \"/\" + args.dataset\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n\n pascal_classes = np.asarray(['__background__',\n 'person'\n ])\n\n ##now test with 20 classes\n pascal_classes = ('__background__', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n # initilize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(pascal_classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(pascal_classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(pascal_classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'mobilenet':\n fasterRCNN = mobilenet(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'shufflenet':\n fasterRCNN = shufflenet(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n print(\"load 
checkpoint %s\" % (load_name))\n if args.cuda > 0:\n checkpoint = torch.load(load_name)\n else:\n checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n print('load model successfully!')\n\n # pdb.set_trace()\n\n print(\"load checkpoint %s\" % (load_name))\n\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n dl_data = torch.LongTensor(1)\n\n # ship to cuda\n if args.cuda > 0:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n dl_data = dl_data.cuda()\n\n # make variable\n im_data = Variable(im_data, volatile=True)\n im_info = Variable(im_info, volatile=True)\n num_boxes = Variable(num_boxes, volatile=True)\n gt_boxes = Variable(gt_boxes, volatile=True)\n dl_data = Variable(dl_data, volatile=True)\n\n if args.cuda > 0:\n cfg.CUDA = True\n\n if args.cuda > 0:\n fasterRCNN.cuda()\n\n fasterRCNN.eval()\n\n start = time.time()\n max_per_image = 100\n thresh = 0.05\n vis = True\n\n webcam_num = args.webcam_num\n # Set up webcam or get image directories\n if webcam_num >= 0:\n cap = cv2.VideoCapture(webcam_num)\n num_images = 0\n else:\n imglist = os.listdir(args.image_dir)\n num_images = len(imglist)\n\n print('Loaded Photo: {} images.'.format(num_images))\n\n all_time = 0\n while (num_images >= 0):\n if webcam_num == -1:\n num_images -= 1\n\n # et image from the webcam\n if webcam_num >= 0:\n if not cap.isOpened():\n raise RuntimeError(\"Webcam could not open. Please check connection.\")\n ret, frame = cap.read()\n im_in = np.array(frame)\n # Load the demo image\n else:\n im_file = os.path.join(args.image_dir, imglist[num_images])\n # im = cv2.imread(im_file)\n im_in = np.array(imread(im_file))\n if len(im_in.shape) == 2:\n im_in = im_in[:, :, np.newaxis]\n im_in = np.concatenate((im_in, im_in, im_in), axis=2)\n # rgb -> bgr\n im = im_in[:, :, ::-1]\n # print(im.shape)\n\n blobs, im_scales = _get_image_blob(im)\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n im_blob = blobs\n im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)\n\n im_data_pt = torch.from_numpy(im_blob)\n im_data_pt = im_data_pt.permute(0, 3, 1, 2)\n im_info_pt = torch.from_numpy(im_info_np)\n\n im_data.data.resize_(im_data_pt.size()).copy_(im_data_pt)\n im_info.data.resize_(im_info_pt.size()).copy_(im_info_pt)\n gt_boxes.data.resize_(1, 1, 5).zero_()\n num_boxes.data.resize_(1).zero_()\n # dl_data.data.resize_(im_data_pt.size()).copy_(im_data_pt)\n dl_data.data.resize_(im_data_pt.size()).zero_()\n\n # pdb.set_trace()\n\n # Mod: by Jie, add evaluation of segmentation\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label, drive_line, drive_line_loss = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, dl_data)\n\n total_tic = time.time()\n\n def vis_drive_line(im2show, drive_line):\n # print('drive_line.shape', drive_line.shape)\n y_pred = drive_line.cpu().data.numpy()\n _idx = 0\n bs, c, h, w = drive_line.shape\n y_pred_flag = np.argmax(y_pred[_idx,], axis=0) # one-hot: (C, H, W)--> label: (H, W)\n hs, ws, cs = im.shape\n y_pred_flag = y_pred_flag.astype(np.uint8) # This step is very important\n y_pred_flag = imresize(y_pred_flag, (hs, ws), 
\n\n # find predicted drive-line pixels\n pred_idx = np.where(y_pred_flag == 1)\n\n # Color channel order is BGR\n mask_result = np.zeros((hs, ws, 3), dtype=np.uint8)\n mask_result[pred_idx[0], pred_idx[1], :] = 0, 255, 0 # mark predicted pixels in green\n # End mod\n\n im2show = cv2.addWeighted(im2show, 1, mask_result, 0.4, 0)\n\n return im2show\n\n def vis_bbox(im, rois, bbox_pred):\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n if args.cuda > 0:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n if args.cuda > 0:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= im_scales[0]\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n misc_tic = time.time()\n if vis:\n im2show = np.copy(im)\n for j in xrange(1, len(pascal_classes)):\n inds = torch.nonzero(scores[:, j] > thresh).view(-1)\n # if there is a detection\n if inds.numel() > 0:\n cls_scores = scores[:, j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS, force_cpu=not cfg.USE_GPU_NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n # add the detections of one class into the image\n if vis:\n data_path = args.data_path\n im2show = vis_kiktech_detections(im2show, pascal_classes[j], cls_dets.cpu().numpy(),\n data_path, im_file, 0.5)\n\n return im2show\n\n total_tic = time.time()\n\n tic = time.time()\n im2show = vis_bbox(im, rois, bbox_pred)\n toc = time.time()\n detect_time = toc - tic\n\n tic = time.time()\n im2show = vis_drive_line(im2show, drive_line)\n toc = time.time()\n seg_time = toc - tic\n\n if vis and (webcam_num == -1):\n result_path = os.path.join(args.image_dir, imglist[num_images][:-4] + \"_joint.jpg\")\n cv2.imwrite(result_path, im2show)\n\n else:\n im2showRGB = cv2.cvtColor(im2show, cv2.COLOR_BGR2RGB)\n cv2.imshow(\"frame\", im2showRGB)\n total_toc = time.time()\n total_time = total_toc - total_tic\n frame_rate = 1 / total_time\n print('Frame rate:', frame_rate)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n total_toc = time.time()\n total_time = total_toc - total_tic\n\n if webcam_num == -1:\n print('im_detect: {:d}/{:d} det_vis: {:.3f}s seg_vis: {:.3f}s total_time: {:.3f}s \\r' \\\n .format(num_images + 1, 
len(imglist), detect_time, seg_time, total_time))\n all_time += total_time\n\n print('total_time: {:.3f}s'.format(all_time))\n\n if webcam_num >= 0:\n cap.release()\n cv2.destroyAllWindows()\n" ]
[ [ "torch.load", "numpy.asarray", "numpy.round", "numpy.max", "numpy.concatenate", "torch.FloatTensor", "numpy.where", "torch.autograd.Variable", "torch.from_numpy", "numpy.copy", "numpy.argmax", "torch.sort", "torch.nonzero", "numpy.zeros", "torch.LongTensor", "numpy.min", "numpy.array", "scipy.misc.imresize", "numpy.random.seed", "numpy.tile", "scipy.misc.imread" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.10", "0.16", "0.19", "0.18", "0.12", "1.0", "0.17", "1.2" ], "tensorflow": [] } ]
EPFL-LCSB/geek
[ "62fd7bf19aaf6b5f08928825e09ae8f6e7a41bb3" ]
[ "paper/geek_qssa_example.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: geek\n :platform: Unix, Windows\n :synopsis: GEneralised Elementary Kinetics\n\n.. moduleauthor:: geek team\n\n[---------]\n\nCopyright 2018 Laboratory of Computational Systems Biotechnology (LCSB),\nEcole Polytechnique Federale de Lausanne (EPFL), Switzerland\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\nfrom pandas import read_csv, DataFrame\nimport random\nimport numpy as np\nfrom geek.analysis import geek_regression\n\n\n\"\"\"\nDefine the GEEK Michaelis-Menten ODE-System\n\n\"\"\"\n\nseed = 1\n\ndf = read_csv('../data/result_full_factorial_pgm.csv')\n\n# Reference concentrations\npgm = 64e-6\n\ntotal_enzyme = pgm\n\ng3p = 49e-6\ng2p = g3p\n\n# Define microscopic reaction rate constants:\nk1f = 1.52e5 # 1/Ms\nk1b = 10.0 # 1/s\nk2f = 22.0 # 1/s\nk2b = 3.29e5 # 1/Ms\n\n# For Reviwer 2 comments\nk2b = 0.0 # 1/Ms\n\n\n\nreference_concentrations = [pgm*0.5, pgm*0.5, g3p, g2p]\nconcentrations = ['enzyme_complex_concentration',\n 'enzyme_concentration',\n 'product_concentration',\n 'substrate_concentration']\n\n\n# Filter the data frame for specific condition\n\nthis_volume_fraction = 0.0\nthis_mu = 31.9\nthis_sigma = 0.825\n\nthis_df = df [ (df['sigma_mass'] == this_sigma) &\n (df['mu_mass'] == this_mu) &\n (df['volume_fraction'] == this_volume_fraction)]\n\n\n# Extract the GEEK parameters from Linear regression\nk1_fwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k1_fwd_relative',\n verbose=False)\n\nk1_bwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k1_bwd_relative',\n verbose=False)\nk2_fwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k1_fwd_relative',\n verbose=False)\n\nk2_bwd_params = geek_regression(this_df,\n concentrations,\n reference_concentrations,\n 'k2_bwd_relative',\n verbose=False)\n\n\n\nrandom.seed(seed)\n#Map to parameter dict\nparam_dict = {\n 'k_1f0': k1f,\n 'k_1b0': k1b,\n 'beta_1f': k1_fwd_params['beta_lb'] + (k1_fwd_params['beta_ub'] - k1_fwd_params['beta_lb']) * random.random(),\n 'alpha_ES_1f': k1_fwd_params['alpha_enzyme_complex_concentration_lb'] + (\n k1_fwd_params['alpha_enzyme_complex_concentration_ub'] - k1_fwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_1f': k1_fwd_params['alpha_enzyme_concentration_lb'] + (\n k1_fwd_params['alpha_enzyme_concentration_ub'] - k1_fwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_1f': k1_fwd_params['alpha_product_concentration_lb'] + (\n k1_fwd_params['alpha_product_concentration_ub'] - k1_fwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_1f': k1_fwd_params['alpha_substrate_concentration_lb'] + (\n k1_fwd_params['alpha_substrate_concentration_ub'] - k1_fwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'beta_1b': k1_bwd_params['beta_lb'] + (k1_bwd_params['beta_ub'] - k1_bwd_params['beta_lb']) * random.random(),\n 'alpha_ES_1b': 
\n k1_bwd_params['alpha_enzyme_complex_concentration_ub'] - k1_bwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_1b': k1_bwd_params['alpha_enzyme_concentration_lb'] + (\n k1_bwd_params['alpha_enzyme_concentration_ub'] - k1_bwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_1b': k1_bwd_params['alpha_product_concentration_lb'] + (\n k1_bwd_params['alpha_product_concentration_ub'] - k1_bwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_1b': k1_bwd_params['alpha_substrate_concentration_lb'] + (\n k1_bwd_params['alpha_substrate_concentration_ub'] - k1_bwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'k_2f0': k2f,\n 'k_2b0': k2b,\n 'beta_2f': k2_fwd_params['beta_lb'] + (k2_fwd_params['beta_ub'] - k2_fwd_params['beta_lb']) * random.random(),\n 'alpha_ES_2f': k2_fwd_params['alpha_enzyme_complex_concentration_lb'] + (\n k2_fwd_params['alpha_enzyme_complex_concentration_ub'] - k2_fwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_2f': k2_fwd_params['alpha_enzyme_concentration_lb'] + (\n k2_fwd_params['alpha_enzyme_concentration_ub'] - k2_fwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_2f': k2_fwd_params['alpha_product_concentration_lb'] + (\n k2_fwd_params['alpha_product_concentration_ub'] - k2_fwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_2f': k2_fwd_params['alpha_substrate_concentration_lb'] + (\n k2_fwd_params['alpha_substrate_concentration_ub'] - k2_fwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'beta_2b': k2_bwd_params['beta_lb'] + (k2_bwd_params['beta_ub'] - k2_bwd_params['beta_lb']) * random.random(),\n 'alpha_ES_2b': k2_bwd_params['alpha_enzyme_complex_concentration_lb'] + (\n k2_bwd_params['alpha_enzyme_complex_concentration_ub'] - k2_bwd_params[\n 'alpha_enzyme_complex_concentration_lb']) * random.random(),\n 'alpha_E_2b': k2_bwd_params['alpha_enzyme_concentration_lb'] + (\n k2_bwd_params['alpha_enzyme_concentration_ub'] - k2_bwd_params[\n 'alpha_enzyme_concentration_lb']) * random.random(),\n 'alpha_P_2b': k2_bwd_params['alpha_product_concentration_lb'] + (\n k2_bwd_params['alpha_product_concentration_ub'] - k2_bwd_params[\n 'alpha_product_concentration_lb']) * random.random(),\n 'alpha_S_2b': k2_bwd_params['alpha_substrate_concentration_lb'] + (\n k2_bwd_params['alpha_substrate_concentration_ub'] - k2_bwd_params[\n 'alpha_substrate_concentration_lb']) * random.random(),\n\n 'ES0': reference_concentrations[0],\n 'E0': reference_concentrations[1],\n 'P0': reference_concentrations[2],\n 'S0': reference_concentrations[3],\n 'E_tot': total_enzyme,\n}\n\n\"\"\"\nDeclare ODE-Problem\n\"\"\"\nfrom sympy import symbols, Symbol, nsolve\nfrom sympy import N as numerical\nfrom sympy import re as real\nfrom sympy import im as imag\nfrom sympy import exp as sym_exp\n\n# Variables\nES, E, P, S = symbols(['ES', 'E', 'P', 'S'])\nvariables = [ES, E, P, S]\n# Parameters\nk_1f0, k_1b0, k_2f0, k_2b0 = symbols(['k_1f0', 'k_1b0', 'k_2f0', 'k_2b0'])\n\n# Define symbols for the GEEK parameters (names must match the unpacking order)\nbeta_1f, beta_1b, beta_2f, beta_2b = symbols(['beta_1f', 'beta_1b', 'beta_2f', 'beta_2b'])\nalpha_ES_1f, alpha_ES_1b, alpha_ES_2b, alpha_ES_2f = symbols(['alpha_ES_1f', 'alpha_ES_1b', 'alpha_ES_2b', 'alpha_ES_2f'])\nalpha_E_1f, alpha_E_1b, alpha_E_2b, alpha_E_2f = symbols(['alpha_E_1f', 'alpha_E_1b', 'alpha_E_2b', 'alpha_E_2f'])\nalpha_P_1f, alpha_P_1b, alpha_P_2f, alpha_P_2b = symbols(['alpha_P_1f', 'alpha_P_1b', 'alpha_P_2f', 'alpha_P_2b'])\nalpha_S_1f, alpha_S_1b, alpha_S_2f, alpha_S_2b = symbols(['alpha_S_1f', 'alpha_S_1b', 'alpha_S_2f', 'alpha_S_2b'])\n\nES0, E0, P0, S0 = symbols(['ES0', 'E0', 'P0', 'S0'])\n\node_params = [k_1f0, k_1b0, k_2f0, k_2b0,\n beta_1f, beta_1b, beta_2f, beta_2b,\n alpha_ES_1f, alpha_ES_1b, alpha_ES_2b, alpha_ES_2f,\n alpha_E_1f, alpha_E_1b, alpha_E_2b, alpha_E_2f,\n alpha_P_1f, alpha_P_1b, alpha_P_2f, alpha_P_2b,\n alpha_S_1f, alpha_S_1b, alpha_S_2f, alpha_S_2b,\n ES0, E0, P0, S0]\n# Reactions\n\ngeek_reactions = {\n 'r_1f': k_1f0 * S * E * sym_exp(beta_1f) * (ES / ES0) ** alpha_ES_1f * (E / E0) ** alpha_E_1f * (P / P0) ** alpha_P_1f * (S / S0) ** alpha_S_1f,\n 'r_1b': k_1b0 * ES * sym_exp(beta_1b) * (ES / ES0) ** alpha_ES_1b * (E / E0) ** alpha_E_1b * (P / P0) ** alpha_P_1b * (S / S0) ** alpha_S_1b,\n 'r_2f': k_2f0 * ES * sym_exp(beta_2f) * (ES / ES0) ** alpha_ES_2f * (E / E0) ** alpha_E_2f * (P / P0) ** alpha_P_2f * (S / S0) ** alpha_S_2f,\n 'r_2b': k_2b0 * P * E * sym_exp(beta_2b) * (ES / ES0) ** alpha_ES_2b * (E / E0) ** alpha_E_2b * (P / P0) ** alpha_P_2b * (S / S0) ** alpha_S_2b,\n}\n\n# Expressions\n\nexpressions = {\n ES: geek_reactions['r_1f'] + geek_reactions['r_2b'] - geek_reactions['r_1b'] - geek_reactions['r_2f'],\n E: -(geek_reactions['r_1f'] + geek_reactions['r_2b'] - geek_reactions['r_1b'] - geek_reactions['r_2f']),\n S: -geek_reactions['r_1f'] + geek_reactions['r_1b'],\n P: geek_reactions['r_2f'] - geek_reactions['r_2b'],\n}\n\n\nE_tot = symbols('E_tot')\n\n# QSSA expression: eliminate the free enzyme via the conservation E = E_tot - ES\nES_qss = expressions[ES].subs(E, E_tot - ES)\n\np = np.arange(1e-3, 10, 0.1)*g3p\ns = np.arange(1e-3, 10, 0.1)*g2p\n\ndata = np.array([[0., 0., 0.], ])\n\nfor the_p in p:\n for the_s in s:\n ES_qss_temp = ES_qss.subs({Symbol(key): val for key, val in param_dict.items()})\n ES_qss_temp = ES_qss_temp.subs({S: the_s, P: the_p})\n ES_qss_val = nsolve(ES_qss_temp, ES, reference_concentrations[0])\n if abs(imag(ES_qss_val)) < 1e-9:\n ES_qss_val = real(ES_qss_val)\n E_qss_val = param_dict['E_tot'] - ES_qss_val\n else:\n raise ValueError('Invalid output: complex ES steady state!')\n\n\n v_net = geek_reactions['r_2f'] - geek_reactions['r_2b']\n\n this_params = {'ES': ES_qss_val, 'E': E_qss_val, 'S': the_s, 'P': the_p}\n this_params.update(param_dict)\n\n tmp = v_net.subs({Symbol(key): val for key, val in this_params.items()})\n v_net_val = numerical(tmp)\n\n data = np.append(data, np.array([[the_s, the_p, v_net_val]]), axis=0)\n\ndf = DataFrame(data=data, columns=['S', 'P', 'v_net'])\n" ]
[ [ "numpy.arange", "numpy.array", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
JiarunLiu/Co-correcting
[ "4e3ca4951de5d73ca812bbbcfe666273082ff2fd" ]
[ "models/densenet.py" ]
[ "import re\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom collections import OrderedDict\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom torch import Tensor\nfrom torch.jit.annotations import List\n\n\n__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']\n\nmodel_urls = {\n 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',\n 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',\n 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',\n 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',\n}\n\n\nclass _DenseLayer(nn.Module):\n def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False):\n super(_DenseLayer, self).__init__()\n self.add_module('norm1', nn.BatchNorm2d(num_input_features)),\n self.add_module('relu1', nn.ReLU(inplace=True)),\n self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *\n growth_rate, kernel_size=1, stride=1,\n bias=False)),\n self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),\n self.add_module('relu2', nn.ReLU(inplace=True)),\n self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,\n kernel_size=3, stride=1, padding=1,\n bias=False)),\n self.drop_rate = float(drop_rate)\n self.memory_efficient = memory_efficient\n\n def bn_function(self, inputs):\n # type: (List[Tensor]) -> Tensor\n concated_features = torch.cat(inputs, 1)\n bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484\n return bottleneck_output\n\n # todo: rewrite when torchscript supports any\n def any_requires_grad(self, input):\n # type: (List[Tensor]) -> bool\n for tensor in input:\n if tensor.requires_grad:\n return True\n return False\n\n @torch.jit.unused # noqa: T484\n def call_checkpoint_bottleneck(self, input):\n # type: (List[Tensor]) -> Tensor\n def closure(*inputs):\n return self.bn_function(*inputs)\n\n return cp.checkpoint(closure, input)\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input):\n # type: (List[Tensor]) -> (Tensor)\n pass\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input):\n # type: (Tensor) -> (Tensor)\n pass\n\n # torchscript does not yet support *args, so we overload method\n # allowing it to take either a List[Tensor] or single Tensor\n def forward(self, input): # noqa: F811\n if isinstance(input, Tensor):\n prev_features = [input]\n else:\n prev_features = input\n\n if self.memory_efficient and self.any_requires_grad(prev_features):\n if torch.jit.is_scripting():\n raise Exception(\"Memory Efficient not supported in JIT\")\n\n bottleneck_output = self.call_checkpoint_bottleneck(prev_features)\n else:\n bottleneck_output = self.bn_function(prev_features)\n\n new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))\n if self.drop_rate > 0:\n new_features = F.dropout(new_features, p=self.drop_rate,\n training=self.training)\n return new_features\n\n\nclass _DenseBlock(nn.ModuleDict):\n _version = 2\n\n def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):\n super(_DenseBlock, self).__init__()\n for i in range(num_layers):\n layer = _DenseLayer(\n num_input_features + i * growth_rate,\n growth_rate=growth_rate,\n bn_size=bn_size,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient,\n )\n self.add_module('denselayer%d' % (i + 1), layer)\n\n 
def forward(self, init_features):\n features = [init_features]\n for name, layer in self.items():\n new_features = layer(features)\n features.append(new_features)\n return torch.cat(features, 1)\n\n\nclass _Transition(nn.Sequential):\n def __init__(self, num_input_features, num_output_features):\n super(_Transition, self).__init__()\n self.add_module('norm', nn.BatchNorm2d(num_input_features))\n self.add_module('relu', nn.ReLU(inplace=True))\n self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,\n kernel_size=1, stride=1, bias=False))\n self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))\n\n\nclass DenseNet(nn.Module):\n r\"\"\"Densenet-BC model class, based on\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n growth_rate (int) - how many filters to add each layer (`k` in paper)\n block_config (list of 4 ints) - how many layers in each pooling block\n num_init_features (int) - the number of filters to learn in the first convolution layer\n bn_size (int) - multiplicative factor for number of bottle neck layers\n (i.e. bn_size * k features in the bottleneck layer)\n drop_rate (float) - dropout rate after each dense layer\n num_classes (int) - number of classification classes\n memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n \"\"\"\n\n __constants__ = ['features']\n\n def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),\n num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False):\n\n super(DenseNet, self).__init__()\n\n # First convolution\n self.features = nn.Sequential(OrderedDict([\n ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('norm0', nn.BatchNorm2d(num_init_features)),\n ('relu0', nn.ReLU(inplace=True)),\n ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),\n ]))\n\n # Each denseblock\n num_features = num_init_features\n for i, num_layers in enumerate(block_config):\n block = _DenseBlock(\n num_layers=num_layers,\n num_input_features=num_features,\n bn_size=bn_size,\n growth_rate=growth_rate,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient\n )\n self.features.add_module('denseblock%d' % (i + 1), block)\n num_features = num_features + num_layers * growth_rate\n if i != len(block_config) - 1:\n trans = _Transition(num_input_features=num_features,\n num_output_features=num_features // 2)\n self.features.add_module('transition%d' % (i + 1), trans)\n num_features = num_features // 2\n\n # Final batch norm\n self.features.add_module('norm5', nn.BatchNorm2d(num_features))\n\n # Linear layer\n self.classifier = nn.Linear(num_features, num_classes)\n\n # Official init from torch repo.\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out\n\n\ndef _load_state_dict(model, model_url, progress):\n # '.'s are no longer allowed in module names, but previous _DenseLayer\n # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.\n # They are also in the checkpoints 
in model_urls. This pattern is used\n # to find such keys.\n pattern = re.compile(\n r'^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')\n\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n for key in list(state_dict.keys()):\n res = pattern.match(key)\n if res:\n new_key = res.group(1) + res.group(2)\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\n try:\n model.load_state_dict(state_dict)\n except RuntimeError:\n # fall back to a partial load when strict loading fails\n model = trans_state_dict(model, state_dict)\n return model\n\ndef trans_state_dict(model, pretrained_dict):\n model_dict = model.state_dict()\n new_pretrained_dict = {}\n for k, v in pretrained_dict.items():\n if k in model_dict:\n if model_dict[k].shape == v.shape:\n new_pretrained_dict[k] = v\n model_dict.update(new_pretrained_dict)\n # load_state_dict mutates the model in place and does not return it\n model.load_state_dict(model_dict)\n return model\n\ndef _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,\n **kwargs):\n model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)\n if pretrained:\n _load_state_dict(model, model_urls[arch], progress)\n return model\n\n\ndef densenet121(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-121 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n \"\"\"\n return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,\n **kwargs)\n\n\ndef densenet161(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-161 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n \"\"\"\n return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,\n **kwargs)\n\n\ndef densenet169(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-169 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n \"\"\"\n return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,\n **kwargs)\n\n\ndef densenet201(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-201 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n but slower. Default: *False*. 
See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n \"\"\"\n return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,\n **kwargs)" ]
[ [ "torch.cat", "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.functional.relu", "torch.utils.checkpoint.checkpoint", "torch.jit.is_scripting", "torch.nn.BatchNorm2d", "torch.flatten", "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ankur-gos/RE-Flex
[ "3c34343d12c4f251b5fee10a20dc55bddb930043" ]
[ "reflex/qa_runner.py" ]
[ "\"\"\"\nClasses for running QA inference\n\"\"\"\nimport os\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom transformers import BertConfig, BertForQuestionAnswering, BertTokenizer\nfrom dataclasses import dataclass\nfrom reflex.utils import load_file\nfrom reflex.squad_utils import convert_examples_to_features, read_input_examples, RawResult, get_predictions\nfrom reflex.metrics import calculate_em_f1, calculate_relation_metrics\nfrom tqdm import tqdm\n\n@dataclass(frozen=True)\nclass Sample:\n head: str\n context: str\n tail: str\n question: str\n\ndef to_list(tensor):\n return tensor.detach().cpu().tolist()\n\nclass QARunner:\n def __init__(self, qa_path, relations_filepath, data_directory, batch_size, must_choose_answer, device, trained_to_reject, calculate_single_error=True):\n self.trained_to_reject = trained_to_reject\n self.qa_path = qa_path # path to qa weights\n self.relations_filepath = relations_filepath # path to relations file\n self.data_directory = data_directory # data directory path\n self.tokenizer = BertTokenizer.from_pretrained('bert-large-cased') # tokenizer\n self.model = BertForQuestionAnswering.from_pretrained(qa_path) # Load the model\n self.model.to(device)\n self.device = device\n\n self.batch_size = batch_size\n self.must_choose_answer = must_choose_answer # For datasets where there is always an answer, setting this to true will ensure that QA models that can return \"answer doesn't exist\" will always return a span in the context\n self.total_samples = 0\n if calculate_single_error:\n self.se_list = []\n else:\n self.se_list = None\n\n def predict(self):\n # Load relations file\n relations = load_file(self.relations_filepath)\n # Iterate through relations file and predict for each relation\n aggregate_em = aggregate_f1 = 0\n per_relation_metrics = {}\n for relation in relations:\n data_file = os.path.join(self.data_directory, relation['relation']) + '.jsonl'\n data = load_file(data_file)\n # Adding to set filters any accidental duplicates\n samples = set()\n for d in data:\n question = relation['question'].replace('[X]', d['subject'])\n samples.add(Sample(d['subject'], d['context'], d['object'], question))\n samples = list(samples)\n print(f'Loaded relation {relation[\"relation\"]}. 
There are {len(samples)} test samples')\n # Most of below is taken directly from HuggingFace, which is what Lewis et al use to train their QA head\n # Defaults from huggingface\n do_lower_case = True\n max_answer_length = 30\n verbose_logging = False\n null_score_diff_threshold = 0.0\n n_best = 20\n max_query_length = 64\n doc_stride = 128\n max_seq_length = 384\n\n # Load the samples into squad format\n examples = read_input_examples(samples)\n features = convert_examples_to_features(examples=examples,\n tokenizer=self.tokenizer,\n max_seq_length=max_seq_length,\n doc_stride=doc_stride,\n max_query_length=max_query_length,\n is_training=False,\n cls_token_segment_id=0,\n pad_token_segment_id=0,\n cls_token_at_end=False,\n sequence_a_is_doc=False)\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)\n all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)\n all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,\n all_example_index, all_cls_index, all_p_mask)\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=self.batch_size)\n all_results = []\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n #stime = time.time()\n batch = tuple(t.to(device=self.device) for t in batch)\n with torch.no_grad():\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1]\n }\n inputs['token_type_ids'] = batch[2]\n example_indices = batch[3]\n outputs = self.model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n result = RawResult(unique_id = unique_id,\n start_logits = to_list(outputs[0][i]),\n end_logits = to_list(outputs[1][i]))\n all_results.append(result)\n\n predictions = get_predictions(examples, features, all_results, n_best,\n max_answer_length, do_lower_case,\n verbose_logging,\n self.trained_to_reject, null_score_diff_threshold, must_choose_answer=self.must_choose_answer)\n predictions = [predictions[p] for p in predictions]\n self.total_samples += len(predictions)\n relation_em, relation_f1, per_relation_metrics, self.se_list, _ = calculate_relation_metrics(samples, predictions, per_relation_metrics, relation, self.se_list, False)\n aggregate_em += relation_em\n aggregate_f1 += relation_f1\n aggregate_em /= len(relations)\n aggregate_f1 /= len(relations)\n return aggregate_em, aggregate_f1, per_relation_metrics\n\n\n" ]
[ [ "torch.utils.data.TensorDataset", "torch.utils.data.SequentialSampler", "torch.utils.data.DataLoader", "torch.tensor", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hmludwig/aoc2020
[ "c1a5860feec80a9b8c1b039ceba2cc64ec625a6b" ]
[ "src/day16.py" ]
[ "import sys\nfrom collections import defaultdict\nimport numpy as np\n\nf = open(sys.argv[1])\ndata = f.read().strip().splitlines()\ndata = [d for d in data if d.strip() != '']\nrules = dict()\nrules_section = True\nmy_ticket = None\ntickets = list()\ndeparture_rules = list()\n\nfor i, d in enumerate(data):\n if d == 'your ticket:':\n rules_section = False\n my_ticket = [int(x) for x in data[i + 1].split(',')]\n elif d == 'nearby tickets:':\n for k in range(i + 1, len(data)):\n tickets.append([int(x) for x in data[k].split(',')])\n break\n elif rules_section:\n tmp = d.split(': ')\n rule_name = tmp[0]\n if 'departure' in rule_name:\n departure_rules.append(rule_name)\n rule_range = [x.split('-') for x in tmp[1].split(' or ')]\n rules[rule_name] = [(int(x[0]), int(x[1])) for x in rule_range]\n\nerror_rate = 0\ninvalid = list()\n\nfor i in range(len(tickets)):\n for j in tickets[i]:\n valid = False\n for m in rules:\n for n in rules[m]:\n if j >= n[0] and j <= n[1]:\n valid = True\n break\n if not valid:\n invalid.append(tickets[i])\n error_rate += j\n\nprint(f'Part 1: {error_rate}')\n\n\ndef check_rule_at_pos(ticket, rule, pos):\n if (ticket[pos] >= rule[0][0] and\n ticket[pos] <= rule[0][1]) or (ticket[pos] >= rule[1][0] and\n ticket[pos] <= rule[1][1]):\n return True\n return False\n\n\nrule_index = defaultdict(list)\n\nfor r in rules:\n rule = rules[r]\n for pos in range(len(my_ticket)):\n good_pos = True\n for ticket in tickets:\n if ticket not in invalid:\n if not check_rule_at_pos(ticket, rule, pos):\n good_pos = False\n break\n if good_pos:\n rule_index[r].append(pos)\n\ntable = list()\nfor i, k in enumerate(rule_index):\n table.append([])\n for j in range(len(my_ticket)):\n if j in rule_index[k]:\n table[i].append(1)\n else:\n table[i].append(0)\n\npositions = list()\ntable = np.array(table)\n\nwhile len(positions) != 6:\n for i in range(len(my_ticket)):\n if sum(table[:, i]) == 1:\n for j, x in enumerate(table[:, i]):\n if x == 1:\n if j < 6:\n positions.append(i)\n table[j, :] = 0\n\nprod = 1\nfor x in positions:\n prod *= my_ticket[x]\n\nprint(f'Part 2: {prod}')\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liqiwa/python_work
[ "3d1198d5616b28a37fee7dfba5bbef0e1d489c2d" ]
[ "15/mpl_squares.py" ]
[ "import matplotlib.pyplot as plt \n\ninput_values = [1,2,3,4,5]\nsquares = [1,4,9,16,25]\n\nplt.plot(input_values,squares,linewidth = 5)\n\nplt.title(\"Square Numbers\",fontsize = 24)\nplt.xlabel(\"Value\",fontsize = 14)\nplt.ylabel(\"Square of Value\",fontsize = 14)\n\nplt.tick_params(axis = 'both',labelsize = 14)\n\n\nplt.show() " ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NextCenturyCorporation/mcs-scene-generator
[ "e0a6ee778359cadd2de682a5006581b7a6134431" ]
[ "tests/optimal_path_test.py" ]
[ "import numpy\nimport pytest\nfrom numpy.testing import assert_array_almost_equal_nulp\nfrom shapely.geometry import Point\n\nfrom generator import geometry, optimal_path\n\n\ndef test_dilate_and_unify_object_bounds():\n bounds = [\n {'x': -1.0, 'z': -1.0},\n {'x': -1.0, 'z': 1.0},\n {'x': 1.0, 'z': 1.0},\n {'x': 1.0, 'z': -1.0}\n ]\n\n output = optimal_path._dilate_and_unify_object_bounds(\n [bounds],\n 0.5,\n Point(0, -4),\n Point(0, 4)\n )\n assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[\n (-1.0, -1.5), (-1.5, -1.0), (-1.5, 1.0), (-1.0, 1.5), (1.0, 1.5),\n (1.5, 1.0), (1.5, -1.0), (1.0, -1.5)\n ]]))\n\n output = optimal_path._dilate_and_unify_object_bounds(\n [bounds],\n 1,\n Point(0, -4),\n Point(0, 4)\n )\n assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[\n (-1.0, -2), (-2, -1.0), (-2, 1.0), (-1.0, 2), (1.0, 2),\n (2, 1.0), (2, -1.0), (1.0, -2)\n ]]))\n\n # Will not dilate if source is inside bounds.\n output = optimal_path._dilate_and_unify_object_bounds(\n [bounds],\n 0.5,\n Point(0, -1.25),\n Point(0, 4)\n )\n assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[\n (-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)\n ]]))\n\n # Will not dilate if target is inside bounds.\n output = optimal_path._dilate_and_unify_object_bounds(\n [bounds],\n 0.5,\n Point(0, -4),\n Point(0, 1.25)\n )\n assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[\n (-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)\n ]]))\n\n\ndef test_dilate_and_unify_object_bounds_multiple_poly():\n bounds_1 = [\n {'x': -1.0, 'z': -1.0},\n {'x': -1.0, 'z': 1.0},\n {'x': 1.0, 'z': 1.0},\n {'x': 1.0, 'z': -1.0}\n ]\n bounds_2 = [\n {'x': -4.0, 'z': -1.0},\n {'x': -4.0, 'z': 1.0},\n {'x': -3.0, 'z': 1.0},\n {'x': -3.0, 'z': -1.0}\n ]\n bounds_3 = [\n {'x': 3.0, 'z': -1.0},\n {'x': 3.0, 'z': 1.0},\n {'x': 4.0, 'z': 1.0},\n {'x': 4.0, 'z': -1.0}\n ]\n\n output = optimal_path._dilate_and_unify_object_bounds(\n [bounds_1, bounds_2, bounds_3],\n 0.5,\n Point(0, -4),\n Point(0, 4)\n )\n assert len(output) == 3\n assert_array_almost_equal_nulp(numpy.array(output[0]), numpy.array([\n (-4.0, -1.5), (-4.5, -1.0), (-4.5, 1.0), (-4.0, 1.5), (-3.0, 1.5),\n (-2.5, 1.0), (-2.5, -1.0), (-3.0, -1.5)\n ]))\n assert_array_almost_equal_nulp(numpy.array(output[1]), numpy.array([\n (3.0, -1.5), (2.5, -1.0), (2.5, 1.0), (3.0, 1.5), (4.0, 1.5),\n (4.5, 1.0), (4.5, -1.0), (4.0, -1.5)\n ]))\n assert_array_almost_equal_nulp(numpy.array(output[2]), numpy.array([\n (-1.0, -1.5), (-1.5, -1.0), (-1.5, 1.0), (-1.0, 1.5), (1.0, 1.5),\n (1.5, 1.0), (1.5, -1.0), (1.0, -1.5)\n ]))\n\n bounds_4 = [\n {'x': 1.0, 'z': -1.0},\n {'x': 1.0, 'z': 1.0},\n {'x': 3.0, 'z': 1.0},\n {'x': 3.0, 'z': -1.0}\n ]\n\n output = optimal_path._dilate_and_unify_object_bounds(\n [bounds_1, bounds_2, bounds_3, bounds_4],\n 0.5,\n Point(0, -4),\n Point(0, 4)\n )\n assert len(output) == 2\n assert_array_almost_equal_nulp(numpy.array(output[0]), numpy.array([\n (-1.0, -1.5), (-1.5, -1.0), (-1.5, 1.0), (-1.0, 1.5), (1.0, 1.5),\n (3.0, 1.5), (4.0, 1.5), (4.5, 1.0), (4.5, -1.0), (4.0, -1.5),\n (3.0, -1.5), (1.0, -1.5)\n ]))\n assert_array_almost_equal_nulp(numpy.array(output[1]), numpy.array([\n (-4.0, -1.5), (-4.5, -1.0), (-4.5, 1.0), (-4.0, 1.5), (-3.0, 1.5),\n (-2.5, 1.0), (-2.5, -1.0), (-3.0, -1.5)\n ]))\n\n\ndef test_dilate_target_bounds():\n output = optimal_path._dilate_target_bounds([\n {'x': -1.0, 'z': -1.0},\n {'x': -1.0, 'z': 1.0},\n {'x': 1.0, 'z': 1.0},\n {'x': 1.0, 'z': -1.0}\n ])\n 
assert_array_almost_equal_nulp(numpy.array(output), numpy.array([\n (1.0, -1.99), (0, -1.99), (-1.0, -1.99),\n (-1.99, -1.0), (-1.99, 0), (-1.99, 1.0),\n (-1.0, 1.99), (0, 1.99), (1.0, 1.99),\n (1.99, 1.0), (1.99, 0), (1.99, -1.0)\n ]))\n\n\ndef test_find_target_or_parent_dict():\n output = optimal_path._find_target_or_parent_dict({\n 'id': 'id_0',\n 'type': 'soccer_ball'\n }, [{\n 'id': 'id_1',\n 'type': 'ball'\n }, {\n 'id': 'id_2',\n 'type': 'duck'\n }, {\n 'id': 'id_3',\n 'type': 'sofa'\n }, {\n 'id': 'id_4',\n 'type': 'suitcase'\n }])\n assert output == {'id': 'id_0', 'type': 'soccer_ball'}\n\n\ndef test_find_target_or_parent_dict_with_parent():\n output = optimal_path._find_target_or_parent_dict({\n 'id': 'id_0',\n 'type': 'soccer_ball',\n 'locationParent': 'id_4'\n }, [{\n 'id': 'id_1',\n 'type': 'ball'\n }, {\n 'id': 'id_2',\n 'type': 'duck'\n }, {\n 'id': 'id_3',\n 'type': 'sofa'\n }, {\n 'id': 'id_4',\n 'type': 'suitcase'\n }])\n assert output == {'id': 'id_4', 'type': 'suitcase'}\n\n\ndef test_remove_duplicate_paths():\n path_1 = optimal_path.ShortestPath([{\n 'action': 'MoveAhead',\n 'params': {}\n }, {\n 'action': 'MoveAhead',\n 'params': {}\n }], None, None)\n path_2 = optimal_path.ShortestPath([{\n 'action': 'MoveAhead',\n 'params': {}\n }, {\n 'action': 'PickupObject',\n 'params': {'objectId': 'a'}\n }], None, None)\n path_3 = optimal_path.ShortestPath([{\n 'action': 'MoveAhead',\n 'params': {}\n }, {\n 'action': 'PickupObject',\n 'params': {'objectId': 'b'}\n }], None, None)\n path_4 = optimal_path.ShortestPath([{\n 'action': 'MoveAhead',\n 'params': {}\n }, {\n 'action': 'MoveAhead',\n 'params': {}\n }, {\n 'action': 'PickupObject',\n 'params': {'objectId': 'a'}\n }], None, None)\n\n output = optimal_path._remove_duplicate_paths([\n path_1, path_2, path_3, path_4, path_1, path_2, path_3, path_4\n ])\n assert len(output) == 4\n assert output[0].action_list[0]['action'] == 'MoveAhead'\n assert output[0].action_list[1]['action'] == 'MoveAhead'\n assert output[1].action_list[0]['action'] == 'MoveAhead'\n assert output[1].action_list[1]['action'] == 'PickupObject'\n assert output[1].action_list[1]['params']['objectId'] == 'a'\n assert output[2].action_list[0]['action'] == 'MoveAhead'\n assert output[2].action_list[1]['action'] == 'PickupObject'\n assert output[2].action_list[1]['params']['objectId'] == 'b'\n assert output[3].action_list[0]['action'] == 'MoveAhead'\n assert output[3].action_list[1]['action'] == 'MoveAhead'\n assert output[3].action_list[2]['action'] == 'PickupObject'\n assert output[3].action_list[2]['params']['objectId'] == 'a'\n\n\ndef test_generate_shortest_path_position_list_trivial():\n environment = optimal_path._generate_pathfinding_environment(\n geometry.DEFAULT_ROOM_DIMENSIONS,\n []\n )\n assert environment\n position_list = optimal_path._generate_shortest_path_position_list(\n (0, 0),\n (0, 4.5),\n environment\n )\n assert position_list == [(0, 0), (0, 4.5)]\n\n\ndef test_generate_shortest_path_position_list_basic():\n bounds_1 = [\n {'x': -1.0, 'z': 0.5}, {'x': -1.0, 'z': 1.0}, {'x': 0.5, 'z': 1.0},\n {'x': 0.5, 'z': 0.5}\n ]\n environment = optimal_path._generate_pathfinding_environment(\n geometry.DEFAULT_ROOM_DIMENSIONS,\n [bounds_1]\n )\n assert environment\n position_list = optimal_path._generate_shortest_path_position_list(\n (0, 0),\n (0, 4.5),\n environment\n )\n assert_array_almost_equal_nulp(numpy.array(position_list), numpy.array([\n (0, 0), (0.5, 0.22), (0.78, 0.5), (0.78, 1.0), (0, 4.5)\n ]))\n\n\ndef 
test_generate_shortest_path_position_list_complex():\n bounds_1 = [\n {'x': -1.0, 'z': 0.5}, {'x': -1.0, 'z': 0.75}, {'x': 0.5, 'z': 0.75},\n {'x': 0.5, 'z': 0.5}\n ]\n bounds_2 = [\n {'x': -0.5, 'z': 1.5}, {'x': -0.5, 'z': 1.75}, {'x': 4.44, 'z': 1.75},\n {'x': 4.44, 'z': 1.5}\n ]\n bounds_3 = [\n {'x': -4.44, 'z': 2.5}, {'x': -4.44, 'z': 2.75}, {'x': 0.5, 'z': 2.75},\n {'x': 0.5, 'z': 2.5}\n ]\n bounds_4 = [\n {'x': -0.5, 'z': 3.5}, {'x': -0.5, 'z': 3.75}, {'x': 1.0, 'z': 3.75},\n {'x': 1.0, 'z': 3.5}\n ]\n environment = optimal_path._generate_pathfinding_environment(\n geometry.DEFAULT_ROOM_DIMENSIONS,\n [bounds_1, bounds_2, bounds_3, bounds_4]\n )\n assert environment\n position_list = optimal_path._generate_shortest_path_position_list(\n (0, 0),\n (0, 4.5),\n environment\n )\n assert_array_almost_equal_nulp(numpy.array(position_list), numpy.array([\n (0, 0), (-1.0, 0.22), (-1.28, 0.5), (-1.28, 0.75), (-0.78, 1.75),\n (-0.5, 2.03), (0.5, 2.22), (0.78, 2.5), (1.28, 3.5), (1.28, 3.75),\n (1.0, 4.03), (0, 4.5)\n ]))\n\n\ndef test_generate_shortest_path_position_list_squeeze():\n bounds_1 = [\n {'x': -4.44, 'z': 0.5}, {'x': -4.44, 'z': 1.0}, {'x': -0.25, 'z': 1.0},\n {'x': -0.25, 'z': 0.5}\n ]\n bounds_2 = [\n {'x': 0.25, 'z': 0.5}, {'x': 0.25, 'z': 1.0}, {'x': 1.0, 'z': 1.0},\n {'x': 1.0, 'z': 0.5}\n ]\n bounds_3 = [\n {'x': 2.0, 'z': 0.5}, {'x': 2.0, 'z': 1.0}, {'x': 4.44, 'z': 1.0},\n {'x': 4.44, 'z': 0.5}\n ]\n environment = optimal_path._generate_pathfinding_environment(\n geometry.DEFAULT_ROOM_DIMENSIONS,\n [bounds_1, bounds_2, bounds_3]\n )\n assert environment\n position_list = optimal_path._generate_shortest_path_position_list(\n (0, 0),\n (0, 4.5),\n environment\n )\n assert_array_almost_equal_nulp(numpy.array(position_list), numpy.array([\n (0, 0), (1.0, 0.22), (1.28, 0.5), (1.28, 1.0), (0, 4.5)\n ]))\n\n\ndef test_generate_shortest_path_position_list_almost_impossible():\n bounds_1 = [\n {'x': -5, 'z': 0.5}, {'x': -5, 'z': 1.0}, {'x': 4.43, 'z': 1.0},\n {'x': 4.43, 'z': 0.5}\n ]\n environment = optimal_path._generate_pathfinding_environment(\n geometry.DEFAULT_ROOM_DIMENSIONS,\n [bounds_1]\n )\n assert environment\n position_list = optimal_path._generate_shortest_path_position_list(\n (0, 0),\n (0, 4.5),\n environment\n )\n assert_array_almost_equal_nulp(numpy.array(position_list), numpy.array([\n (0, 0), (4.43, 0.22), (4.71, 0.5), (4.71, 1.0), (4.43, 1.28), (0, 4.5)\n ]))\n\n\ndef test_generate_shortest_path_position_list_impossible():\n bounds_1 = [\n {'x': -4.45, 'z': 0.5}, {'x': -4.45, 'z': 1.0}, {'x': 4.45, 'z': 1.0},\n {'x': 4.45, 'z': 0.5}\n ]\n environment = optimal_path._generate_pathfinding_environment(\n geometry.DEFAULT_ROOM_DIMENSIONS,\n [bounds_1]\n )\n assert environment\n position_list = optimal_path._generate_shortest_path_position_list(\n (0, 0),\n (0, 4),\n environment\n )\n assert position_list is None\n\n\ndef test_rotate_then_move_no_rotation_or_movement():\n # 90 degree rotation is facing north in shapely\n path = optimal_path.ShortestPath([], (0, 0), 90)\n path_list = optimal_path._rotate_then_move(path, (0, 0))\n assert len(path_list) == 1\n assert path_list[0].action_list == []\n assert path_list[0].rotation == 90\n assert path_list[0].position[0] == pytest.approx(0)\n assert path_list[0].position[1] == pytest.approx(0)\n\n\ndef test_rotate_then_move_only_movement():\n # 90 degree rotation is facing north in shapely\n path = optimal_path.ShortestPath([], (0, 0), 90)\n path_list = optimal_path._rotate_then_move(path, (0, 4))\n assert len(path_list) == 
1\n assert path_list[0].action_list == (\n [{'action': 'MoveAhead', 'params': {}}] * 40\n )\n assert path_list[0].rotation == 90\n assert path_list[0].position[0] == pytest.approx(0)\n assert path_list[0].position[1] == pytest.approx(4)\n\n\ndef test_rotate_then_move_only_rotation():\n # 0 degree rotation is facing east in shapely\n path = optimal_path.ShortestPath([], (0, 0), 0)\n path_list = optimal_path._rotate_then_move(path, (0, 0.05))\n assert len(path_list) == 2\n assert path_list[0].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 9\n )\n assert path_list[0].rotation == 90\n assert path_list[0].position[0] == pytest.approx(0)\n assert path_list[0].position[1] == pytest.approx(0)\n assert path_list[1].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 9 +\n [{'action': 'MoveAhead', 'params': {}}]\n )\n assert path_list[1].rotation == 90\n assert path_list[1].position[0] == pytest.approx(0)\n assert path_list[1].position[1] == pytest.approx(0.1)\n\n\ndef test_rotate_then_move():\n # 180 degree rotation is facing west in shapely\n path = optimal_path.ShortestPath([], (0, 0), 180)\n path_list = optimal_path._rotate_then_move(path, (0, 4))\n assert len(path_list) == 1\n assert path_list[0].action_list == (\n [{'action': 'RotateRight', 'params': {}}] * 9 +\n [{'action': 'MoveAhead', 'params': {}}] * 40\n )\n assert path_list[0].rotation == 90\n assert path_list[0].position[0] == pytest.approx(0)\n assert path_list[0].position[1] == pytest.approx(4)\n\n\ndef test_rotate_then_move_multiple_path_move():\n # 0 degree rotation is facing east in shapely\n path = optimal_path.ShortestPath([], (0, 0), 0)\n path_list = optimal_path._rotate_then_move(path, (0.94, 0))\n assert len(path_list) == 2\n assert path_list[0].action_list == (\n [{'action': 'MoveAhead', 'params': {}}] * 9\n )\n assert path_list[0].rotation == 0\n assert path_list[0].position[0] == pytest.approx(0.9)\n assert path_list[0].position[1] == pytest.approx(0)\n assert path_list[1].action_list == (\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[1].rotation == 0\n assert path_list[1].position[0] == pytest.approx(1.0)\n assert path_list[1].position[1] == pytest.approx(0)\n\n\ndef test_rotate_then_move_multiple_path_rotate_left():\n path = optimal_path.ShortestPath([], (0, 0), -44)\n path_list = optimal_path._rotate_then_move(path, (1, 0))\n assert len(path_list) == 2\n assert path_list[0].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 4 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[0].rotation == -4\n assert path_list[0].position[0] == pytest.approx(0.997564)\n assert path_list[0].position[1] == pytest.approx(-0.0697565)\n assert path_list[1].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 5 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[1].rotation == 6\n assert path_list[1].position[0] == pytest.approx(0.994522)\n assert path_list[1].position[1] == pytest.approx(0.1045285)\n\n\ndef test_rotate_then_move_multiple_path_rotate_right():\n path = optimal_path.ShortestPath([], (0, 0), 44)\n path_list = optimal_path._rotate_then_move(path, (1, 0))\n assert len(path_list) == 2\n assert path_list[0].action_list == (\n [{'action': 'RotateRight', 'params': {}}] * 4 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[0].rotation == 4\n assert path_list[0].position[0] == pytest.approx(0.997564)\n assert path_list[0].position[1] == pytest.approx(0.0697565)\n assert path_list[1].action_list == (\n 
[{'action': 'RotateRight', 'params': {}}] * 5 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[1].rotation == -6\n assert path_list[1].position[0] == pytest.approx(0.994522)\n assert path_list[1].position[1] == pytest.approx(-0.1045285)\n\n\ndef test_rotate_then_move_multiple_path_both_rotate_move():\n path = optimal_path.ShortestPath([], (0, 0), -44)\n path_list = optimal_path._rotate_then_move(path, (1.04, 0))\n assert len(path_list) == 4\n assert path_list[0].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 4 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[0].rotation == -4\n assert path_list[0].position[0] == pytest.approx(0.997564)\n assert path_list[0].position[1] == pytest.approx(-0.0697565)\n assert path_list[1].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 4 +\n [{'action': 'MoveAhead', 'params': {}}] * 11\n )\n assert path_list[1].rotation == -4\n assert path_list[1].position[0] == pytest.approx(1.09732)\n assert path_list[1].position[1] == pytest.approx(-0.0767321)\n assert path_list[2].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 5 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[2].rotation == 6\n assert path_list[2].position[0] == pytest.approx(0.994522)\n assert path_list[2].position[1] == pytest.approx(0.1045285)\n assert path_list[3].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 5 +\n [{'action': 'MoveAhead', 'params': {}}] * 11\n )\n assert path_list[3].rotation == 6\n assert path_list[3].position[0] == pytest.approx(1.093974)\n assert path_list[3].position[1] == pytest.approx(0.1149813)\n\n\ndef test_rotate_then_move_rotate_less():\n path = optimal_path.ShortestPath([], (0, 0), -41)\n path_list = optimal_path._rotate_then_move(path, (1, 0))\n assert len(path_list) == 3\n assert path_list[0].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 4 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[0].rotation == -1\n assert path_list[0].position[0] == pytest.approx(0.999848)\n assert path_list[0].position[1] == pytest.approx(-0.0174524)\n assert path_list[1].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 5 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[1].rotation == 9\n assert path_list[1].position[0] == pytest.approx(0.987688)\n assert path_list[1].position[1] == pytest.approx(0.1564345)\n assert path_list[2].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 3 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[2].rotation == -11\n assert path_list[2].position[0] == pytest.approx(0.981627)\n assert path_list[2].position[1] == pytest.approx(-0.190809)\n\n\ndef test_rotate_then_move_rotate_more():\n path = optimal_path.ShortestPath([], (0, 0), -49)\n path_list = optimal_path._rotate_then_move(path, (1, 0))\n assert len(path_list) == 3\n assert path_list[0].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 4 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[0].rotation == -9\n assert path_list[0].position[0] == pytest.approx(0.987688)\n assert path_list[0].position[1] == pytest.approx(-0.1564344)\n assert path_list[1].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 5 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[1].rotation == 1\n assert path_list[1].position[0] == pytest.approx(0.999848)\n assert path_list[1].position[1] == pytest.approx(0.0174524)\n assert 
path_list[2].action_list == (\n [{'action': 'RotateLeft', 'params': {}}] * 6 +\n [{'action': 'MoveAhead', 'params': {}}] * 10\n )\n assert path_list[2].rotation == 11\n assert path_list[2].position[0] == pytest.approx(0.981627)\n assert path_list[2].position[1] == pytest.approx(0.190809)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lover-520/wzm_landform_scene_model
[ "1bc8894d99b76213ca2544e540dccab2ad52be00" ]
[ "data_loader/ouy_dataloader_64.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author: WZM\n@time: 2021/1/2 17:21\n@function: 实现原文作者的数据加载dataloader 64x64的图片分辨率\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport os\nimport random\nfrom torch.utils.data import DataLoader, Dataset\nimport torch\nimport time as t\nfrom net.ouy_net import Network\nfrom termcolor import cprint\nfrom random import choice\nimport torchvision.transforms as transforms\n\n\ndef log_print(text, color=None, on_color=None, attrs=None):\n if cprint is not None:\n cprint(text, color=color, on_color=on_color, attrs=attrs)\n else:\n print(text)\n\n\ndef save_net(fname, net):\n import h5py\n h5f = h5py.File(fname, mode='w')\n for k, v in net.state_dict().items():\n h5f.create_dataset(k, data=v.cpu().numpy())\n\n\ndef np_to_variable(x, is_cuda=True, is_training=False, is_lable=False, dtype=torch.FloatTensor):\n if is_training:\n v = torch.from_numpy(x).type(dtype)\n elif is_lable:\n v = torch.tensor(x).type(dtype)\n\n # device = torch.device('cuda:1')\n # if torch.cuda.is_available():\n # v = v.to(device)\n return v\n\n\n# concat\n\n\nclass TensorDataset(Dataset):\n # TensorDataset继承Dataset, 重载了__init__, __getitem__, __len__\n # 实现将一组Tensor数据对封装成Tensor数据集\n # 能够通过index得到数据集的数据,能够通过len,得到数据集大小\n\n def __init__(self, data_path, data_name, transform=False, shuffle=True):\n # self.data_tensor = data_tensor\n # self.target_tensor = target_tensor\n self.data_path = data_path\n self.data_name = data_name\n self.transform = transform\n # self.pre_load = pre_load\n # self.imresize = imresize;\n # self.sample=sample\n # self.samplenumber=samplenumber\n self.data_files = []\n self.label = []\n self.weights = []\n\n filename = os.path.join(data_path, data_name)\n with open(filename, 'r') as file_to_read:\n idx = 0\n name = []\n while True:\n lines = file_to_read.readline()\n if not lines:\n break\n pass\n nameT, labelT = lines.split()\n name.append(nameT) # [0001_img.tiff,0002_img.tiff....]\n path = os.path.join(data_path, nameT)\n if os.path.isfile(path):\n self.data_files.append(path)\n self.label.append(labelT) # [2,3,....]\n self.num_samples = len(self.data_files)\n\n def __getitem__(self, index):\n\n Set = set([0, 1, 2, 3])\n randN = int(choice(list(Set)))\n\n imgpath = self.data_files[index]\n target = self.label[index]\n target = np_to_variable(int(target), is_cuda=True, is_lable=True, dtype=torch.LongTensor)\n\n rgb = cv2.imread(imgpath, cv2.IMREAD_COLOR)\n if self.transform:\n rgb = np.rot90(rgb, randN, axes=(0, 1))\n rgb = cv2.resize(rgb, (128, 128)) # 默认双线性插值\n rgb = np.copy(rgb)\n rgb = rgb.astype(np.float16, copy=False)\n rgb = rgb.transpose(2, 0, 1) # shape(128,128,3)--- shape(3,128,128)\n rgb = rgb.reshape((rgb.shape[0], rgb.shape[1], rgb.shape[2])) # shape(1,3,128,128)\n rgb = np_to_variable(rgb, is_cuda=True, is_training=True)\n\n hillshadepath = imgpath.replace('img.tiff', 'sha.tiff')\n shade = cv2.imread(hillshadepath, cv2.IMREAD_GRAYSCALE) # 进行转化为灰度图,比如保存为了16位的图片,读取出来为8位,类型为CV_8UC1。\n if self.transform:\n shade = np.rot90(shade, randN, axes=(0, 1))\n shade = cv2.resize(shade, (128, 128))\n shade = np.copy(shade)\n shade = shade.astype(np.float16, copy=False)\n shade = shade.reshape((1, shade.shape[0], shade.shape[1])) # shape(1,1,128,128)\n shade = np_to_variable(shade, is_cuda=True, is_training=True)\n\n dem = []\n dempath = imgpath.replace('img.tiff', 'dem.txt')\n with open(dempath, 'r') as file_to_read:\n lines = file_to_read.readline()\n nameT, nC = lines.split()\n lines = file_to_read.readline()\n nameT, nR = lines.split()\n nR = int(nR)\n nC = int(nC)\n 
\n dem = np.zeros((nR, nC)) # shape (nR, nC), typically (128, 128)\n for i in range(0, nR):\n lines = file_to_read.readline()\n lines = lines.split()\n for j in range(0, nC):\n dem[i, j] = float(lines[j])\n if self.transform:\n dem = np.rot90(dem, randN, axes=(0, 1))\n dem = dem.reshape((dem.shape[0], dem.shape[1], 1))\n dem = cv2.resize(dem, (128, 128))\n dem = np.copy(dem)\n # dem = cv2.flip(dem, 0) # flipCode: 1 = horizontal flip, 0 = vertical flip, -1 = both\n dem = dem.reshape((1, dem.shape[0], dem.shape[1])) # shape (1, 128, 128)\n dem = np_to_variable(dem, is_cuda=True, is_training=True)\n\n return rgb, shade, dem, target, imgpath\n\n def __len__(self):\n return self.num_samples\n\n def getWeight(self):\n # class_sample_count = [100, 100, 100]\n class_sample_count = [179, 809, 1390] # number of samples per class; the weights below are inverse class frequencies\n classweights = 1 / torch.Tensor(class_sample_count)\n for i in self.label:\n if int(i) == 0:\n self.weights.append(classweights[0])\n elif int(i) == 1:\n self.weights.append(classweights[1])\n elif int(i) == 2:\n self.weights.append(classweights[2])\n\n return self.weights\n" ]
[ [ "numpy.rot90", "torch.Tensor", "torch.from_numpy", "torch.tensor", "numpy.copy", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TeamSPoon/logicmoo_nlu
[ "5c3e5013a3048da7d68a8a43476ad84d3ea4bb47" ]
[ "ext/pldata/nltk_3.0a3/nltk/metrics/scores.py" ]
[ "# Natural Language Toolkit: Evaluation\n#\n# Copyright (C) 2001-2013 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\nfrom __future__ import print_function\n\nfrom math import fabs\nimport operator\nfrom random import shuffle\nfrom functools import reduce\n\ntry:\n from scipy.stats.stats import betai\nexcept ImportError:\n betai = None\n\nfrom nltk.compat import xrange, izip\nfrom nltk.util import LazyConcatenation, LazyMap\n\ndef accuracy(reference, test):\n \"\"\"\n Given a list of reference values and a corresponding list of test\n values, return the fraction of corresponding values that are\n equal. In particular, return the fraction of indices\n ``0 <= i < len(test)`` such that ``test[i] == reference[i]``.\n\n :type reference: list\n :param reference: An ordered list of reference values.\n :type test: list\n :param test: A list of values to compare against the corresponding\n reference values.\n :raise ValueError: If ``reference`` and ``test`` do not have the\n same length.\n \"\"\"\n if len(reference) != len(test):\n raise ValueError(\"Lists must have the same length.\")\n return float(sum(x == y for x, y in izip(reference, test))) / len(test)\n\ndef precision(reference, test):\n \"\"\"\n Given a set of reference values and a set of test values, return\n the fraction of test values that appear in the reference set.\n In particular, return card(``reference`` intersection ``test``)/card(``test``).\n If ``test`` is empty, then return None.\n\n :type reference: set\n :param reference: A set of reference values.\n :type test: set\n :param test: A set of values to compare against the reference set.\n :rtype: float or None\n \"\"\"\n if (not hasattr(reference, 'intersection') or\n not hasattr(test, 'intersection')):\n raise TypeError('reference and test should be sets')\n\n if len(test) == 0:\n return None\n else:\n return float(len(reference.intersection(test)))/len(test)\n\ndef recall(reference, test):\n \"\"\"\n Given a set of reference values and a set of test values, return\n the fraction of reference values that appear in the test set.\n In particular, return card(``reference`` intersection ``test``)/card(``reference``).\n If ``reference`` is empty, then return None.\n\n :type reference: set\n :param reference: A set of reference values.\n :type test: set\n :param test: A set of values to compare against the reference set.\n :rtype: float or None\n \"\"\"\n if (not hasattr(reference, 'intersection') or\n not hasattr(test, 'intersection')):\n raise TypeError('reference and test should be sets')\n\n if len(reference) == 0:\n return None\n else:\n return float(len(reference.intersection(test)))/len(reference)\n\ndef f_measure(reference, test, alpha=0.5):\n \"\"\"\n Given a set of reference values and a set of test values, return\n the f-measure of the test values, when compared against the\n reference values. The f-measure is the harmonic mean of the\n ``precision`` and ``recall``, weighted by ``alpha``.
In particular,\n given the precision *p* and recall *r* defined by:\n\n - *p* = card(``reference`` intersection ``test``)/card(``test``)\n - *r* = card(``reference`` intersection ``test``)/card(``reference``)\n\n The f-measure is:\n\n - *1/(alpha/p + (1-alpha)/r)*\n\n If either ``reference`` or ``test`` is empty, then ``f_measure``\n returns None.\n\n :type reference: set\n :param reference: A set of reference values.\n :type test: set\n :param test: A set of values to compare against the reference set.\n :rtype: float or None\n \"\"\"\n p = precision(reference, test)\n r = recall(reference, test)\n if p is None or r is None:\n return None\n if p == 0 or r == 0:\n return 0\n return 1.0/(alpha/p + (1-alpha)/r)\n\ndef log_likelihood(reference, test):\n \"\"\"\n Given a list of reference values and a corresponding list of test\n probability distributions, return the average log likelihood of\n the reference values, given the probability distributions.\n\n :param reference: A list of reference values\n :type reference: list\n :param test: A list of probability distributions over values to\n compare against the corresponding reference values.\n :type test: list(ProbDistI)\n \"\"\"\n if len(reference) != len(test):\n raise ValueError(\"Lists must have the same length.\")\n\n # Return the average value of dist.logprob(val).\n total_likelihood = sum(dist.logprob(val)\n for (val, dist) in izip(reference, test))\n return total_likelihood/len(reference)\n\ndef approxrand(a, b, **kwargs):\n \"\"\"\n Returns an approximate significance level between two lists of\n independently generated test values.\n\n Approximate randomization calculates significance by randomly drawing\n from a sample of the possible permutations. At the limit of the number\n of possible permutations, the significance level is exact. 
The\n approximate significance level is the sample mean number of times the\n statistic of the permuted lists varies from the actual statistic of\n the unpermuted argument lists.\n\n :return: a tuple containing an approximate significance level, the count\n of the number of times the pseudo-statistic varied from the\n actual statistic, and the number of shuffles\n :rtype: tuple\n :param a: a list of test values\n :type a: list\n :param b: another list of independently generated test values\n :type b: list\n \"\"\"\n shuffles = kwargs.get('shuffles', 999)\n # there's no point in trying to shuffle beyond all possible permutations\n shuffles = \\\n min(shuffles, reduce(operator.mul, xrange(1, len(a) + len(b) + 1)))\n stat = kwargs.get('statistic', lambda lst: float(sum(lst)) / len(lst))\n verbose = kwargs.get('verbose', False)\n\n if verbose:\n print('shuffles: %d' % shuffles)\n\n actual_stat = fabs(stat(a) - stat(b))\n\n if verbose:\n print('actual statistic: %f' % actual_stat)\n print('-' * 60)\n\n c = 1e-100\n lst = LazyConcatenation([a, b])\n indices = list(range(len(a) + len(b)))\n\n for i in xrange(shuffles):\n if verbose and i % 10 == 0:\n print('shuffle: %d' % i)\n\n shuffle(indices)\n\n pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[:len(a)]))\n pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a):]))\n pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b)\n\n if pseudo_stat >= actual_stat:\n c += 1\n\n if verbose and i % 10 == 0:\n print('pseudo-statistic: %f' % pseudo_stat)\n print('significance: %f' % (float(c + 1) / (i + 1)))\n print('-' * 60)\n\n significance = float(c + 1) / (shuffles + 1)\n\n if verbose:\n print('significance: %f' % significance)\n if betai:\n for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:\n print(\"prob(phi<=%f): %f\" % (phi, betai(c, shuffles, phi)))\n\n return (significance, c, shuffles)\n\n\ndef demo():\n print('-'*75)\n reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()\n test = 'DET VB VB DET NN NN NN IN DET NN'.split()\n print('Reference =', reference)\n print('Test =', test)\n print('Accuracy:', accuracy(reference, test))\n\n print('-'*75)\n reference_set = set(reference)\n test_set = set(test)\n print('Reference =', reference_set)\n print('Test = ', test_set)\n print('Precision:', precision(reference_set, test_set))\n print(' Recall:', recall(reference_set, test_set))\n print('F-Measure:', f_measure(reference_set, test_set))\n print('-'*75)\n\nif __name__ == '__main__':\n demo()\n\n" ]
[ [ "scipy.stats.stats.betai" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.19", "0.18", "0.12", "0.17", "0.16" ], "tensorflow": [] } ]
riven314/Santa20-Local-Contest
[ "9bc8d1c9b20f450ead8d43081c3a542a1ff39da5" ]
[ "agent_pools/thompson_modified.py" ]
[ "\"\"\"\nmodified thompson sampling from Vic\n\"\"\"\nimport time\nimport numpy as np\n\n\nbandit = None\ntotal_reward = 0\nn_machines=100\nn_success_n_pull=[[] for _ in range(n_machines)]\nn_pull=np.array([0 for _ in range(n_machines)])\nprob_arrays = [[0.01]*100 for _ in range(n_machines)]\n\n\ndef cond_prob(theta: float, obs_list: list): #p(theta|obs_list)\n func_value = 1\n index = 0\n while index < len(obs_list):\n obs = obs_list[index]\n func_value = func_value*((theta*0.97**obs[1])*(-1)**(1-obs[0])+1-obs[0])\n index += 1\n return func_value\n\n\ndef prior_conj(theta:float,obs_list:list): #integrate p(theta|obs_list) from 0 to 1\n nom = cond_prob(theta,obs_list)\n denom = 1. \n #quad(cond_prob,0,1,args=obs_list,epsabs=0.02)[0]\n return nom / denom\n\n\ndef prob_array(obs_list:list,n_ele:int): #integrate p(theta|obs_list) from 0 to 1\n dis_supp = np.linspace(0,1, n_ele)\n dis = np.asarray([prior_conj(theta, obs_list) for theta in dis_supp])\n prob = dis/dis.sum()\n return prob\n\n\ndef sample(prob):\n sample = np.random.choice(\n a = np.linspace(0,1, len(prob)),\n size = None, replace = True, p = prob\n )\n return sample\n\n\ndef plot_check(obs_list,finess):\n import matplotlib.pyplot as plt\n t=np.arange(0.,1.,finess)\n plt.plot(t,list(map(lambda theta:prior_conj(theta,obs_list),t))) \n result=0\n finess=0.001\n for i in np.arange(0.,1.,finess):\n result+=finess*prior_conj(i,obs_list) \n #print(result)\n return None\n\n\ndef agent(observation, configuration):\n global total_reward, bandit, n_machines, n_pull,n_success_n_pull,last_reward,prob_arrays\n \n if observation.step == 0:\n \n bandit = None\n total_reward = 0\n n_machines = 100\n n_success_n_pull = [[] for _ in range(n_machines)]\n n_pull = np.array([0 for _ in range(n_machines)])\n prob_arrays = [[0.01]*100 for _ in range(n_machines)]\n\n\n last_reward = observation['reward'] - total_reward\n total_reward = observation['reward']\n\n if len(observation['lastActions']) == 2:\n # Update number of pulls for both machines\n m_index = observation['lastActions'][observation['agentIndex']]\n opp_index = observation['lastActions'][(observation['agentIndex'] + 1) % 2]\n n_pull[m_index] += 1\n n_pull[opp_index] += 1\n n_success_n_pull[m_index].append((last_reward, n_pull[m_index]))\n \n # Update the distribution for the machine pulled last turn\n prob_arrays[m_index] = prob_array(\n obs_list = n_success_n_pull[m_index], n_ele = 500)\n\n discounted_samples = np.asarray([sample(prob_arrays[_])*0.97**(n_pull[_]) for _ in range(n_machines)])\n bandit = int(np.argmax(discounted_samples))\n \n #print(\"Step:\"+str(observation.step))\n #print(\"Bandit:\"+str(bandit))\n return bandit" ]
[ [ "numpy.arange", "numpy.argmax", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ysndr/sam-knn-regressor
[ "4402ad28f888b47011b22b4a2171ad8d5bdce6f7" ]
[ "dataset.py" ]
[ "# To add a new cell, type '#%%'\n# To add a new markdown cell, type '#%% [markdown]'\n#%%\n# %load_ext autoreload\n# %autoreload 2\n#pylinignore\n#%matplotlib osx\n\n#%%\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom sklearn.preprocessing import MinMaxScaler\nfrom skmultiflow.data import DataStream\nfrom skmultiflow.evaluation import EvaluatePrequential\nfrom skmultiflow.trees import RegressionHAT, RegressionHoeffdingTree\nimport samknnreg\nfrom importlib import reload\nfrom samknnreg import SAMKNNRegressor\nimport matplotlib.pyplot as plt\n\n\n\n#%%\nprint(\"Reading dataset...\")\ndf = pd.read_csv(\n \"weatherHistory.csv\",\n parse_dates={\"datetime\": [\"Formatted Date\"]},\n date_parser=pd.to_datetime,\n index_col=\"datetime\")\nprint(\"done!\")\n#%%\n\ndf.index = pd.to_datetime(df.index, utc=True)\ndf.drop(columns=[\"Summary\", \"Precip Type\", \"Daily Summary\", \"Loud Cover\"], inplace=True, errors=\"ignore\")\n\ndf.info()\n\n\n#%%\ndf.head()\n\n\n#%%\nscaler = MinMaxScaler()\ntdf = pd.DataFrame(scaler.fit_transform(df.values), columns=df.columns.copy(), index=df.index)\n\n\n\n#%% \n\nfig, ax = plt.subplots(ncols=2)\n\ndf.drop(columns=[\"Pressure (millibars)\", \"Wind Bearing (degrees)\"]).resample(\"W\").mean().plot(ax=ax[0], title=\"unscaled\")\n\n\ntdf.drop(columns=[\"Pressure (millibars)\", \"Wind Bearing (degrees)\"]).resample(\"W\").mean().plot(ax=ax[1], title=\"scaled\")\n\n#%%\n\ntdf.info()\n\nX = tdf[[\"Pressure (millibars)\", \"Humidity\", \"Wind Speed (km/h)\"]].resample(\"6H\").mean()\ny = tdf[[\"Temperature (C)\"]].resample(\"6H\").max()\n\nX.plot(subplots=True, layout=(1,3))\ny.plot()\n\n#%%\n\nreload(samknnreg)\nfrom samknnreg import SAMKNNRegressor\n\nsam = SAMKNNRegressor()\nhat = RegressionHAT()\nrht = RegressionHoeffdingTree()\nds = DataStream(X, y=y)\nds.prepare_for_use()\n\n\nevaluator = EvaluatePrequential(show_plot=True,\n n_wait=730,\n batch_size=28,\n metrics=[\n 'mean_square_error',\n 'true_vs_predicted'])\n\n#%%\nevaluator.evaluate(\n stream=ds,\n model=[sam, rht, hat ],\n model_names=[\"SAM\", \"Hoeffding Tree Regressor\", \"Hoeffding Tree Regressor (Adaptive)\"])\n \n\n#%%\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "matplotlib.pyplot.subplots", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
tidepool-org/data-science-simulator
[ "fbecdadd8ea4afe88a2789871c13d8d128f637ac" ]
[ "tidepool_data_science_simulator/legacy/risk_metrics_ORIG.py" ]
[ "import numpy as np\n\n\nfrom tidepool_data_science_models.models.simple_metabolism_model import SimpleMetabolismModel\n\n\ndef get_bgri(bg_df):\n # Calculate LBGI and HBGI using equation from\n # Clarke, W., & Kovatchev, B. (2009)\n bgs = bg_df.copy()\n bgs[bgs < 1] = 1 # this is added to take care of edge case BG <= 0\n transformed_bg = 1.509 * ((np.log(bgs) ** 1.084) - 5.381)\n risk_power = 10 * (transformed_bg) ** 2\n low_risk_bool = transformed_bg < 0\n high_risk_bool = transformed_bg > 0\n rlBG = risk_power * low_risk_bool\n rhBG = risk_power * high_risk_bool\n LBGI = np.mean(rlBG)\n HBGI = np.mean(rhBG)\n BGRI = LBGI + HBGI\n\n return LBGI, HBGI, BGRI\n\n\ndef lbgi_risk_score(lbgi):\n if lbgi > 10:\n risk = 4\n elif lbgi > 5:\n risk = 3\n elif lbgi > 2.5:\n risk = 2\n elif lbgi > 0:\n risk = 1\n else:\n risk = 0\n return risk\n\n\ndef hbgi_risk_score(hbgi):\n if hbgi > 18:\n risk = 4\n elif hbgi > 9:\n risk = 3\n elif hbgi > 4.5:\n risk = 2\n elif hbgi > 0:\n risk = 1\n else:\n risk = 0\n return risk\n\n\ndef get_dka_risk_hours(temp_basals, iob_array, sbr):\n\n # Use refactor of metabolism model\n metab_model = SimpleMetabolismModel(\n insulin_sensitivity_factor=0, carb_insulin_ratio=0\n )\n steady_state_iob = metab_model.get_steady_state_iob_from_sbr(\n sbr, use_fda_submission_constant=True\n )\n\n fifty_percent_steady_state_iob = steady_state_iob / 2\n\n indices_with_less_50percent_sbr_iob = iob_array < fifty_percent_steady_state_iob\n\n hours_with_less_50percent_sbr_iob = (\n np.sum(indices_with_less_50percent_sbr_iob) * 5 / 60\n )\n return hours_with_less_50percent_sbr_iob\n\n\ndef dka_risk_score(hours_with_less_50percent_sbr_iob):\n if hours_with_less_50percent_sbr_iob >= 16:\n risk = 4\n elif hours_with_less_50percent_sbr_iob >= 12:\n risk = 3\n elif hours_with_less_50percent_sbr_iob >= 8:\n risk = 2\n elif hours_with_less_50percent_sbr_iob >= 4:\n risk = 1\n else:\n risk = 0\n return risk\n\n\ndef suspend_risk_score(minutes_of_suspend):\n if minutes_of_suspend >= 8 * 60:\n risk = 4\n elif minutes_of_suspend >= 5 * 60:\n risk = 3\n elif minutes_of_suspend >= 2 * 60:\n risk = 2\n elif minutes_of_suspend >= 1 * 60:\n risk = 1\n else:\n risk = 0\n return risk\n" ]
[ [ "numpy.log", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ardihikaru/eaglestitch
[ "b388f0c3b78b0539812985850905c78830e871aa" ]
[ "eaglestitch/image_subscriber/zenoh_pubsub/zenoh_net_publisher.py" ]
[ "from eaglestitch.image_subscriber.zenoh_pubsub.core.zenoh_net import ZenohNet\nimport sys\nimport time\nfrom datetime import datetime\nimport numpy as np\nimport cv2\nimport simplejson as json\nfrom enum import Enum\nimport logging\n\n###\n\nL = logging.getLogger(__name__)\n\n\n###\n\n\nclass ZenohNetPublisher(ZenohNet):\n\n\tclass InputDataType(Enum):\n\t\tNATIVE_TYPE = 1\n\t\tSIMPLE_NUMPY = 2\n\t\tCOMPLEX_NUMPY = 3\n\n\tdef __init__(self, _listener=None, _mode=\"peer\", _peer=None, _path=None, _session_type=None):\n\t\tsuper().__init__(_listener=_listener, _mode=_mode, _peer=_peer, _path=_path, _session_type=_session_type)\n\n\tdef register(self):\n\t\tsuper().register_publisher()\n\n\tdef get_publisher(self):\n\t\treturn self.pub\n\n\tdef _get_encoder(self, _encoder):\n\t\treturn self.ENCODER if _encoder is None else _encoder\n\n\tdef _encode_data(self, _val, _itype, _encoder):\n\t\tif _itype == self.InputDataType.NATIVE_TYPE.value:\n\t\t\tencoded_data = bytes(json.dumps(_val), encoding='utf8')\n\t\telif _itype == self.InputDataType.SIMPLE_NUMPY.value:\n\t\t\tencoded_data = _val.tobytes()\n\t\telif _itype == self.InputDataType.COMPLEX_NUMPY.value:\n\t\t\tencoder = self._get_encoder(_encoder)\n\t\t\ttagged_data = np.array(_val, dtype=encoder)\n\t\t\tencoded_data = tagged_data.tobytes()\n\t\telse:\n\t\t\t# simply convert the data into bytes\n\t\t\tencoded_data = bytes(json.dumps(_val), encoding='utf8')\n\n\t\treturn encoded_data\n\n\t# def publish(self, _val, _itype, _encoder=None, _taggable_info=None):\n\tdef publish(self, _val, _itype, _encoder=None):\n\t\t\"\"\"\n\t\t_val: The value of the resource to put.\n\t\t\"\"\"\n\n\t\t# pre-process data before being sent into Zenoh system\n\t\tencoded_data = self._encode_data(_val, _itype, _encoder)\n\n\t\tt0_publish = time.time()\n\t\tsuper().publish_data(encoded_data)\n\t\tt1_publish = (time.time() - t0_publish) * 1000\n\t\tL.warning(('\\n[%s] Latency insert data into Zenoh (%.3f ms) \\n' % (datetime.now().strftime(\"%H:%M:%S\"), t1_publish)))\n\n\tdef close_connection(self, _producer=None):\n\t\tif _producer is not None:\n\t\t\t_producer.undeclare()\n\t\tsuper().close_connection()\n\n\n\"\"\"\n# Usage example\n# ---------------\n\n# Define input data\n# [1] Data Type: simple Integer / Float / Bool\n# encoder_format = None\n# itype = 1\n# val = 123\n###############################################################\n\n# [2] Data Type: Numpy Array (image)\n# encoder_format = None\n# itype = 2\n# root_path = \"/home/s010132/devel/eagleeye/data/out1.png\"\n# val = cv2.imread(root_path)\n###############################################################\n\n# [3] Data Type: Numpy Array with structured array format (image + other information)\nitype = 3\nencoder_format = [\n\t('id', 'U10'),\n\t('timestamp', 'f'),\n\t('data', [('flatten', 'i')], (1, 6220800)),\n\t('store_enabled', '?'),\n]\nroot_path = \"/home/s010132/devel/eagleeye/data/out1.png\"\nimg = cv2.imread(root_path)\nimg_1d = img.reshape(1, -1)\nval = [('Drone 1', time.time(), img_1d, False)]\n###############################################################\n\n# configure zenoh service\npath = \"/demo/example/zenoh-python-pub\"\nz_svc = ZenohNetPublisher(\n\t_path=path, _session_type=\"PUBLISHER\"\n)\nz_svc.init_connection()\n\n# register and collect publisher object\nz_svc.register()\npublisher = z_svc.get_publisher()\n\n# publish data\nz_svc.publish(\n\t_val=val,\n\t_itype=itype,\n\t_encoder=encoder_format,\n)\n\n# closing Zenoh publisher & 
session\nz_svc.close_connection(publisher)\n\"\"\"\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CosmoStat/OSCAR
[ "ed0a8b784ce895f07f92dc21575d4fd5f4e5b282" ]
[ "reproducible_research/deconvolution_gamma_zero.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 12 11:04:42 2020\n\n@author: fnammour\n\"\"\"\n\nfrom score import score\nimport numpy as np\nimport os\n\n#define paths\nroot_path = '/Users/username/path/to/'\ndata_path = root_path+'data_folder/'\n\n#load data\n#set SNRs\nSNRs = [40,75,150,380]\n\n#Load psf\nPSF = np.load(data_path+'psfs.npy')\ngal_num,row,column = PSF.shape\ndigit_num = int(np.round(np.log10(gal_num)))+1\n\n#set denoising parameters\nn_starlet = 4 #number of starlet scales\nn_shearlet = 3 #number of shearlet scales\nlip_eps = 1e-3 #error upper bound for the Lipschitz constant\ntolerance = 1e-6 #to test convergence\nn_itr = 150 #number of iterations\nk = 4 #Set k for k-sigma hard thresholding\nbeta_factor = 0.95 #to ensure that beta is not too big\nrip = False #Removal of Isolated Pixel in the deconvolution solution\nfirst_guess = np.ones((row,column))/(row*column) #first guess\ngamma = 0.0 #deactivate the shape constraint\n\n#define result path\nresults_path = root_path+'results/{0}_conv_k{1}/'.format(n_itr,k)\n\n#instantiate the solver\nsolver = score(k=k,n_starlet=n_starlet,n_shearlet=n_shearlet,epsilon=lip_eps,\\\n rip=rip,tolerance=tolerance,beta_factor=beta_factor,gamma=gamma,\\\n first_guess=first_guess,verbose=False)\n\n#loop on SNR\nfor SNR in SNRs:\n #load observed galaxies\n obs_gals = np.load(data_path+'SNR{0}/noisy_galaxies_SNR{0}.npy'.format(SNR))\n obs_gals = obs_gals[:2]\n decon_list = list() #deconvolved galaxies list\n ell_list = list() #ellipticity list\n gal_counter = 1\n print('DECONVOLVING GAMMA={1} SNR={0}'.format(SNR,gamma))\n #loop on the galaxy images\n for Y,H in zip(obs_gals,PSF):\n solver.deconvolve(obs=Y,psf=H)\n decon_list += [solver.solution] \n ell_list += [solver.ell_solution]\n nz = digit_num*'0'\n print(('Galaxy %'+nz+'{0}d of {1}').format(digit_num, gal_num) % gal_counter,end='\\r')\n gal_counter += 1\n \n output_directory = results_path+'SNR{0}/'.format(SNR)\n \n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n \n np.save(output_directory+'deconvolved_galaxies_gamma_zero.npy',np.array(decon_list))\n np.save(output_directory+'ellipticities_gamma_zero.npy',np.array(ell_list))\n " ]
[ [ "numpy.load", "numpy.log10", "numpy.array", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bfgoldstein/tiny_torchfi
[ "82b0f4931ff8aac6079122200fbe61782bb1f0da" ]
[ "torchFI/modules/linear.py" ]
[ "###############################################################\n# This file was created using part of Distiller project developed by:\n# NervanaSystems https://github.com/NervanaSystems/distiller\n# \n# Changes were applied to satisfy torchFI project needs\n###############################################################\n\nimport numpy as np\nimport torch.nn as nn\n\nfrom util.log import *\n\n\nclass FILinear(nn.Linear):\n\n def __init__(self, fi, name, in_features, out_features, weight=None, bias=None): \n self.fi = fi\n self.name = name\n self.id = fi.addNewLayer(name, FILinear)\n \n super(FILinear, self).__init__(in_features, out_features, \n True if bias is not None else False)\n\n if weight is not None:\n self.weight = weight\n if bias is not None:\n self.bias = bias\n\n def forward(self, input):\n if self.fi.injectionMode and self.id == self.fi.injectionLayer:\n # XNOR Operation\n # True only if both injectionFeatures and injectionWeights are True or False\n # False if one of them is True \n if not(self.fi.injectionFeatures ^ self.fi.injectionWeights):\n # decide where to apply injection\n # weights = 0, activations = 1 \n # locInjection = np.random.randint(0, 2)\n locInjection = np.random.binomial(1, .5)\n else:\n locInjection = self.fi.injectionFeatures\n\n if locInjection: \n if self.fi.log:\n logWarning(\"\\tInjecting Fault into feature data of Linear \"\n + self.name + \" layer.\")\n \n faulty_res = self.fi.injectFeatures(input.data)\n \n for idx, (indices, faulty_val) in enumerate(faulty_res):\n # add idx as batch index to indices array\n input.data[tuple([idx] + indices)] = faulty_val\n\n return nn.functional.linear(input, self.weight, self.bias)\n else:\n # create new tensor to apply FI\n weightFI = self.weight.clone()\n\n if self.fi.log:\n logWarning(\"\\tInjecting Fault into weight data of Linear \"\n + self.name + \" layer.\")\n \n indices, faulty_val = self.fi.inject(weightFI.data)\n \n weightFI.data[tuple(indices)] = faulty_val \n\n return nn.functional.linear(input, weightFI, self.bias)\n else:\n return super(FILinear, self).forward(input)\n \n @staticmethod\n def from_pytorch_impl(fi, name, linear: nn.Linear):\n return FILinear(fi, name, linear.in_features, linear.out_features, \n linear.weight, linear.bias)\n \n def __repr__(self):\n return \"%s(in_features=%d, out_features=%d, bias=%s, id=%d)\" % (\n self.__class__.__name__,\n self.in_features,\n self.out_features,\n str(True if self.bias is not None else False),\n self.id) " ]
[ [ "numpy.random.binomial", "torch.nn.functional.linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
StuartCHAN/KARL
[ "2a4bb39d2db7646f57e66bda7c6694ba33022f76" ]
[ "scripts/neural_layers - attn_birnn.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 7 17:23:17 2019\n\n@author: Stuart Chen\n\n#(env)> pip3 install pytorch-pretrained-bert \n\"\"\"\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\n#import unicodedata\n#import string\n#import re\nimport random\nimport time\nimport numpy as np \nimport os\nimport math \n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport torch.utils.data as Data\nimport torch.nn.utils.rnn as rnn_utils\n\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM, BertForSequenceClassification\n\nimport kg_utils.reward as reward \n\nimport utils\nfrom utils import *\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(\"\\n device: \", device)\n\nprint(\"\\n torch version \", torch.__version__ )\n\n\n\n\n# Define the Encoder\ndef swish(x):\n return x * torch.sigmoid(x)\n\nclass BERTEncoder(BertForSequenceClassification):\n# def __init__(self, config, num_labels=2):\n# super(BERTEncoder, self).__init__(config, num_labels)\n# self.num_labels = num_labels\n# self.bert = BertModel(config)\n# self.dropout = nn.Dropout(config.hidden_dropout_prob)\n# self.hidden_size = config.hidden_size #!\n# self.classifier = nn.Linear(config.hidden_size, num_labels)\n# self.apply(self.init_bert_weights)\n \n def __init__(self, config, num_labels=2):\n super(BERTEncoder, self).__init__(config, num_labels)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.hidden_size = config.hidden_size #!\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n \n def get_embedding(self, VOCAB_SIZE):\n self.bert.embeddings.word_embeddings = nn.Embedding(VOCAB_SIZE, self.hidden_size, padding_idx=0)\n print(self.bert.embeddings)\n \n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n logits = swish(logits)\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits ;\n \n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n\n# Define the Decoder \nclass DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size):\n super(DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size ) #!!! , num_layers=3 ) \n #self.gru2 = nn.GRU(hidden_size, hidden_size ) #!!! 
, num_layers=3 ) \n self.out = nn.Linear(hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input, hidden):\n output = self.embedding(input).view(1, 1, -1)\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n #output, hidden = self.gru2(output, hidden)\n output = self.softmax(self.out(output[0]))\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device) ;\n \n \n \nclass AttnDecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):\n super(AttnDecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.dropout_p = dropout_p\n self.max_length = max_length\n\n self.embedding = nn.Embedding(self.output_size, self.hidden_size)\n self.attn = nn.Linear(self.hidden_size * 2, self.max_length)\n self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)\n self.dropout = nn.Dropout(self.dropout_p)\n self.birnn = nn.GRU(self.hidden_size, self.hidden_size, bidirectional=True ) #!!! added\n self.gru = nn.GRU(self.hidden_size, self.hidden_size)\n self.out = nn.Linear(self.hidden_size, self.output_size)\n\n def forward(self, input, hidden, encoder_outputs):\n #input, hidden = input_and_hidden\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n attn_weights = F.softmax(\n F.relu(self.attn(torch.cat((embedded[0], hidden[0]), 1))), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0),\n encoder_outputs.unsqueeze(0))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n\n bihidden = hidden.repeat(2,1,1).view(2,1,-1)\n bioutput, bihidden = self.birnn(output, bihidden ) #!!! 
added\n #print(\"\\n birnn--> output, hidden : \", output.size(), hidden.size() )\n bioutput, bihidden = bioutput.view([-1,1,256]), torch.mean(bihidden,dim=0,keepdim=True)\n output, hidden = self.gru(bioutput, bihidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n \n \n# Evaluation for the Model \n\"\"\"\ndef evaluate(encoder, decoder, sentence, training_ans, input_lang, output_lang, max_length=utils.MAX_LENGTH, rl=False):\n with torch.no_grad():\n input_tensor = utils.tensorFromSentence(input_lang, sentence, device )\n input_length = input_tensor.size()[0]\n \n encoder_hidden = encoder(input_tensor)\n \n encoder_hidden = encoder_hidden.unsqueeze(0)\n \n decoder_input = torch.tensor([[utils.SOS_token]], device=device)\n\n decoder_hidden = encoder_hidden \n\n decoded_words = [] #!\n\n for di in range(input_length):\n decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)\n topv, topi = decoder_output.topk(1)\n #!!!\n if topi.item() == utils.EOS_token:\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n #!!!\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n #if decoder_input.item() == utils.EOS_token:\n #break;\n\n decoded_sentence = str(\" \").join(decoded_words) \n \n if rl and (training_ans is not None):\n rewrd = reward.get_reward(decoded_sentence, training_ans )\n print(\"\\n -reward -> \", rewrd)\n return rewrd\n else:\n print(\"\\n -query -> \", decoded_sentence, \"\\n \")\n return decoded_sentence ;\n\"\"\"\n#\n#def evaluate(encoder, decoder, sentence, training_ans, input_lang, output_lang, max_length=utils.MAX_LENGTH, rl=False):\n# with torch.no_grad():\n# \n# input_tensor = utils.tensorFromSentence(input_lang, sentence, device )\n# \n# input_length = input_tensor.size(0)\n# print(\" evaluation input_length: \", input_length)\n# \n# encoder_hidden = encoder(input_tensor)\n# \n# encoder_hidden = encoder_hidden.unsqueeze(0)\n# \n# decoder_input = torch.tensor([[utils.SOS_token]], device=device)\n# \n# decoder_hidden = encoder_hidden\n# \n# #decoder_hidden_input = decoder_hidden #!!!\n#\n# # Without teacher forcing: use its own predictions as the next input\n# decoded_words = [] #!\n#\n# for di in range(input_length):\n# print(di, \" decoder_hidden shape: \", decoder_hidden.size(), \" \\n \", decoder_hidden )\n# decoder_hidden = decoder_hidden[:, 0, :]\n# #decoder_hidden = decoder_hidden_input[:, di, :] #!!!\n# decoder_hidden = decoder_hidden.view(1,1,256)\n# \n# decoder_output, decoder_hidden = decoder(\n# decoder_input, decoder_hidden)\n# topv, topi = decoder_output.topk(1)\n# #!!!\n# if topi.item() == utils.EOS_token:\n# decoded_words.append('<EOS>')\n# break\n# else:\n# decoded_words.append(output_lang.index2word[topi.item()])\n# #!!!\n# decoder_input = topi.squeeze().detach() # detach from history as input\n#\n# if decoder_input.item() == utils.EOS_token:\n# break;\n# \n# decoded_sentence = str(\" \").join(decoded_words) \n# print(\"\\n --query--> \", decoded_sentence, \"\\n \") \n# \n# if not rl or (training_ans is None):\n# return decoded_sentence \n# else:\n# rewrd = reward.get_reward(decoded_sentence, training_ans )\n# print(\"\\n --reward--> \", rewrd)\n# return rewrd \n# \n\n\ndef evaluate(encoder, decoder, sentence, training_ans, input_lang, output_lang, max_length=utils.MAX_LENGTH, rl=True):\n with torch.no_grad():\n \n input_tensor = 
utils.tensorFromSentence(input_lang, sentence, device )\n \n input_length = input_tensor.size(0)\n print(\" evaluation input_length: \", input_length) \n\n \"\"\"#!!! \n encoder_hidden = encoder.initHidden() \n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei] )\n encoder_outputs[ei] = encoder_output[0, 0] ; \"\"\" \n \n encoder_hidden = encoder(input_tensor)\n \n encoder_hidden = encoder_hidden.unsqueeze(0)\n \n decoder_input = torch.tensor([[utils.SOS_token]], device=device)\n \n decoder_hidden = encoder_hidden\n \n #decoder_hidden_input = decoder_hidden #!!!\n\n src = encoder_hidden.reshape([-1, 256])\n #print(\"\\n src: \", src.size(), \"\\t\", src.size(1) )\n #print(max_length, src.size(0))\n encoder_outputs = F.pad(src, (0,0,0,int(max_length-src.size(0))), \"constant\", 0)\n #print(encoder_outputs.size())\n\n # Without teacher forcing: use its own predictions as the next input\n decoded_words = [] #!\n\n for di in range(max_length):\n #print(di, \" decoder_hidden shape: \", decoder_hidden.size(), \" \\n \", decoder_hidden )\n decoder_hidden = decoder_hidden[:, 0, :] \n decoder_hidden = decoder_hidden.view(1,1,256)\n \n decoder_output, decoder_hidden, decoder_attention = decoder( #decoder_input, decoder_hidden)\n decoder_input, decoder_hidden, encoder_outputs )\n topv, topi = decoder_output.topk(1)\n #!!!\n if topi.item() == utils.EOS_token or (topi.item() == utils.SOS_token and di>1 ):\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n #!!!\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n if decoder_input.item() == utils.EOS_token or (decoder_input.item() == utils.SOS_token and di>1 ):\n break;\n \n decoded_sentence = str(\" \").join(decoded_words) \n print(\"\\n --query--> \", decoded_sentence, \"\\n \") \n \n if (not rl) or (training_ans is None):\n return decoded_sentence \n else:\n rewrd = reward.get_reward(decoded_sentence, training_ans )\n print(\" --reward--> \", rewrd, \"\\n\")\n return rewrd \n''' \ndef evaluate(encoder, decoder, sentence, training_ans, input_lang, output_lang, max_length=utils.MAX_LENGTH, rl=True):\n with torch.no_grad():\n input_tensor = utils.tensorFromSentence(input_lang, sentence, device=device)\n input_length = input_tensor.size()[0]\n encoder_hidden = encoder.initHidden()\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei],\n encoder_hidden)\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device) # SOS\n\n decoder_hidden = encoder_hidden\n\n decoded_words = []\n decoder_attentions = torch.zeros(max_length, max_length)\n\n for di in range(max_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n decoder_attentions[di] = decoder_attention.data\n topv, topi = decoder_output.data.topk(1)\n if topi.item() == EOS_token:\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n\n decoder_input = topi.squeeze().detach()\n\n decoded_sentence = str(\" \").join(decoded_words)\n print(\"\\n --query--> \", decoded_sentence, \"\\n \") \n \n if (not rl) or (training_ans is None):\n return decoded_sentence \n else:\n rewrd = reward.get_reward(decoded_sentence, 
training_ans )\n print(\"\\n --reward--> \", rewrd)\n return rewrd \n''' \n \ndef evaluateRandomly(encoder, decoder, eval_pairs, input_lang, output_lang, n=10, rl=True):\n #output_sentences = []\n rewrds = 0.0\n \n training_pairs = []\n training_answers = []#!\n\n for _ in range(n):\n pair = random.choice(eval_pairs)\n #training_pairs.append( utils.tensorsFromPair(pair[:-1], input_lang, output_lang, device) )\n training_pairs.append( pair[:-1] )\n training_answers.append(pair[-1])#!\n\n for pair, ans in zip(training_pairs, training_answers):\n #pair = random.choice(pairs)\n #print('>', pair[0])\n #print('=', pair[1])\n rewrd = evaluate(encoder, decoder, pair[0], ans, input_lang, output_lang, rl=rl)\n if rl:\n rewrds += rewrd ;\n #output_sentence = ' '.join(output_words)\n #print('<', output_sentence)\n #output_sentences.append(output_sentence)\n #print('')\n #rew += reward.get_reward(output_sentence, pair[-1])\n if rl:\n reward_value = rewrds/n\n return reward_value \n else:\n return 1.0; \n\n\n\n# Training the Model \n\"\"\"\nteacher_forcing_ratio = 0.5\n\ndef trainBert(input_tensor, target_tensor, encoder, decoder, training_ans, input_lang, output_lang, encoder_optimizer, decoder_optimizer, criterion, max_length=utils.MAX_LENGTH):\n\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n input_length = input_tensor.size(0)\n target_length = target_tensor.size(0)\n\n #encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n loss = 0.0\n \n encoder_hidden = encoder(input_tensor)\n \n encoder_hidden = encoder_hidden.unsqueeze(0)\n\n #for ei in range(input_length):\n #encoder_output, encoder_hidden = encoder(\n #input_tensor[ei], encoder_hidden)\n #encoder_outputs[ei] = encoder_output[0, 0]\n\n decoder_input = torch.tensor([[utils.SOS_token]], device=device)\n\n decoder_hidden = encoder_hidden\n\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n rewards = [] #!\n\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden = decoder(\n decoder_input, decoder_hidden)\n loss += criterion(decoder_output, target_tensor[di])\n decoder_input = target_tensor[di] # Teacher forcing\n v = loss.detach().numpy()\n print(\"\\t * %s step xentrop: \"%str(di), float(v/(di+1.0)), \" \\n\" ) #!\n else:\n # Without teacher forcing: use its own predictions as the next input\n decoded_words = [] #!\n\n for di in range(target_length):\n decoder_output, decoder_hidden = decoder(\n decoder_input, decoder_hidden)\n topv, topi = decoder_output.topk(1)\n #!!!\n if topi.item() == EOS_token:\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n #!!!\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n decoded_sentence = str(\" \").join(decoded_words) \n print(\"\\n -query -> \", decoded_sentence, \"\\n \")\n rew = reward.get_reward(decoded_sentence, training_ans )\n rewards.append(rew)\n print(\"\\n -reward -> \", rew)\n loss += criterion(decoder_output, target_tensor[di])\n v = loss.detach().numpy()\n print(\"\\n\\t * %s step xentrop: \"%str(di), float(v/(di+1.0)) )#!\n print(\"\\n \")#!\n if decoder_input.item() == utils.EOS_token:\n break;\n \n #_, rewrd = evaluateRandomly(encoder, decoder, pairs, input_lang, output_lang, n=target_length)\n if np.mean(rewards) >= 1.0:\n loss = torch.mul(loss, torch.FloatTensor([np.mean(rewards)]))\n else:\n pass;\n \n var = 
loss.detach().numpy()/target_length\n print(\"\\n Loss:\", var)\n\n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item()/target_length \n\"\"\"\n\n\ndef trainBert(input_tensor, target_tensor, encoder, decoder, eval_pairs, input_lang, output_lang, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH, teacher_forcing_ratio=0.5, rl=True ):\n\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n input_length = input_tensor.size(0)\n target_length = target_tensor.size(0)\n\n #encoder_outputs = torch.zeros(max_length, 256, device=device) #!!! (max_length, encoder.hidden_size, device=device)\n \n loss = 0\n \n encoder_hidden = encoder(input_tensor)\n \n encoder_hidden = encoder_hidden.unsqueeze(0)\n #print(\"\\n real encoder_hidden: \", encoder_hidden.size(), \" \\n\")\n\n src = encoder_hidden.reshape([-1, 256])\n #print(\"\\n src: \", src.size(), \"\\t\", src.size(1) )\n #print(MAX_LENGTH, src.size(0))\n encoder_outputs = F.pad(src, (0,0,0,int(MAX_LENGTH-src.size(0))), \"constant\", 0)\n #print(encoder_outputs.size())\n\n decoder_input = torch.tensor([[utils.SOS_token]], device=device)\n\n decoder_hidden = encoder_hidden[0, 0].view(1,1,-1) #!!! v02 \n #encoder_outputs = decoder_hidden\n\n use_teacher_forcing = False #True if random.random() < teacher_forcing_ratio else False\n #print(\" * T-forcing ratio: \", teacher_forcing_ratio , str(use_teacher_forcing) )\n #!!!\n decoded_words = []\n \n #!!!\n losses = []\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n #print(\" * Teacher forcing: \")\n #print(\"\\n decoder_input, decoder_hidden, encoder_outputs : \",\n # decoder_input.size(), decoder_hidden.size(), encoder_outputs.size() )\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(input=decoder_input, \\\n hidden=decoder_hidden, encoder_outputs=encoder_outputs )#!!!\n # decoder_input, decoder_hidden)\n loss += criterion(decoder_output, target_tensor[di])\n\n loss_ = loss\n v = float(loss_.detach().numpy()/(di+1.0)) \n losses.append(v)\n \n decoder_input = target_tensor[di] # Teacher forcing\n\n if decoder_input.item() == utils.EOS_token or (decoder_input.item() == utils.SOS_token and di>1 ):\n break\n else:\n # Without teacher forcing: use its own predictions as the next input\n #print(\" * Not teacher forcing: \")\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(input=decoder_input, \\\n hidden=decoder_hidden, encoder_outputs=encoder_outputs )#!!!\n # decoder_input, decoder_hidden)\n topv, topi = decoder_output.topk(1)\n #!!!\n if topi.item() == utils.EOS_token :#or (topi.item() == utils.SOS_token and di>1 ):\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n \n #!!!\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n loss += criterion(decoder_output, target_tensor[di])\n\n loss_ = loss\n v = float(loss_.detach().numpy()/(di+1.0))\n losses.append(v) \n\n if decoder_input.item() == utils.EOS_token :#or (decoder_input.item() == utils.SOS_token and di>1 ):\n break\n\n #tgt_sent = str(\" \").join([output_lang.index2word[i] for i in list(target_tensor.detach().numpy()) if i in output_lang.index2word.keys()])\n nmt_sent = str(\" \").join(decoded_words)\n '''print(\" tgt: \", target_tensor.detach().numpy())\n print(\" output: \", output_tensor.detach().numpy())'''\n print(\"\\n nmt_sent: \", nmt_sent, \" \\n\")\n\n if rl 
and (np.mean(losses) < 1.0) :\n reward_value = evaluateRandomly(encoder, decoder, eval_pairs, input_lang, output_lang, n=10, rl=rl) \n if 2.0 > reward_value > 1.0:\n loss = torch.mul(loss, torch.FloatTensor([reward_value]) )\n else:\n pass;\n\n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item()/target_length\n \n\n\ndef trainItersBert(encoder, decoder, n_iters, training_pairs, eval_pairs, input_lang, output_lang, print_every=1000, plot_every=100, learning_rate=0.01, mom=0, model_name=\"qald-test\"):\n #start = time.time()\n plot_losses = []\n losses_trend = []\n \n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate, momentum=mom)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate, momentum=mom)\n \n #encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate, amsgrad=True)\n #encoder_scheduler = optim.lr_scheduler.CosineAnnealingLR(encoder_optimizer, n_iters)\n #decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate, amsgrad=True)\n #decoder_scheduler = optim.lr_scheduler.CosineAnnealingLR(decoder_optimizer, n_iters) \n\n teacher_forcing_ratio = 1.0\n\n criterion = nn.NLLLoss()\n '''\n input_tensors, target_tensors, train_pairs = [], [], []\n for pair in training_pairs:\n tensors = utils.tensorsFromPair(pair, input_lang, output_lang, device)\n train_pairs.append(tensors)\n #print(\"tensor shape--> \", tensors[0].size())\n #print(tensors[0])\n input_tensors.append(tensors[0].view(-1,1).long()) #float() #!!! \n target_tensors.append(tensors[1].view(-1,1).long()) #!!!\n\n print(\"\\n Dataset preparing... \")\n input_tensors = rnn_utils.pad_sequence(input_tensors, batch_first=True, padding_value=0)\n target_tensors = rnn_utils.pad_sequence(target_tensors, batch_first=True, padding_value=0)\n \n torch.save(input_tensors, \"./model/input_tensors.pt\")\n torch.save(target_tensors, \"./model/target_tensors.pt\")'''\n\n eval_tensors = [utils.tensorsFromPair(pair, input_lang, output_lang, device) for pair in eval_pairs ] \n eval_inputs = [ tensors[0] for tensors in eval_tensors ]\n eval_targets = [ tensors[1] for tensors in eval_tensors ]\n \n eval_inputs = rnn_utils.pad_sequence(eval_inputs, batch_first=True, padding_value=0)\n eval_targets = rnn_utils.pad_sequence(eval_targets, batch_first=True, padding_value=0)\n\n #input_tensors, target_tensors = utils.padding(input_tensors, target_tensors )\n '''torch_dataset = utils.TxtDataset(input_tensors, target_tensors )'''\n torch_dataset = utils.TxtDataset(eval_inputs, eval_targets )\n \n # put the dataset into DataLoader\n loader = Data.DataLoader(\n dataset=torch_dataset,\n batch_size=6, # MINIBATCH_SIZE\n shuffle=True,\n drop_last= True,\n num_workers= 2 if utils.getOSystPlateform() else 0 # set multi-work num read data\n #collate_fn= utils.collate_fn #!!! \n ) \n print(\" Dataset loader ready, begin training. \\n\") \n\n datset_len = len(loader)\n savepoint = datset_len//4 #12\n print(\"\\n Dataset loader length is \", datset_len, \", save model every %d batches. \"%savepoint )\n\n for epoch in range(1, n_iters + 1):\n # an epoch goes the whole data\n for batch, (batch_input, batch_target) in enumerate(loader):\n # here to train your model\n print('\\n\\n - Epoch ', epoch, ' | batch ', batch, '\\n | input lenght: ', batch_input.size(1), '\\n | target length: ', batch_target.size(1) ,\" \\n\") \n \n #input_tensor, target_tensor = batch_input, batch_target #!!! 
\n print(\" * T-forcing ratio: \", teacher_forcing_ratio )\n try:\n batch_input = batch_input.reshape( [6, -1, 1] ) #!!! [6, 1, -1] \n batch_target = batch_target.reshape( [6, -1, 1] )\n except:\n pass ;\n\n input_lens = [utils.getNzeroSize(tensor) for tensor in batch_input ]\n target_lens = [utils.getNzeroSize(tensor) for tensor in batch_target ]\n\n rl = False #True if (epoch > 1) and ( np.mean(losses_trend)<1.0 and len(losses_trend)>1 ) else False #!!! and / or \n\n loss = 0\n for batch_input_item, batch_target_item in zip(batch_input, batch_target):\n #print(\"\\n\\t batch_input_item, batch_target_item : \", batch_input_item.size(), batch_target_item.size() )\n loss += trainBert(batch_input_item, batch_target_item, encoder, decoder, eval_pairs, \\\n input_lang, output_lang, encoder_optimizer, decoder_optimizer, criterion, \\\n teacher_forcing_ratio = teacher_forcing_ratio, rl=rl )\n loss = loss/6.0\n plot_losses.append( loss )\n\n print(\"\\t- the %s batch xentropy loss: \"%str(str(epoch)+\".\"+str(batch)), loss, \" \" )\n\n teacher_forcing_ratio = utils.teacher_force(float(loss) ) ;\n\n '''if 0 == batch%savepoint and batch > 1:\n print(\"\\n Batch %d savepoint, save the trained model...\\n\"%batch )\n save_model(encoder, decoder, plot_losses, model_name ) ;'''\n \n losses_trend.append(np.mean(plot_losses))\n plot_losses.clear()\n\n if epoch > 1 and 0 == epoch%5 :\n save_model(encoder, decoder, losses_trend, model_name ) \n '''if epoch > 5 and 0 == epoch%5 :\n utils.showPlot(losses_trend, model_name, \"epoch\"+str(epoch) )'''\n print(\"\\n Finish Epoch %d -- model saved. \\n \"%epoch ); #!!!\n\n\ndef save_model(encoder, decoder, plot_losses, model_name ):\n stamp = str(time.time())\n savepath = utils.prepare_dir( model_name, stamp)\n torch.save(encoder.state_dict(), savepath+\"/%s.encoder\"%stamp )\n torch.save(decoder.state_dict(), savepath+\"/%s.decoder\"%stamp )\n try:\n utils.showPlot(plot_losses, model_name, stamp ) \n except:\n pass ;\n print(\" * model save with time stamp: \", stamp )\n\n\n\n\n " ]
[ [ "torch.mean", "torch.sigmoid", "torch.nn.NLLLoss", "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.nn.CrossEntropyLoss", "torch.zeros", "torch.cat", "torch.nn.utils.rnn.pad_sequence", "torch.nn.GRU", "torch.nn.Embedding", "torch.tensor", "torch.nn.Linear", "torch.nn.functional.relu", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mewiss/OpenLabeling
[ "9f41c3c8a0a5d6cfc9b939778cd4ae2ed09caa4c" ]
[ "main/dasiamrpn.py" ]
[ "\"\"\"\nAuthor : Will Stone\nDate : 190407\nDesc : Wrapper class for the DaSiamRPN tracking method. This class has the\n methods required to interface with the tracking class implemented\n in main.py within the OpenLabeling package.\n\"\"\"\nimport torch\nimport numpy as np\nimport sys\nfrom os.path import realpath, dirname, join, exists\n# set device, depending on whether cuda is available\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ntry:\n from DaSiamRPN.code.run_SiamRPN import SiamRPN_init, SiamRPN_track\nexcept ImportError:\n # check if the user has downloaded the submodules\n if not exists(join('DaSiamRPN', 'code', 'net.py')):\n print('Error: DaSiamRPN files not found. Please run the following command:')\n print('\\tgit submodule update --init')\n exit()\n else:\n # if python 3\n if sys.version_info >= (3, 0):\n sys.path.append(realpath(join('DaSiamRPN', 'code')))\n else:\n # check if __init__py files exist (otherwise create them)\n path_temp = join('DaSiamRPN', 'code', '__init__.py')\n if not exists(path_temp):\n open(path_temp, 'w').close()\n path_temp = join('DaSiamRPN', '__init__.py')\n if not exists(path_temp):\n open(path_temp, 'w').close()\n # try to import again\n from DaSiamRPN.code.run_SiamRPN import SiamRPN_init, SiamRPN_track\nfrom DaSiamRPN.code.utils import get_axis_aligned_bbox, cxy_wh_2_rect\nfrom DaSiamRPN.code.net import SiamRPNvot\n\nclass dasiamrpn(object):\n \"\"\"\n Wrapper class for incorporating DaSiamRPN into OpenLabeling\n (https://github.com/foolwood/DaSiamRPN,\n https://github.com/Cartucho/OpenLabeling)\n \"\"\"\n\n def __init__(self):\n self.net = SiamRPNvot()\n # check if SiamRPNVOT.model was already downloaded (otherwise download it now)\n model_path = join(realpath(dirname(__file__)), 'DaSiamRPN', 'code', 'SiamRPNVOT.model')\n print(model_path)\n if not exists(model_path):\n print('\\nError: module not found. Please download the pre-trained model and copy it to the directory \\'DaSiamRPN/code/\\'\\n')\n print('\\tdownload link: https://github.com/fogx/DaSiamRPN_noCUDA/blob/master/SiamRPNVOT.model')\n exit()\n \n\n\n if(torch.cuda.is_available()):\n self.net.load_state_dict(torch.load(model_path))\n else:\n self.net.load_state_dict(torch.load(model_path,map_location='cpu'))\n self.net.eval().to(device)\n \n\n def init(self, init_frame, initial_bbox):\n \"\"\"\n Initialize DaSiamRPN tracker with inital frame and bounding box.\n \"\"\"\n target_pos, target_sz = self.bbox_to_pos(initial_bbox)\n self.state = SiamRPN_init(\n init_frame, target_pos, target_sz, self.net)\n\n def update(self, next_image):\n \"\"\"\n Update bounding box position and size on next_image. 
Returns True\n because tracking is terminated based on the number of frames predicted\n in OpenLabeling, not based on feedback from the tracking algorithm (unlike\n the opencv tracking algorithms).\n \"\"\"\n self.state = SiamRPN_track(self.state, next_image)\n target_pos = self.state[\"target_pos\"]\n target_sz = self.state[\"target_sz\"]\n bbox = self.pos_to_bbox(target_pos, target_sz)\n\n return True, bbox\n\n def bbox_to_pos(self, initial_bbox):\n \"\"\"\n Convert bounding box format from a tuple format containing\n xmin, ymin, width, and height to a tuple of two arrays which contain\n the x and y coordinates of the center of the box and its width and\n height respectively.\n \"\"\"\n xmin, ymin, w, h = initial_bbox\n cx = int(xmin + w/2)\n cy = int(ymin + h/2)\n target_pos = np.array([cx, cy])\n target_sz = np.array([w, h])\n\n return target_pos, target_sz\n\n def pos_to_bbox(self, target_pos, target_sz):\n \"\"\"\n Invert the bounding box format produced in the above conversion\n function.\n \"\"\"\n w = target_sz[0]\n h = target_sz[1]\n xmin = int(target_pos[0] - w/2)\n ymin = int(target_pos[1] - h/2)\n\n return xmin, ymin, w, h\n" ]
[ [ "numpy.array", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
llhthinker/slot-filling
[ "824258fb5d7f1d6ded8b20e0398c9dd415a17c3d" ]
[ "models/rnn.py" ]
[ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass SlotFilling(nn.Module):\n def __init__(self, vocab_size, label_size, mode='elman', bidirectional=False, cuda=False, is_training=True):\n \n super(SlotFilling, self).__init__()\n self.is_training = is_training\n embedding_dim = 100\n hidden_size = 75\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n \n if mode == 'lstm':\n self.rnn = nn.LSTM(input_size=embedding_dim,\n hidden_size=hidden_size,\n bidirectional=bidirectional,\n batch_first=True)\n else:\n self.rnn = RNN(input_size=embedding_dim,\n hidden_size=hidden_size,\n mode=mode,\n cuda=cuda,\n bidirectional=bidirectional,\n batch_first=True)\n if bidirectional: \n self.fc = nn.Linear(2*hidden_size, label_size)\n else:\n self.fc = nn.Linear(hidden_size, label_size)\n\n def forward(self, X):\n embed = self.embedding(X)\n embed = F.dropout(embed, p=0.2, training=self.is_training)\n outputs, _ = self.rnn(embed)\n outputs = self.fc(outputs)\n return outputs\n\n\nclass ElmanRNNCell(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(ElmanRNNCell, self).__init__()\n self.hidden_size = hidden_size\n self.i2h_fc1 = nn.Linear(input_size, hidden_size)\n self.i2h_fc2 = nn.Linear(hidden_size, hidden_size)\n self.h2o_fc = nn.Linear(hidden_size, hidden_size)\n\n def forward(self, input, hidden):\n hidden = F.sigmoid(self.i2h_fc1(input) + self.i2h_fc2(hidden))\n output = F.sigmoid(self.h2o_fc(hidden))\n return output, hidden\n\n\nclass JordanRNNCell(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(JordanRNNCell, self).__init__()\n self.hidden_size = hidden_size\n self.i2h_fc1 = nn.Linear(input_size, hidden_size) \n self.i2h_fc2 = nn.Linear(hidden_size, hidden_size)\n self.h2o_fc = nn.Linear(hidden_size, hidden_size)\n self.y_0 = nn.Parameter(nn.init.xavier_uniform(torch.Tensor(1, hidden_size)), requires_grad=True)\n\n def forward(self, input, hidden=None):\n if hidden is None:\n hidden = self.y_0\n hidden = F.sigmoid(self.i2h_fc1(input) + self.i2h_fc2(hidden))\n output = F.sigmoid(self.h2o_fc(hidden))\n return output, output\n\n\nclass HybridRNNCell(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(HybridRNNCell, self).__init__()\n self.hidden_size = hidden_size\n self.i2h_fc1 = nn.Linear(input_size, hidden_size)\n self.i2h_fc2 = nn.Linear(hidden_size, hidden_size)\n self.i2h_fc3 = nn.Linear(hidden_size, hidden_size)\n self.h2o_fc = nn.Linear(hidden_size, hidden_size)\n self.y_0 = nn.Parameter(nn.init.xavier_uniform(torch.Tensor(1, hidden_size)), requires_grad=True)\n\n def forward(self, input, hidden, output=None):\n if output is None:\n output = self.y_0 \n hidden = F.sigmoid(self.i2h_fc1(input)+self.i2h_fc2(hidden)+self.i2h_fc3(output))\n output = F.sigmoid(self.h2o_fc(hidden))\n return output, hidden\n\n\nclass RNN(nn.Module):\n\n def __init__(self, input_size, hidden_size, mode='elman', cuda=False, bidirectional=False, batch_first=True):\n super(RNN, self).__init__()\n self.mode = mode\n self.cuda = cuda\n if mode == 'elman':\n RNNCell = ElmanRNNCell\n elif mode == 'jordan':\n RNNCell = JordanRNNCell\n elif mode == 'hybrid':\n RNNCell = HybridRNNCell\n else:\n raise RuntimeError(mode + \" is not a simple rnn mode\")\n self.forward_cell = RNNCell(input_size=input_size,\n hidden_size=hidden_size)\n self.hidden_size = hidden_size\n self.bidirectional = bidirectional\n self.batch_first = batch_first\n if bidirectional:\n self.reversed_cell = RNNCell(input_size=input_size,\n 
hidden_size=hidden_size)\n\n def _forward(self, inputs, hidden):\n outputs = []\n seq_len = inputs.size(1)\n # batch_size*seq_len*n\n # -> seq_len*batch_size*n\n inputs = inputs.transpose(0, 1)\n output = None\n for i in range(seq_len):\n step_input = inputs[i] # batch_size*n\n if self.mode == 'hybrid':\n output, hidden = self.forward_cell(step_input, hidden, output)\n else:\n output, hidden = self.forward_cell(step_input, hidden)\n outputs.append(output)\n\n return outputs, hidden\n\n def _reversed_forward(self, inputs, hidden):\n outputs = []\n seq_len = inputs.size(1)\n # batch_size*seq_len*n\n # -> seq_len*batch_size*n\n inputs = inputs.transpose(0, 1)\n output = None\n for i in range(seq_len):\n step_input = inputs[seq_len-i-1] # batch_size*n\n if self.mode == 'hybrid':\n output, hidden = self.reversed_cell(step_input, hidden, output) \n else:\n output, hidden = self.reversed_cell(step_input, hidden)\n outputs.append(output)\n\n outputs.reverse()\n return outputs, hidden\n\n def forward(self, inputs, hidden=None): \n if hidden is None and self.mode != \"jordan\":\n batch_size = inputs.size(0)\n hidden = torch.autograd.Variable(torch.zeros(batch_size,\n self.hidden_size))\n if self.cuda:\n hidden = hidden.cuda()\n\n output_forward, hidden_forward = self._forward(inputs, hidden)\n output_forward = torch.stack(output_forward, dim=0)\n if not self.bidirectional:\n if self.batch_first:\n output_forward = output_forward.transpose(0,1)\n return output_forward, hidden_forward\n\n output_reversed, hidden_reversed = self._reversed_forward(inputs, hidden)\n hidden = torch.cat([hidden_forward, hidden_reversed], dim=hidden_forward.dim() - 1)\n output_reversed = torch.stack(output_reversed, dim=0)\n output = torch.cat([output_forward, output_reversed],\n dim=output_reversed.data.dim() - 1)\n if self.batch_first:\n output = output.transpose(0,1)\n return output, hidden\n\n" ]
[ [ "torch.Tensor", "torch.nn.LSTM", "torch.nn.functional.dropout", "torch.zeros", "torch.nn.Embedding", "torch.nn.Linear", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
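The SlotFilling record above implements Elman, Jordan, and hybrid recurrences by unrolling a cell over time inside `RNN._forward`. As a minimal sketch of that unrolling pattern (PyTorch only; `TinyElmanCell` and the sizes below are illustrative, not taken from the record):

```python
import torch
from torch import nn

class TinyElmanCell(nn.Module):
    """Mirror of the record's Elman recurrence:
    hidden = sigmoid(W_ih x + W_hh h), output = sigmoid(W_ho hidden)."""
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.i2h = nn.Linear(input_size, hidden_size)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.h2o = nn.Linear(hidden_size, hidden_size)

    def forward(self, x, h):
        h = torch.sigmoid(self.i2h(x) + self.h2h(h))
        return torch.sigmoid(self.h2o(h)), h

batch, seq_len, n_in, n_hid = 4, 7, 10, 5
cell = TinyElmanCell(n_in, n_hid)
x = torch.randn(batch, seq_len, n_in)
h = torch.zeros(batch, n_hid)          # zero initial state, as in RNN.forward
outs = []
for t in range(seq_len):               # unroll over time, batch-first layout
    o, h = cell(x[:, t, :], h)
    outs.append(o)
out = torch.stack(outs, dim=1)
print(out.shape)                       # torch.Size([4, 7, 5])
```

The Jordan variant differs only in feeding the previous output back in place of a separate hidden state, which is why the record's `JordanRNNCell.forward` returns `output, output`.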
ammsa23/dials
[ "d9c6cdde2ee0abb3989596c91c80ab585e47f296" ]
[ "algorithms/indexing/ssx/analysis.py" ]
[ "from __future__ import annotations\n\nimport logging\nimport math\nfrom typing import List\n\nimport numpy as np\nfrom jinja2 import ChoiceLoader, Environment, PackageLoader\n\nfrom scitbx.array_family import flex\nfrom xfel.clustering.cluster import Cluster\nfrom xfel.clustering.cluster_groups import unit_cell_info\n\nfrom dials.algorithms.clustering import plots as cluster_plotter\nfrom dials.util import tabulate\n\nlogger = logging.getLogger(\"dials.algorithms.indexing.ssx.analysis\")\n\n\ndef generate_html_report(plots: dict, filename: str) -> None:\n loader = ChoiceLoader(\n [\n PackageLoader(\"dials\", \"templates\"),\n PackageLoader(\"dials\", \"static\", encoding=\"utf-8\"),\n ]\n )\n env = Environment(loader=loader)\n template = env.get_template(\"simple_report.html\")\n html = template.render(\n page_title=\"DIALS SSX indexing report\",\n panel_title=\"Indexing plots\",\n graphs=plots,\n )\n with open(filename, \"wb\") as f:\n f.write(html.encode(\"utf-8\", \"xmlcharrefreplace\"))\n\n\ndef make_summary_table(results_summary: dict) -> tabulate:\n # make a summary table\n overall_summary_header = [\n \"Image\",\n \"expt_id\",\n \"n_indexed\",\n \"RMSD X\",\n \"RMSD Y\",\n \"RMSD dPsi\",\n ]\n\n rows = []\n total = 0\n if any(len(v) > 1 for v in results_summary.values()):\n show_lattices = True\n overall_summary_header.insert(1, \"lattice\")\n else:\n show_lattices = False\n for k in sorted(results_summary.keys()):\n for j, cryst in enumerate(results_summary[k]):\n if not cryst[\"n_indexed\"]:\n continue\n n_idx, n_strong = (cryst[\"n_indexed\"], cryst[\"n_strong\"])\n frac_idx = f\"{n_idx}/{n_strong} ({100*n_idx/n_strong:2.1f}%)\"\n row = [\n cryst[\"Image\"],\n str(total),\n frac_idx,\n cryst[\"RMSD_X\"],\n cryst[\"RMSD_Y\"],\n cryst[\"RMSD_dPsi\"],\n ]\n if show_lattices:\n row.insert(1, j + 1)\n rows.append(row)\n total += 1\n\n summary_table = tabulate(rows, overall_summary_header)\n return summary_table\n\n\ndef combine_results_dicts(results_summaries: List[dict]) -> dict:\n \"\"\"For a list of dictionaries, each with keys 0..n-1,\n combine into a single dictionary with keys 0..ntot-1\"\"\"\n combined_summary = {}\n n_overall = 0\n for d in results_summaries:\n n_this = len(d)\n for i in range(n_this):\n combined_summary[i + n_overall] = d.pop(i)\n n_overall += n_this\n return combined_summary\n\n\ndef make_cluster_plots(large_clusters: List[Cluster]) -> dict:\n cluster_plots = {}\n for n, cluster in enumerate(large_clusters):\n uc_params = [flex.double() for i in range(6)]\n for c in cluster.members:\n ucp = c.crystal_symmetry.unit_cell().parameters()\n for i in range(6):\n uc_params[i].append(ucp[i])\n d_this = cluster_plotter.plot_uc_histograms(uc_params)\n d_this[\"uc_scatter\"][\"layout\"][\"title\"] += f\" cluster {n+1}\"\n d_this[\"uc_hist\"][\"layout\"][\"title\"] += f\" cluster {n+1}\"\n d_this[f\"uc_scatter_{n}\"] = d_this.pop(\"uc_scatter\")\n d_this[f\"uc_hist_{n}\"] = d_this.pop(\"uc_hist\")\n cluster_plots.update(d_this)\n return cluster_plots\n\n\ndef report_on_crystal_clusters(crystal_symmetries, make_plots=True):\n ucs = Cluster.from_crystal_symmetries(crystal_symmetries)\n clusters, _ = ucs.ab_cluster(5000, log=None, write_file_lists=False, doplot=False)\n cluster_plots = {}\n min_cluster_pc = 5\n threshold = math.floor((min_cluster_pc / 100) * len(crystal_symmetries))\n large_clusters = [c for c in clusters if len(c.members) > threshold]\n large_clusters.sort(key=lambda x: len(x.members), reverse=True)\n\n if large_clusters:\n logger.info(\n f\"\"\"\nUnit 
cell clustering analysis, clusters with >{min_cluster_pc}% of the number of crystals indexed\n{unit_cell_info(large_clusters)}\n\"\"\"\n )\n if make_plots:\n cluster_plots = make_cluster_plots(large_clusters)\n else:\n logger.info(\n f\"No clusters found with >{min_cluster_pc}% of the number of crystals.\"\n )\n return cluster_plots, large_clusters\n\n\ndef generate_plots(summary_data: dict) -> dict:\n \"\"\"Generate indexing plots from the summary data from index_all_concurrent\"\"\"\n # n_indexed_arrays are cumulative n_indexed for nth lattice\n n_indexed_arrays = [np.zeros(len(summary_data))]\n rmsd_x_arrays = [np.zeros(len(summary_data))]\n rmsd_y_arrays = [np.zeros(len(summary_data))]\n rmsd_z_arrays = [np.zeros(len(summary_data))]\n n_total_indexed = np.zeros(len(summary_data))\n n_strong_array = np.zeros(len(summary_data))\n images = np.arange(1, len(summary_data) + 1)\n n_lattices = 1\n\n for k in sorted(summary_data.keys()):\n n_lattices_this = len(summary_data[k])\n n_strong_array[k] = summary_data[k][0][\"n_strong\"]\n for j, cryst in enumerate(summary_data[k]):\n if not cryst[\"n_indexed\"]:\n continue\n if n_lattices_this > n_lattices:\n for _ in range(n_lattices_this - n_lattices):\n n_indexed_arrays.append(np.zeros(len(summary_data)))\n rmsd_x_arrays.append(np.zeros(len(summary_data)))\n rmsd_y_arrays.append(np.zeros(len(summary_data)))\n rmsd_z_arrays.append(np.zeros(len(summary_data)))\n n_lattices = n_lattices_this\n n_indexed_arrays[j][k] = cryst[\"n_indexed\"]\n rmsd_x_arrays[j][k] = cryst[\"RMSD_X\"]\n rmsd_y_arrays[j][k] = cryst[\"RMSD_Y\"]\n rmsd_z_arrays[j][k] = cryst[\"RMSD_dPsi\"]\n n_total_indexed[k] += cryst[\"n_indexed\"]\n\n n_indexed_data = [\n {\n \"x\": images.tolist(),\n \"y\": n_indexed_arrays[0].tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": \"N indexed\",\n },\n ]\n rmsd_data = [\n {\n \"x\": images[rmsd_x_arrays[0] > 0].tolist(),\n \"y\": rmsd_x_arrays[0][rmsd_x_arrays[0] > 0].tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": \"RMSD X\",\n },\n {\n \"x\": images[rmsd_y_arrays[0] > 0].tolist(),\n \"y\": rmsd_y_arrays[0][rmsd_y_arrays[0] > 0].tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": \"RMSD Y\",\n },\n ]\n rmsdz_data = [\n {\n \"x\": images[rmsd_z_arrays[0] > 0].tolist(),\n \"y\": rmsd_z_arrays[0][rmsd_z_arrays[0] > 0].tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": \"RMSD dPsi\",\n },\n ]\n if n_lattices > 1:\n n_indexed_data[0][\"name\"] += \" (lattice 1)\"\n rmsd_data[0][\"name\"] += \" (lattice 1)\"\n rmsd_data[1][\"name\"] += \" (lattice 1)\"\n rmsdz_data[0][\"name\"] += \" (lattice 1)\"\n for i, arr in enumerate(n_indexed_arrays[1:]):\n sub_images = images[arr > 0]\n sub_data = arr[arr > 0]\n n_indexed_data.append(\n {\n \"x\": sub_images.tolist(),\n \"y\": sub_data.tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": f\"N indexed (lattice {i+2})\",\n }\n )\n for i, arr in enumerate(rmsd_x_arrays[1:]):\n sub_images = images[arr > 0]\n sub_data_x = arr[arr > 0]\n sub_data_y = rmsd_y_arrays[i + 1][arr > 0]\n rmsd_data.append(\n {\n \"x\": sub_images.tolist(),\n \"y\": sub_data_x.tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": f\"RMSD X (lattice {i+2})\",\n },\n )\n rmsd_data.append(\n {\n \"x\": sub_images.tolist(),\n \"y\": sub_data_y.tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": f\"RMSD Y (lattice {i+2})\",\n },\n )\n for i, arr in enumerate(rmsd_z_arrays[1:]):\n sub_images = 
images[arr > 0]\n sub_data = arr[arr > 0]\n rmsdz_data.append(\n {\n \"x\": sub_images.tolist(),\n \"y\": sub_data.tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": f\"RMSD dPsi (lattice {i+2})\",\n },\n )\n percent_indexed = 100 * n_total_indexed / n_strong_array\n images = images.tolist()\n n_indexed_data.append(\n {\n \"x\": images,\n \"y\": n_strong_array.tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": \"N strong\",\n },\n )\n\n percent_bins = np.linspace(0, 100, 51)\n percent_hist = np.histogram(percent_indexed, percent_bins)[0]\n\n def _generate_hist_data(rmsd_arrays, step=0.01):\n all_rmsd = np.concatenate(rmsd_arrays)\n all_rmsd = all_rmsd[all_rmsd > 0]\n mult = int(1 / 0.01)\n start = math.floor(np.min(all_rmsd) * mult) / mult\n stop = math.ceil(np.max(all_rmsd) * mult) / mult\n nbins = int((stop - start) / step)\n hist, bin_edges = np.histogram(\n all_rmsd,\n bins=nbins,\n range=(start, stop),\n )\n bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2\n return hist, bin_centers\n\n hist_x, bin_centers_x = _generate_hist_data(rmsd_x_arrays)\n hist_y, bin_centers_y = _generate_hist_data(rmsd_y_arrays)\n hist_z, bin_centers_z = _generate_hist_data(rmsd_z_arrays, 0.001)\n\n plots = {\n \"n_indexed\": {\n \"data\": n_indexed_data,\n \"layout\": {\n \"title\": \"Number of indexed reflections per image\",\n \"xaxis\": {\"title\": \"image number\"},\n \"yaxis\": {\"title\": \"N reflections\"},\n },\n },\n \"percent_indexed\": {\n \"data\": [\n {\n \"x\": images,\n \"y\": percent_indexed.tolist(),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"name\": \"Percentage of strong spots indexed\",\n }\n ],\n \"layout\": {\n \"title\": \"Percentage of strong spots indexed per image\",\n \"xaxis\": {\"title\": \"image number\"},\n \"yaxis\": {\"title\": \"Percentage\"},\n },\n },\n \"percent_indexed_hist\": {\n \"data\": [\n {\n \"x\": percent_bins.tolist(),\n \"y\": percent_hist.tolist(),\n \"type\": \"bar\",\n }\n ],\n \"layout\": {\n \"title\": \"Distribution of percentage indexed\",\n \"xaxis\": {\"title\": \"Percentage indexed\"},\n \"yaxis\": {\"title\": \"Number of images\"},\n \"bargap\": 0,\n },\n },\n \"rmsds\": {\n \"data\": rmsd_data,\n \"layout\": {\n \"title\": \"RMSDs (x, y) per image\",\n \"xaxis\": {\"title\": \"image number\"},\n \"yaxis\": {\"title\": \"RMSD (px)\"},\n },\n },\n \"rmsdz\": {\n \"data\": rmsdz_data,\n \"layout\": {\n \"title\": \"RMSD (dPsi) per image\",\n \"xaxis\": {\"title\": \"image number\"},\n \"yaxis\": {\"title\": \"RMSD dPsi (deg)\"},\n },\n },\n \"rmsdxy_hist\": {\n \"data\": [\n {\n \"x\": bin_centers_x.tolist(),\n \"y\": hist_x.tolist(),\n \"type\": \"bar\",\n \"name\": \"RMSD X\",\n \"opacity\": 0.6,\n },\n {\n \"x\": bin_centers_y.tolist(),\n \"y\": hist_y.tolist(),\n \"type\": \"bar\",\n \"name\": \"RMSD Y\",\n \"opacity\": 0.6,\n },\n ],\n \"layout\": {\n \"title\": \"Distribution of RMSDs (x, y)\",\n \"xaxis\": {\"title\": \"RMSD (px)\"},\n \"yaxis\": {\"title\": \"Number of images\"},\n \"bargap\": 0,\n \"barmode\": \"overlay\",\n },\n },\n \"rmsdz_hist\": {\n \"data\": [\n {\n \"x\": bin_centers_z.tolist(),\n \"y\": hist_z.tolist(),\n \"type\": \"bar\",\n \"name\": \"RMSD dPsi\",\n },\n ],\n \"layout\": {\n \"title\": \"Distribution of RMSDs (dPsi)\",\n \"xaxis\": {\"title\": \"RMSD dPsi (deg)\"},\n \"yaxis\": {\"title\": \"Number of images\"},\n \"bargap\": 0,\n },\n },\n }\n return plots\n" ]
[ [ "numpy.linspace", "numpy.min", "numpy.concatenate", "numpy.max", "numpy.diff", "numpy.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
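The `generate_plots` function in the dials record reduces per-image, per-lattice results to flat arrays before building its plot dictionaries. A self-contained sketch of its percent-indexed reduction (the two-image `results_summary` below is fabricated to match the keys the record reads; only NumPy is needed):

```python
import numpy as np

# Fabricated input shaped like the record's summary_data:
# {image index: [one dict per lattice found on that image]}
results_summary = {
    0: [{"n_strong": 100, "n_indexed": 80}],
    1: [{"n_strong": 40, "n_indexed": 0}],
}

keys = sorted(results_summary)
n_strong = np.array([results_summary[k][0]["n_strong"] for k in keys], dtype=float)
n_indexed = np.array([sum(c["n_indexed"] for c in results_summary[k]) for k in keys])
percent_indexed = 100 * n_indexed / n_strong        # same quantity the record histograms
hist = np.histogram(percent_indexed, np.linspace(0, 100, 51))[0]
print(percent_indexed, int(hist.sum()))             # [80.  0.] 2
```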
rodrigodelazcano/rlcard
[ "963cf6886dfaf5f089e9c8d0039a1dbff87aca6d" ]
[ "rlcard/agents/dqn_agent.py" ]
[ "''' DQN agent\n\nThe code is derived from https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/dqn.py\n\nCopyright (c) 2019 Matthew Judell\nCopyright (c) 2019 DATA Lab at Texas A&M University\nCopyright (c) 2016 Denny Britz\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import namedtuple\nfrom copy import deepcopy\n\nfrom rlcard.utils.utils import remove_illegal\n\nTransition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'legal_actions', 'done'])\n\n\nclass DQNAgent(object):\n '''\n Approximate clone of rlcard.agents.dqn_agent.DQNAgent\n that depends on PyTorch instead of Tensorflow\n '''\n def __init__(self,\n replay_memory_size=20000,\n replay_memory_init_size=100,\n update_target_estimator_every=1000,\n discount_factor=0.99,\n epsilon_start=1.0,\n epsilon_end=0.1,\n epsilon_decay_steps=20000,\n batch_size=32,\n num_actions=2,\n state_shape=None,\n train_every=1,\n mlp_layers=None,\n learning_rate=0.00005,\n device=None):\n\n '''\n Q-Learning algorithm for off-policy TD control using Function Approximation.\n Finds the optimal greedy policy while following an epsilon-greedy policy.\n\n Args:\n replay_memory_size (int): Size of the replay memory\n replay_memory_init_size (int): Number of random experiences to sample when initializing\n the reply memory.\n update_target_estimator_every (int): Copy parameters from the Q estimator to the\n target estimator every N steps\n discount_factor (float): Gamma discount factor\n epsilon_start (float): Chance to sample a random action when taking an action.\n Epsilon is decayed over time and this is the start value\n epsilon_end (float): The final minimum value of epsilon after decaying is done\n epsilon_decay_steps (int): Number of steps to decay epsilon over\n batch_size (int): Size of batches to sample from the replay memory\n evaluate_every (int): Evaluate every N steps\n num_actions (int): The number of the actions\n state_space (list): The space of the state vector\n train_every (int): Train the network every X steps.\n mlp_layers (list): The layer number and the dimension of each layer in MLP\n learning_rate (float): The learning rate of the DQN agent.\n device (torch.device): whether to use the cpu or gpu\n '''\n self.use_raw = False\n self.replay_memory_init_size = replay_memory_init_size\n self.update_target_estimator_every = update_target_estimator_every\n self.discount_factor = discount_factor\n self.epsilon_decay_steps = 
epsilon_decay_steps\n self.batch_size = batch_size\n self.num_actions = num_actions\n self.train_every = train_every\n\n # Torch device\n if device is None:\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n else:\n self.device = device\n\n # Total timesteps\n self.total_t = 0\n\n # Total training step\n self.train_t = 0\n\n # The epsilon decay scheduler\n self.epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)\n\n # Create estimators\n self.q_estimator = Estimator(num_actions=num_actions, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n self.target_estimator = Estimator(num_actions=num_actions, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n\n # Create replay memory\n self.memory = Memory(replay_memory_size, batch_size)\n\n def feed(self, ts):\n ''' Store data in to replay buffer and train the agent. There are two stages.\n In stage 1, populate the memory without training\n In stage 2, train the agent every several timesteps\n\n Args:\n ts (list): a list of 5 elements that represent the transition\n '''\n (state, action, reward, next_state, done) = tuple(ts)\n self.feed_memory(state['obs'], action, reward, next_state['obs'], list(state['legal_actions'].keys()), done)\n self.total_t += 1\n tmp = self.total_t - self.replay_memory_init_size\n if tmp>=0 and tmp%self.train_every == 0:\n self.train()\n\n def step(self, state):\n ''' Predict the action for genrating training data but\n have the predictions disconnected from the computation graph\n\n Args:\n state (numpy.array): current state\n\n Returns:\n action (int): an action id\n '''\n q_values = self.predict(state)\n epsilon = self.epsilons[min(self.total_t, self.epsilon_decay_steps-1)]\n legal_actions = list(state['legal_actions'].keys())\n probs = np.ones(len(legal_actions), dtype=float) * epsilon / len(legal_actions)\n best_action_idx = legal_actions.index(np.argmax(q_values))\n probs[best_action_idx] += (1.0 - epsilon)\n action_idx = np.random.choice(np.arange(len(probs)), p=probs)\n\n return legal_actions[action_idx]\n\n def eval_step(self, state):\n ''' Predict the action for evaluation purpose.\n\n Args:\n state (numpy.array): current state\n\n Returns:\n action (int): an action id\n info (dict): A dictionary containing information\n '''\n q_values = self.predict(state)\n best_action = np.argmax(q_values)\n\n info = {}\n info['values'] = {state['raw_legal_actions'][i]: float(q_values[list(state['legal_actions'].keys())[i]]) for i in range(len(state['legal_actions']))}\n\n return best_action, info\n\n def predict(self, state):\n ''' Predict the masked Q-values\n\n Args:\n state (numpy.array): current state\n\n Returns:\n q_values (numpy.array): a 1-d array where each entry represents a Q value\n '''\n \n q_values = self.q_estimator.predict_nograd(np.expand_dims(state['obs'], 0))[0]\n masked_q_values = -np.inf * np.ones(self.num_actions, dtype=float)\n legal_actions = list(state['legal_actions'].keys())\n masked_q_values[legal_actions] = q_values[legal_actions]\n\n return masked_q_values\n\n def train(self):\n ''' Train the network\n\n Returns:\n loss (float): The loss of the current batch.\n '''\n state_batch, action_batch, reward_batch, next_state_batch, legal_actions_batch, done_batch = self.memory.sample()\n\n # Calculate best next actions using Q-network (Double DQN)\n q_values_next = self.q_estimator.predict_nograd(next_state_batch)\n legal_actions = []\n for b in 
range(self.batch_size):\n legal_actions.extend([i + b * self.num_actions for i in legal_actions_batch[b]])\n masked_q_values = -np.inf * np.ones(self.num_actions * self.batch_size, dtype=float)\n masked_q_values[legal_actions] = q_values_next.flatten()[legal_actions]\n masked_q_values = masked_q_values.reshape((self.batch_size, self.num_actions))\n best_actions = np.argmax(masked_q_values, axis=1)\n\n # Evaluate best next actions using Target-network (Double DQN)\n q_values_next_target = self.target_estimator.predict_nograd(next_state_batch)\n target_batch = reward_batch + np.invert(done_batch).astype(np.float32) * \\\n self.discount_factor * q_values_next_target[np.arange(self.batch_size), best_actions]\n\n # Perform gradient descent update\n state_batch = np.array(state_batch)\n\n loss = self.q_estimator.update(state_batch, action_batch, target_batch)\n print('\\rINFO - Step {}, rl-loss: {}'.format(self.total_t, loss), end='')\n\n # Update the target estimator\n if self.train_t % self.update_target_estimator_every == 0:\n self.target_estimator = deepcopy(self.q_estimator)\n print(\"\\nINFO - Copied model parameters to target network.\")\n\n self.train_t += 1\n\n def feed_memory(self, state, action, reward, next_state, legal_actions, done):\n ''' Feed transition to memory\n\n Args:\n state (numpy.array): the current state\n action (int): the performed action ID\n reward (float): the reward received\n next_state (numpy.array): the next state after performing the action\n legal_actions (list): the legal actions of the next state\n done (boolean): whether the episode is finished\n '''\n self.memory.save(state, action, reward, next_state, legal_actions, done)\n\n def set_device(self, device):\n self.device = device\n self.q_estimator.device = device\n self.target_estimator.device = device\n\nclass Estimator(object):\n '''\n Approximate clone of rlcard.agents.dqn_agent.Estimator that\n uses PyTorch instead of Tensorflow. All methods input/output np.ndarray.\n\n Q-Value Estimator neural network.\n This network is used for both the Q-Network and the Target Network.\n '''\n\n def __init__(self, num_actions=2, learning_rate=0.001, state_shape=None, mlp_layers=None, device=None):\n ''' Initilalize an Estimator object.\n\n Args:\n num_actions (int): the number output actions\n state_shape (list): the shape of the state space\n mlp_layers (list): size of outputs of mlp layers\n device (torch.device): whether to use cpu or gpu\n '''\n self.num_actions = num_actions\n self.learning_rate=learning_rate\n self.state_shape = state_shape\n self.mlp_layers = mlp_layers\n self.device = device\n\n # set up Q model and place it in eval mode\n qnet = EstimatorNetwork(num_actions, state_shape, mlp_layers)\n qnet = qnet.to(self.device)\n self.qnet = qnet\n self.qnet.eval()\n\n # initialize the weights using Xavier init\n for p in self.qnet.parameters():\n if len(p.data.shape) > 1:\n nn.init.xavier_uniform_(p.data)\n\n # set up loss function\n self.mse_loss = nn.MSELoss(reduction='mean')\n\n # set up optimizer\n self.optimizer = torch.optim.Adam(self.qnet.parameters(), lr=self.learning_rate)\n\n def predict_nograd(self, s):\n ''' Predicts action values, but prediction is not included\n in the computation graph. 
It is used to predict optimal next\n actions in the Double-DQN algorithm.\n\n Args:\n s (np.ndarray): (batch, state_len)\n\n Returns:\n np.ndarray of shape (batch_size, NUM_VALID_ACTIONS) containing the estimated\n action values.\n '''\n with torch.no_grad():\n s = torch.from_numpy(s).float().to(self.device)\n q_as = self.qnet(s).cpu().numpy()\n return q_as\n\n def update(self, s, a, y):\n ''' Updates the estimator towards the given targets.\n In this case y is the target-network estimated\n value of the Q-network optimal actions, which\n is labeled y in Algorithm 1 of Minh et al. (2015)\n\n Args:\n s (np.ndarray): (batch, state_shape) state representation\n a (np.ndarray): (batch,) integer sampled actions\n y (np.ndarray): (batch,) value of optimal actions according to Q-target\n\n Returns:\n The calculated loss on the batch.\n '''\n self.optimizer.zero_grad()\n\n self.qnet.train()\n\n s = torch.from_numpy(s).float().to(self.device)\n a = torch.from_numpy(a).long().to(self.device)\n y = torch.from_numpy(y).float().to(self.device)\n\n # (batch, state_shape) -> (batch, num_actions)\n q_as = self.qnet(s)\n\n # (batch, num_actions) -> (batch, )\n Q = torch.gather(q_as, dim=-1, index=a.unsqueeze(-1)).squeeze(-1)\n\n # update model\n batch_loss = self.mse_loss(Q, y)\n batch_loss.backward()\n self.optimizer.step()\n batch_loss = batch_loss.item()\n\n self.qnet.eval()\n\n return batch_loss\n\n\nclass EstimatorNetwork(nn.Module):\n ''' The function approximation network for Estimator\n It is just a series of tanh layers. All in/out are torch.tensor\n '''\n\n def __init__(self, num_actions=2, state_shape=None, mlp_layers=None):\n ''' Initialize the Q network\n\n Args:\n num_actions (int): number of legal actions\n state_shape (list): shape of state tensor\n mlp_layers (list): output size of each fc layer\n '''\n super(EstimatorNetwork, self).__init__()\n\n self.num_actions = num_actions\n self.state_shape = state_shape\n self.mlp_layers = mlp_layers\n\n # build the Q network\n layer_dims = [np.prod(self.state_shape)] + self.mlp_layers\n fc = [nn.Flatten()]\n fc.append(nn.BatchNorm1d(layer_dims[0]))\n for i in range(len(layer_dims)-1):\n fc.append(nn.Linear(layer_dims[i], layer_dims[i+1], bias=True))\n fc.append(nn.Tanh())\n fc.append(nn.Linear(layer_dims[-1], self.num_actions, bias=True))\n self.fc_layers = nn.Sequential(*fc)\n\n def forward(self, s):\n ''' Predict action values\n\n Args:\n s (Tensor): (batch, state_shape)\n '''\n return self.fc_layers(s)\n\nclass Memory(object):\n ''' Memory for saving transitions\n '''\n\n def __init__(self, memory_size, batch_size):\n ''' Initialize\n Args:\n memory_size (int): the size of the memroy buffer\n '''\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.memory = []\n\n def save(self, state, action, reward, next_state, legal_actions, done):\n ''' Save transition into memory\n\n Args:\n state (numpy.array): the current state\n action (int): the performed action ID\n reward (float): the reward received\n next_state (numpy.array): the next state after performing the action\n legal_actions (list): the legal actions of the next state\n done (boolean): whether the episode is finished\n '''\n if len(self.memory) == self.memory_size:\n self.memory.pop(0)\n transition = Transition(state, action, reward, next_state, legal_actions, done)\n self.memory.append(transition)\n\n def sample(self):\n ''' Sample a minibatch from the replay memory\n\n Returns:\n state_batch (list): a batch of states\n action_batch (list): a batch of actions\n 
reward_batch (list): a batch of rewards\n next_state_batch (list): a batch of states\n legal_actions_batch (list): a batch of legal actions\n done_batch (list): a batch of dones\n '''\n samples = random.sample(self.memory, self.batch_size)\n return map(np.array, zip(*samples))\n\ndef copy_model_parameters(estimator1, estimator2):\n ''' Copies the model parameters of one estimator to another.\n\n Args:\n estimator1 (Estimator): Estimator to copy the parameters from\n estimator2 (Estimator): Estimator to copy the parameters to\n '''\n # The original Tensorflow implementation built assign ops over\n # tf.trainable_variables(); in this PyTorch port a state_dict copy\n # does the same job.\n estimator2.qnet.load_state_dict(estimator1.qnet.state_dict())\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.BatchNorm1d", "numpy.expand_dims", "numpy.linspace", "numpy.invert", "numpy.arange", "torch.nn.Flatten", "torch.from_numpy", "numpy.ones", "torch.nn.Tanh", "torch.nn.Linear", "numpy.argmax", "torch.no_grad", "numpy.prod", "torch.nn.init.xavier_uniform_", "torch.cuda.is_available", "numpy.array", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
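`DQNAgent.train` in the record is a Double DQN update: the online Q-network selects the best next action and the target network evaluates it. Stripped of the replay buffer and legal-action masking, the target computation is just this (all arrays below are random placeholders):

```python
import numpy as np

rng = np.random.default_rng(0)
batch, n_actions, gamma = 4, 3, 0.99

q_next_online = rng.normal(size=(batch, n_actions))  # Q-network on next states
q_next_target = rng.normal(size=(batch, n_actions))  # target network on next states
rewards = rng.normal(size=batch)
dones = np.array([False, False, True, False])

best = np.argmax(q_next_online, axis=1)              # selection: online network
targets = rewards + np.invert(dones).astype(np.float32) * gamma * \
    q_next_target[np.arange(batch), best]            # evaluation: target network
print(targets)                                       # done transitions keep only the reward
```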
ccpcode/low-order-reactor
[ "57c33e00e8e4e5f612dd375448df3fbcb635f4a9" ]
[ "cstr.py" ]
[ "\"\"\"\nReactor model for one or more CSTR reactors in series at steady-state\nconditions. Chemistry in each reactor based on Liden 1988 kinetic scheme for\nbiomass fast pyrolysis in a bubbling fluidized bed reactor.\n\nFirst-order reactions from Liden 1998 kinetics:\nwood --k1--> tar --k2--> gas\nwood --k3--> (gas+char)\nwhere rate constants k are in 1/s\nk = k1 + k3 = 10^13 * exp(-183.3*10^3 / RT)\nk2 = 4.28*10^6 * exp(-107.5*10^3 / RT)\n\nTest rxns- Based on Liden's (1988) kinetics\nR1: W => t1*T (wood to tar), k1 = rate coeff. (1/s)\nR2: T => g2*G (tar to gas), k2 = rate coeff. (1/s)\nR3: W => c3*C + g3*G (wood to char + gas), k3 = rate coeff. (1/s)\n\nStagewise mass balances for each species:\ndyW(i)/dt = -(k1+k3)*yW(i)+yW(i-1)/tau-yW(i)/tau => (Wood)\ndyT(i)/dt = t1*k1*yW(i)-k2*yT(i)+yT(i-1)/tau-yT(i)/tau (Tar)\ndyG(i)/dt = g2*k2*yT(i)+g3*k3*yW(i)+yG(i-1)/tau-yG(i)/tau (Gas)\ndyC(i)/dt = c3*k3*yW(i)+yC(i-1)/tau-yC(i)/tau (Carbonized char)\n\nExplicit s.s. solution to mass balances if done in proper sequence\nGeneral pattern yi = (i inflow + i gen rate*tau)/(1+i sink ks*tau)\nyW = (1 + 0*tau)/(1+(k1+k3)*tau)\nyT = (0 + t1*k1*yW*tau)/(1+k2*tau)\nyG = (0 + g2*k2*yT+g3*k3*yW*tau)/(1+0)\nyC = (0 + c3*k3*yW*tau)/(1+0)\n\nReference:\nLiden, Berruti, Scott, 1988. Chem. Eng. Comm., 65, pp 207-221.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as py\n\n# Parameters\nTK = 773 # reaction temperature, K\ntaus = 3 # total solids residence time, s\ntaug = 1 # total gas residence time, s\nyfw = 1.0 # normalized wood feed\n\n# Number of stages and residence time in each stage\nnstages = 10 # No. of CSTR stages\ntsn = taus/nstages # Solids residence time in each stage (s)\ntgn = taug/nstages # Gas residence time in each stage (s)\n\n# Kinetics parameters\nphi = 0.703 # Max tar yield fraction\nFC = 0.14 # Wt. fraction fixed C\nt1 = 1 # Tar mass formed/mass wood converted in rxn. 1\ng2 = 1 # Gas mass formed/mass tar converted in rxn. 2\nc3 = FC/(1-phi) # Char mass formed/mass wood converted in rxn. 3\ng3 = 1-c3 # Gas mass formed/mass wood converted in rxn. 3\nRgas = 8.314 # Ideal gas constant (J/mole K)\n\nk2 = 4.28e6*np.exp(-107.5e3/Rgas/TK) # Rxn. 2 rate coeff. (1/s)\nk = 1e13*np.exp(-183.3e3/Rgas/TK) # Sum of rxn. 1 & 3 rate coefficients (1/s)\nk1 = phi*k # Rxn 1 rate constant (1/s)\nk3 = (1-phi)*k # Rxn. 
3 rate constant (1/s)\n\n# Set up species solution vectors\nyW = yfw*np.ones(nstages) # Unconverted wood (normalized to feed)\nyT = np.zeros(nstages) # Tar (normalized to feed)\nyG = np.zeros(nstages) # Light gases (normalized to feed)\nyC = np.zeros(nstages) # Char (normalized to feed)\nyCW = np.zeros(nstages) # Char + wood (normalized to feed)\n\n# Mass balance for stage 1\nyW[1] = yfw/(1+k*tsn) # Wood in exit\nyT[1] = t1*k1*yW[1]*tsn/(1+k2*tgn) # Tar in exit\nyG[1] = g2*k2*yT[1]*tgn+g3*k3*yW[1]*tsn # Gas in exit\nyC[1] = c3*k3*yW[1]*tsn # Carbonized char in exit\nyCW[1] = yW[1]+yC[1] # Total carbonized char + unconverted wood\n\n# Mass balances for remaining stages\nfor i in range(2, nstages):\n yW[i] = yW[i-1]/(1+k*tsn) # Wood in exit of stage i\n yT[i] = (yT[i-1]+t1*k1*yW[i]*tsn)/(1+k2*tgn) # Tar in exit of stage i\n yG[i] = yG[i-1]+g2*k2*yT[i]*tgn+g3*k3*yW[i]*tsn # Gas in exit\n yC[i] = yC[i-1]+c3*k3*yW[i]*tsn # Carbonized char in exit\n yCW[i] = yC[i]+yW[i] # Total wood + carbonized char\n\n# Check overall mass balances for explicit solution\nmout = yW+yT+yG+yC # Total mass out\nmratio = mout/yfw # Ratio total mass out/total mass in\n\n# Plot Results\n# -----------------------------------------------------------------------------\n\nns = range(nstages) # list for numbers of stages\n\ndef despine():\n ax = py.gca()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n py.tick_params(bottom=False, top=False, left=False, right=False)\n\npy.ion()\npy.close('all')\n\npy.figure(1)\npy.plot(yT, lw=2, label='tar')\npy.xlabel('Reactor Height (stage number)')\npy.ylabel('Tar Yield (wt. fraction)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n\npy.figure(2, figsize=(5, 8))\npy.plot(yT, ns, lw=2, label='tar')\npy.ylabel('Reactor Height (stage number)')\npy.xlabel('Tar Yield (wt. fraction)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n\npy.figure(3)\npy.plot(yW, 'g', lw=2, label='wood')\npy.plot(yT, 'b', lw=2, label='tar')\npy.plot(yG, 'r', lw=2, label='gas')\npy.plot(yC, 'c', lw=2, label='char')\npy.xlabel('Reactor Height (stage number)')\npy.ylabel('Product Yield (wt. fraction)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.tick_params", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.pyplot.grid", "numpy.exp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ion", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
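Because the stagewise balances in `cstr.py` have the explicit steady-state solution quoted in its docstring, single-stage yields follow directly from the Liden rate constants. A quick check at the script's own conditions (773 K, 3 s residence time), using nothing beyond NumPy:

```python
import numpy as np

TK, Rgas, phi = 773.0, 8.314, 0.703
k = 1e13 * np.exp(-183.3e3 / (Rgas * TK))    # k1 + k3 (1/s), Liden 1988
k1 = phi * k
k2 = 4.28e6 * np.exp(-107.5e3 / (Rgas * TK)) # tar -> gas (1/s)

tau = 3.0                                    # one CSTR stage, residence time in s
yW = 1.0 / (1 + k * tau)                     # yW = (1 + 0*tau)/(1 + (k1+k3)*tau)
yT = k1 * yW * tau / (1 + k2 * tau)          # yT with t1 = 1
print(f"unconverted wood: {yW:.3f}, tar: {yT:.3f}")
```

Chaining n such stages with residence time tau/n each is exactly what the script's loop does.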
haberdashPI/nu_wright_lab_util
[ "6ed933cb94a4ed11e8dcbcf253fbc8ce8bb651ef" ]
[ "src/regress.py" ]
[ "import os\nimport patsy\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport pystan\n\nimport blmm\nfrom sample_stats import *\n\nlinear_model = blmm.load_model('linear',use_package_cache=True)\nrobit_model = blmm.load_model('robit',use_package_cache=True)\nrobit2_model = blmm.load_model('robit2',use_package_cache=True)\n\n\ndef linear(formula,df,coef_prior=10,error_prior=100,cache_file=None,\n **sample_kws):\n y,A = patsy.dmatrices(formula,df,return_type='dataframe',eval_env=1)\n y = np.squeeze(y)\n\n if cache_file is None or not os.path.isfile(cache_file):\n fit = linear_model.sampling({'y': y, 'A': A, 'n': A.shape[0],\n 'k': A.shape[1],\n 'fixed_mean_prior': coef_prior,\n 'prediction_error_prior': error_prior},\n **sample_kws)\n if cache_file:\n blmm.write_samples(fit.extract(),cache_file)\n else:\n fit = blmm.read_samples(cache_file)\n\n return Linear(fit,y,A,df)\n\n\nclass BaseRegressResults(object):\n def __init__(self,fit,y,A,df):\n self.fit = fit\n self.y = y\n self.A = A\n self.df = df\n\n def cache(self,cache_file):\n if issubclass(self.fit,pystan.StanFit4Model):\n blmm.write_samples(self.fit.extract(),cache_file)\n else: blmm.write_samples(fit,cache_file)\n\n def summary(self,coefs=None):\n if coefs is None:\n return coef_table(self.fit['alpha'],self.A.columns)\n else:\n return coef_table(self.fit['alpha'][:,coefs],self.A.columns[coefs])\n\n def R2(self):\n R2 = (1-np.divide(np.sum((self.y[:,np.newaxis] - self.predict())**2,axis=0),\n np.sum((self.y - self.y.mean())**2))[:,np.newaxis])\n return R2\n\n def contrasts(self,coefs=None,correct=True):\n if coefs is None:\n return contrast_table(self.fit['alpha'],self.A.columns,correct=correct)\n else:\n return contrast_table(self.fit['alpha'][:,coefs],self.A.columns[coefs],\n correct=correct)\n\n def predict(self,df=None,use_dataframe=False):\n if df is None:\n Y = self._predict_helper(self.A)\n df = self.df\n if df is not None:\n A = patsy.dmatrix(self.A.design_info,df)\n Y = self._predict_helper(A)\n\n if use_dataframe:\n dfp = df.copy()\n dfp = dfp.iloc[np.repeat(np.arange(Y.shape[0]),Y.shape[1]),:]\n dfp['sample'] = np.tile(np.arange(Y.shape[1]),Y.shape[0])\n dfp['y'] = np.reshape(Y,Y.shape[0]*Y.shape[1])\n return dfp\n else:\n return Y\n\n def linear_tests(self,names,X,coefs=None,rhs=0,correct=True):\n if coefs is None: coefs = slice(0,self.fit['alpha'].shape[1])\n tests = np.dot(self.fit['alpha'][:,coefs],X.T) - rhs\n table = coef_table(tests,names)\n if correct:\n return mcorrect(pd.DataFrame(tests),table)\n else:\n return table\n\n def validate(self,stats=default_stats,N=1000):\n p = self.predict()\n tests = ppp(self.y,p.T,self.error_fn(),stats,N=N)\n\n g = tests.groupby('type')\n summary = g.mean()\n summary['fakeSE'] = g.fake.std()\n summary['realSE'] = g.real.std()\n summary['p_val'] = g.apply(lambda d: p_value(d.real - d.fake))\n return summary\n\n def WAIC(self):\n log_post = self.log_posterior(self.y)\n p_waic = np.std(log_post,axis=1,ddof=1)\n lpd = scipy.misc.logsumexp(log_post,axis=1) - np.log(log_post.shape[1])\n\n waic = -2*np.sum(lpd - p_waic)\n sd = 2*np.sqrt(lpd.shape[0] * np.std(lpd - p_waic))\n return waic,sd,np.mean(p_waic)\n\n\nclass Linear(BaseRegressResults):\n def _predict_helper(self,A):\n if len(self.fit['alpha'].shape) == 1:\n p = np.einsum('ij,kj->ik',A,self.fit['alpha'][:,np.newaxis])\n else:\n p = np.einsum('ij,kj->ik',A,self.fit['alpha'])\n return p\n\n def log_posterior(self,y):\n eps = self.fit['eps'][np.newaxis,:]\n y = y[:,np.newaxis]\n p = self.predict()\n\n return 
scipy.stats.norm.logpdf(y,p,eps)\n\n def error_fn(self):\n error = self.fit['eps']\n\n def fn(y_hat,indices,error=error):\n error = error[indices,np.newaxis]\n return np.random.normal(scale=error,size=(error.shape[0],y_hat.shape[1]))\n\n return fn\n\n\ndef robit(formula,df,coef_prior=5,error_prior=100,r=1e-10,cache_file=None,\n **sample_kws):\n\n y,A = patsy.dmatrices(formula,df,return_type='dataframe',eval_env=1)\n y = np.squeeze(y)\n\n if cache_file is None or not os.path.isfile(cache_file):\n fit = robit_model.sampling({'y': y, 'A': A, 'n': A.shape[0],\n 'k': A.shape[1], 'fixed_mean_prior': coef_prior,\n 'r': r,\n 'prediction_error_prior': error_prior},\n **sample_kws)\n if cache_file:\n blmm.write_samples(fit.extract(),cache_file)\n else:\n fit = blmm.read_samples(cache_file)\n\n return RobustLogit(r,fit,y,A,df)\n\n\nclass RobustLogit(BaseRegressResults):\n def __init__(self,r,*params):\n super(RobustLogit,self).__init__(*params)\n self.r = r\n\n def _predict_helper(self,A):\n if len(self.fit['alpha'].shape) == 1:\n p = np.einsum('ij,kj->ik',A,self.fit['alpha'][:,np.newaxis])\n else:\n p = np.einsum('ij,kj->ik',A,self.fit['alpha'])\n p = 1 / (1 + np.exp(-p))\n p = (p - self.r/2) / (1-self.r)\n\n return p\n\n def log_posterior(self,y):\n scale = self.fit['scale'][np.newaxis,:]\n\n r = self.r\n y = y[:,np.newaxis]\n p = r/2 + self.predict()*(1-r)\n\n return scipy.stats.beta.logpdf(r/2 + y*(1-r),p*scale,(1-p)*scale)\n\n def error_fn(self):\n scale = self.fit['scale']\n r = self.r\n\n def fn(y_hat,indices,scale=scale,r=r):\n y_hat = y_hat[indices,:]\n scale = scale[indices,np.newaxis]\n\n p = r/2 + y_hat*(1-r)\n pr = np.random.beta(p*scale,(1-p)*scale)\n pr = (pr - r/2) / (1-r)\n\n return pr - y_hat\n\n return fn\n\n\ndef robit2(formula,df,coef_prior=5,error_prior=100,cache_file=None,r_prior=0.05,\n **sample_kws):\n\n y,A = patsy.dmatrices(formula,df,return_type='dataframe',eval_env=1)\n y = np.squeeze(y)\n\n if cache_file is None or not os.path.isfile(cache_file):\n fit = robit2_model.sampling({'y': y, 'A': A, 'n': A.shape[0],\n 'k': A.shape[1],\n 'fixed_mean_prior': coef_prior,\n 'r_prior': r_prior,\n 'prediction_error_prior': error_prior},\n **sample_kws)\n if cache_file:\n blmm.write_samples(fit.extract(),cache_file)\n else:\n fit = blmm.read_samples(cache_file)\n\n return RobustLogit2(fit,y,A,df)\n\n\nclass RobustLogit2(BaseRegressResults):\n def __init__(self,*params):\n super(RobustLogit2,self).__init__(*params)\n\n def _predict_helper(self,A):\n if len(self.fit['alpha'].shape) == 1:\n p = np.einsum('ij,kj->ik',A,self.fit['alpha'][:,np.newaxis])\n else:\n p = np.einsum('ij,kj->ik',A,self.fit['alpha'])\n r = self.fit['r']\n p = 1 / (1 + np.exp(-p))\n p = (p - r/2) / (1-r)\n\n return p\n\n def error_fn(self):\n scale = self.fit['scale']\n r = self.fit['r']\n\n def fn(y_hat,indices,scale=scale,r=r):\n y_hat = y_hat[indices,:]\n scale = scale[indices,np.newaxis]\n r = r[indices,np.newaxis]\n\n p = r/2 + y_hat*(1-r)\n pr = np.random.beta(p*scale,(1-p)*scale)\n pr = (pr - r/2) / (1-r)\n\n return pr - y_hat\n\n return fn\n\n def log_posterior(self,y):\n scale = self.fit['scale'][np.newaxis,:]\n r = self.fit['r'][np.newaxis,:]\n\n y = y[:,np.newaxis]\n p = r/2 + self.predict()*(1-r)\n\n return scipy.stats.beta.logpdf(r/2 + y*(1-r),p*scale,(1-p)*scale)\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.random.beta", "scipy.stats.norm.logpdf", "numpy.einsum", "numpy.reshape", "numpy.arange", "numpy.squeeze", "pandas.DataFrame", "numpy.std", "numpy.random.normal", "numpy.mean", "numpy.exp", "scipy.stats.beta.logpdf", "numpy.sum", "scipy.misc.logsumexp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "0.14", "0.15", "0.19", "0.18", "1.2", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] } ]
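The robit models above keep the Beta likelihood away from the 0/1 boundary: data are squashed with `r/2 + y*(1-r)` before `scipy.stats.beta.logpdf`, and predictions are mapped back with `(p - r/2)/(1-r)`. A tiny NumPy check that the two maps are inverses (values arbitrary):

```python
import numpy as np

def squash(y, r):       # data -> Beta support, as in log_posterior
    return r / 2 + y * (1 - r)

def unsquash(p, r):     # Beta scale -> data scale, as in _predict_helper / error_fn
    return (p - r / 2) / (1 - r)

r = 0.05
y = np.array([0.0, 0.2, 1.0])
print(squash(y, r))                                  # [0.025 0.215 0.975]
assert np.allclose(unsquash(squash(y, r), r), y)     # exact round trip
```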
audunsh/braketlab
[ "262558b22467bb566a3a41f3f04bc490ec552277" ]
[ "braketlab/solid_harmonics.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\n\nimport sympy as sp\n\n \n\ndef get_Slm(l, m):\n \"\"\"\n return the sympy real solid harmonic gaussian S_{lm}(r) \n as presented in table 6.3 of Helgaker, Jørgensen and Olsen\n (page 211)\n \"\"\"\n x,y,z = sp.symbols(\"x y z\")\n r = sp.sqrt(x**2 + y**2 + z**2)\n \n assert(l<5), \"Only l<=4 permitted\"\n assert(l>=0), \"Invalid l value\"\n assert(np.abs(m)<=l), \"Invalid m value\"\n \n if l==0:\n if m== 0:\n return x**0\n \n if l==1:\n if m==1:\n return x\n if m==0:\n return z\n if m==-1:\n return y\n if l==2:\n if m==2:\n return (x**2 - y**2)*sp.sqrt(3.0)/2.0\n if m==1:\n return x*z*sp.sqrt(3.0)\n if m==0:\n return (3*z**2 - r**2)/2.0\n if m==-1:\n return y*z*sp.sqrt(3.0)\n if m==-2:\n return x*y*sp.sqrt(3.0)\n \n if l==3:\n if m==3:\n return x*(x**2 - 3*y**2)*sp.sqrt(5/2.0)/2\n if m==2:\n return z*(x**2 - y**2)*sp.sqrt(15)/2\n if m==1:\n return x*(5*z**2 - r**2)*sp.sqrt(3/2.0)/2\n if m==0:\n return z*(5*z**2 - 3*r**2)/2\n if m==-1:\n return y*(5*z**2 - r**2)*sp.sqrt(3/2.0)/2\n if m==-2:\n return x*y*z*sp.sqrt(15)\n if m==-3:\n return y*(3*x**2 - y**2)*sp.sqrt(5/2.0)/2\n \n \n if l==4:\n if m==4:\n return (x**4 - 6*x**2*y**2 + y**4)*sp.sqrt(35)/8\n if m==3:\n return (x**2 - 3*y**2)*x*z*sp.sqrt(35/2.0)/2\n if m==2:\n return (7*z**2 - r**2)*(x**2 - y**2)*sp.sqrt(5)/4\n if m==1:\n return (7*z**2 - 3*r**2)*x*z*sp.sqrt(5/2.0)/2\n if m==0:\n return (35*z**4 - 30*z**2*r**2 + 3*r**4)/8\n if m==-1:\n return (7*z**2 - 3*r**2)*y*z*sp.sqrt(5/2.0)/2\n if m==-2:\n return (7*z**2 - r**2)*x*y*sp.sqrt(5)/2\n if m==-3:\n return (3*x**2 - y**2)*y*z*sp.sqrt(35/2.0)/2\n if m==-4:\n return (x**2 - y**2)*x*y*sp.sqrt(35)/2\n \n \n \n \ndef get_ao(a, l, m):\n \"\"\"\n return unnormalized \n solid harmonic gaussian\n for quantum numbers l, m\n a = exponent\n \"\"\"\n x,y,z = sp.symbols(\"x y z\")\n slm = get_Slm(l,m)\n return slm*sp.exp(-a*(x**2 + y**2 + z**2))\n \ndef get_ao_at(pos, a, l, m):\n \"\"\"\n return unnormalized \n solid harmonic gaussian\n for quantum numbers l, m\n a = exponent\n \"\"\"\n x,y,z = sp.symbols(\"x y z\")\n slm = get_Slm(l,m)\n \n chi = slm*sp.exp(-a*(x**2 + y**2 + z**2))\n chi = chi.subs(x, x-pos[0])\n chi = chi.subs(y, y-pos[1])\n chi = chi.subs(z, z-pos[2])\n \n return chi\n\ndef get_Npi(a_i, l):\n \"\"\"\n Returns the normalization prefactor for S_lm(a_i, r)\n a_i = exponent\n l = angular quantum number\n \"\"\"\n return (2*sp.pi)**(-.75) * (4*a_i)**(0.75 + l/2.0)\n \ndef get_Nao(a,l,m):\n \"\"\"\n return normalized AO in sympy-format\n a = exponent\n l = angular quantum number\n m = magnetic quantum number\n \"\"\"\n return get_ao(a,l,m)*get_Npi(a,l)*norm_extra(l)\n \ndef get_Nao_at(pos, a,l,m):\n \"\"\"\n return normalized AO in sympy-format\n a = exponent\n l = angular quantum number\n m = magnetic quantum number\n \"\"\"\n return get_ao_at(pos, a,l,m)*get_Npi(a,l)*norm_extra(l)\n\n\ndef f(m):\n \"\"\"\n factorial m!\n \"\"\"\n return np.max([np.prod(np.arange(m)+1), 1])\n\ndef norm_extra(l):\n \"\"\"\n Factor required that is _not_ accounted for\n in eq.
3.3 in LSDalton manual\n \"\"\"\n return (np.array([1,1,3,15,105])**-.5)[l]\n\ndef get_Nao_lambda(a,l,m):\n \"\"\"\n return a normalized solid harmonic gaussian\n in numpy lambda format, for convenient evaluation.\n \n Note that every function is centered in (0,0,0)\n translations should be performed retrospectively\n \"\"\"\n x,y,z = sp.symbols(\"x y z\")\n return sp.lambdify([x,y,z], get_Nao(a,l,m), \"numpy\")\n\n\ndef contracted_norm(a, w, l):\n \"\"\"\n Compute normalization factor \n of contracted basis function\n \"\"\" \n return np.sum(w[:,None]*w[None,:]*(np.sqrt(4*a[:,None]*a[None,:])/(a[:,None]+a[None,:]))**(1.5 + l))\n\n\ndef get_contracted(a,w,l,m, representation = \"numeric\"):\n \"\"\"\n Generates Solid Harmonic Gaussian lambda functions\n a = exponents\n w = contraction weights\n \"\"\"\n S = contracted_norm(a,w,l)\n CGO = 0\n for i in np.arange(a.shape[0]):\n CGO += w[i]*get_Nao(a[i],l,m)/np.sqrt(S)\n \n if representation == \"numeric\":\n x,y,z = sp.symbols(\"x y z\")\n \n return sp.lambdify([x,y,z], CGO, \"numpy\")\n if representation == \"sympy\":\n return CGO\n\n\ndef get_sto(a,w,l,m, representation = \"sympy\"):\n \"\"\"\n return a Slater-type orbital S_lm(r)*exp(-a*r)\n (the prefactor sqrt(a**3/pi) normalizes the l=0 case)\n \"\"\"\n S = np.sqrt(a**3/np.pi)\n\n x,y,z = sp.symbols(\"x y z\")\n sto = S*get_Slm(l,m)*sp.exp(-a*sp.sqrt(x**2 + y**2 + z**2))\n if representation == \"numeric\":\n return sp.lambdify([x,y,z], sto, \"numpy\")\n \n if representation == \"sympy\":\n return sto\n\n\n\ndef get_contracted_sympy(a,w,l,m):\n \"\"\"\n Generates Solid Harmonic Gaussian lambda functions\n a = exponents\n \n \"\"\"\n S = contracted_norm(a,w,l)\n CGO = 0\n for i in np.arange(a.shape[0]):\n CGO += w[i]*get_Nao(a[i],l,m)/np.sqrt(S)\n \n return CGO\n \ndef get_contracted_at(pos, a,w,l,m):\n \"\"\"\n Generates Solid Harmonic Gaussian lambda functions\n a = exponents\n \n \"\"\"\n S = contracted_norm(a,w,l)\n CGO = 0\n for i in np.arange(a.shape[0]):\n CGO += w[i]*get_Nao_at(pos, a[i],l,m)/np.sqrt(S)\n \n return CGO\n" ]
[ [ "numpy.arange", "numpy.array", "numpy.sqrt", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
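`get_Npi` in the record supplies the prefactor N = (2π)^(−3/4)·(4a)^(3/4 + l/2) that, together with `norm_extra`, makes a solid-harmonic Gaussian unit-normalized. A brute-force grid check for the l = 1, m = 0 function N·z·exp(−a·r²), where `norm_extra(1)` is 1 (box size and grid spacing are arbitrary choices, not from the record):

```python
import numpy as np

a, l = 0.9, 1
N = (2 * np.pi) ** (-0.75) * (4 * a) ** (0.75 + l / 2)   # get_Npi(a, l)

half, n = 6.0, 121                       # integration box [-6, 6]^3, 121 points per axis
g = np.linspace(-half, half, n)
X, Y, Z = np.meshgrid(g, g, g, indexing="ij")
chi = N * Z * np.exp(-a * (X**2 + Y**2 + Z**2))          # S_{1,0} = z times the Gaussian
h = g[1] - g[0]
print(np.sum(chi**2) * h**3)             # ~1.0: the squared norm integrates to one
```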
marrrcin/pandas-feature-union
[ "a2a6e2b35a522893473bdee347fe1089c49d0b42" ]
[ "2_transform_solution.py" ]
[ "import pandas as pd\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import FeatureUnion, make_pipeline\n\nfrom pandas_transform import PandasTransform\n\n\ndef main():\n raw_data = load_iris()\n data = pd.DataFrame(raw_data[\"data\"], columns=raw_data[\"feature_names\"])\n data.loc[:, \"class\"] = raw_data[\"target\"]\n\n pipeline = FeatureUnion([\n (\"1\", make_pipeline(\n PandasTransform(lambda X: X.loc[:, [\"sepal length (cm)\"]]),\n # other transformations\n )),\n (\"2\", make_pipeline(\n PandasTransform(lambda X: X.loc[:, [\"sepal width (cm)\"]]),\n # other transformations\n ))\n ])\n\n X = pipeline.fit_transform(data)\n print(X[\"sepal length (cm)\"].mean())\n print(X[\"sepal width (cm)\"].mean())\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.datasets.load_iris", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
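The script in this record imports `PandasTransform` from a sibling module that the record does not include. A plausible minimal stand-in (an assumption about its behavior, not the repo's actual code) is a stateless transformer that applies a callable and passes pandas objects through unchanged:

```python
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin

class PandasTransform(TransformerMixin, BaseEstimator):
    """Apply `fn` to X and return the result as-is (DataFrame in, DataFrame out)."""
    def __init__(self, fn):
        self.fn = fn

    def fit(self, X, y=None):
        return self          # stateless: nothing to learn

    def transform(self, X, y=None):
        return self.fn(X)

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
out = PandasTransform(lambda X: X.loc[:, ["a"]]).fit_transform(df)
print(type(out).__name__, list(out.columns))   # DataFrame ['a']
```

Whether the union step then preserves the frame depends on how the concatenation is done, which is the problem this repository is about.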
chenjyw/transformers
[ "aca16453f41956d0fb74af91899a53f4fe3a5717" ]
[ "transformers/tests/modeling_common_test.py" ]
[ "# coding=utf-8\n# Copyright 2019 HuggingFace Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport sys\nimport os\nimport shutil\nimport tempfile\nimport json\nimport random\nimport uuid\n\nimport unittest\nimport logging\nimport pytest\n\nfrom transformers import is_torch_available\n\nif is_torch_available():\n import torch\n import numpy as np\n\n from transformers import (PretrainedConfig, PreTrainedModel,\n BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n GPT2LMHeadModel, GPT2Config, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)\nelse:\n pytestmark = pytest.mark.skip(\"Require Torch\")\n\nif sys.version_info[0] == 2:\n import cPickle as pickle\n\n class TemporaryDirectory(object):\n \"\"\"Context manager for tempfile.mkdtemp() so it's usable with \"with\" statement.\"\"\"\n def __enter__(self):\n self.name = tempfile.mkdtemp()\n return self.name\n def __exit__(self, exc_type, exc_value, traceback):\n shutil.rmtree(self.name)\nelse:\n import pickle\n TemporaryDirectory = tempfile.TemporaryDirectory\n unicode = str\n\ndef _config_zero_init(config):\n configs_no_init = copy.deepcopy(config)\n for key in configs_no_init.__dict__.keys():\n if '_range' in key or '_std' in key:\n setattr(configs_no_init, key, 0.0)\n return configs_no_init\n\nclass CommonTestCases:\n\n class CommonModelTester(unittest.TestCase):\n\n model_tester = None\n all_model_classes = ()\n test_torchscript = True\n test_pruning = True\n test_resize_embeddings = True\n test_head_masking = True\n\n def test_save_load(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n model.eval()\n with torch.no_grad():\n outputs = model(**inputs_dict)\n\n with TemporaryDirectory() as tmpdirname:\n model.save_pretrained(tmpdirname)\n model = model_class.from_pretrained(tmpdirname)\n with torch.no_grad():\n after_outputs = model(**inputs_dict)\n\n # Make sure we don't have nans\n out_1 = after_outputs[0].numpy()\n out_2 = outputs[0].numpy()\n out_1 = out_1[~np.isnan(out_1)]\n out_2 = out_2[~np.isnan(out_2)]\n max_diff = np.amax(np.abs(out_1 - out_2))\n self.assertLessEqual(max_diff, 1e-5)\n\n def test_initialization(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n configs_no_init = _config_zero_init(config)\n for model_class in self.all_model_classes:\n model = model_class(config=configs_no_init)\n for name, param in model.named_parameters():\n if param.requires_grad:\n self.assertIn(param.data.mean().item(), [0.0, 1.0],\n msg=\"Parameter {} of model {} seems not properly initialized\".format(name, model_class))\n\n def test_determinism(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n model.eval()\n first, second = 
model(inputs_dict[\"input_ids\"])[0], model(inputs_dict[\"input_ids\"])[0]\n self.assertEqual(first.ne(second).sum().item(), 0)\n\n\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n config.output_attentions = True\n config.output_hidden_states = False\n model = model_class(config)\n model.eval()\n outputs = model(**inputs_dict)\n attentions = outputs[-1]\n self.assertEqual(model.config.output_attentions, True)\n self.assertEqual(model.config.output_hidden_states, False)\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads,\n self.model_tester.seq_length,\n self.model_tester.key_len if hasattr(self.model_tester, 'key_len') else self.model_tester.seq_length])\n out_len = len(outputs)\n\n # Check attention is always last and order is fine\n config.output_attentions = True\n config.output_hidden_states = True\n model = model_class(config)\n model.eval()\n outputs = model(**inputs_dict)\n self.assertEqual(out_len+1, len(outputs))\n self.assertEqual(model.config.output_attentions, True)\n self.assertEqual(model.config.output_hidden_states, True)\n\n attentions = outputs[-1]\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads,\n self.model_tester.seq_length,\n self.model_tester.key_len if hasattr(self.model_tester, 'key_len') else self.model_tester.seq_length])\n\n def test_torchscript(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n self._create_and_check_torchscript(config, inputs_dict)\n\n def test_torchscript_output_attentions(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n config.output_attentions = True\n self._create_and_check_torchscript(config, inputs_dict)\n\n def test_torchscript_output_hidden_state(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n config.output_hidden_states = True\n self._create_and_check_torchscript(config, inputs_dict)\n\n def _create_and_check_torchscript(self, config, inputs_dict):\n if not self.test_torchscript:\n return\n\n configs_no_init = _config_zero_init(config) # To be sure we have no Nan\n configs_no_init.torchscript = True\n for model_class in self.all_model_classes:\n model = model_class(config=configs_no_init)\n model.eval()\n inputs = inputs_dict['input_ids'] # Let's keep only input_ids\n\n try:\n torch.jit.trace(model, inputs)\n except RuntimeError:\n self.fail(\"Couldn't trace module.\")\n\n try:\n traced_gpt2 = torch.jit.trace(model, inputs)\n torch.jit.save(traced_gpt2, \"traced_model.pt\")\n except RuntimeError:\n self.fail(\"Couldn't save module.\")\n\n try:\n loaded_model = torch.jit.load(\"traced_model.pt\")\n os.remove(\"traced_model.pt\")\n except ValueError:\n self.fail(\"Couldn't load module.\")\n\n model.eval()\n loaded_model.eval()\n\n model_params = model.parameters()\n loaded_model_params = loaded_model.parameters()\n\n models_equal = True\n for p1, p2 in zip(model_params, loaded_model_params):\n if p1.data.ne(p2.data).sum() > 0:\n models_equal = False\n\n self.assertTrue(models_equal)\n\n\n def test_headmasking(self):\n if not self.test_head_masking:\n return\n\n global_rng.seed(42)\n config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common()\n global_rng.seed()\n\n config.output_attentions = True\n config.output_hidden_states = True\n configs_no_init = _config_zero_init(config) # To be sure we have no Nan\n for model_class in self.all_model_classes:\n model = model_class(config=configs_no_init)\n model.eval()\n\n # Prepare head_mask\n # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)\n head_mask = torch.ones(self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads)\n head_mask[0, 0] = 0\n head_mask[-1, :-1] = 0\n head_mask.requires_grad_(requires_grad=True)\n inputs = inputs_dict.copy()\n inputs['head_mask'] = head_mask\n\n outputs = model(**inputs)\n\n # Test that we can get a gradient back for importance score computation\n output = sum(t.sum() for t in outputs[0])\n output = output.sum()\n output.backward()\n multihead_outputs = head_mask.grad\n\n attentions = outputs[-1]\n hidden_states = outputs[-2]\n\n # Remove Nan\n for t in attentions:\n self.assertLess(torch.sum(torch.isnan(t)), t.numel() / 4) # Check we don't have more than 25% nans (arbitrary)\n attentions = [t.masked_fill(torch.isnan(t), 0.0) for t in attentions] # remove them (the test is less complete)\n\n self.assertIsNotNone(multihead_outputs)\n self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)\n self.assertAlmostEqual(\n attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)\n self.assertNotEqual(\n attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)\n self.assertNotEqual(\n attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)\n self.assertAlmostEqual(\n attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)\n self.assertNotEqual(\n attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)\n\n\n def test_head_pruning(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n config.output_attentions = True\n config.output_hidden_states = False\n model = model_class(config=config)\n model.eval()\n heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)),\n -1: [0]}\n model.prune_heads(heads_to_prune)\n outputs = model(**inputs_dict)\n\n attentions = outputs[-1]\n\n self.assertEqual(\n attentions[0].shape[-3], 1)\n self.assertEqual(\n attentions[1].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(\n attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)\n\n def test_head_pruning_save_load_from_pretrained(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n config.output_attentions = True\n config.output_hidden_states = False\n model = model_class(config=config)\n model.eval()\n heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)),\n -1: [0]}\n model.prune_heads(heads_to_prune)\n directory = \"pruned_model\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n model.save_pretrained(directory)\n model = model_class.from_pretrained(directory)\n\n outputs = model(**inputs_dict)\n attentions = outputs[-1]\n self.assertEqual(attentions[0].shape[-3], 1)\n self.assertEqual(attentions[1].shape[-3], 
self.model_tester.num_attention_heads)\n self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)\n\n shutil.rmtree(directory)\n\n def test_head_pruning_save_load_from_config_init(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n config.output_attentions = True\n config.output_hidden_states = False\n\n heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)),\n -1: [0]}\n config.pruned_heads = heads_to_prune\n\n model = model_class(config=config)\n model.eval()\n\n outputs = model(**inputs_dict)\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)\n\n def test_head_pruning_integration(self):\n if not self.test_pruning:\n return\n\n for model_class in self.all_model_classes:\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n if \"head_mask\" in inputs_dict:\n del inputs_dict[\"head_mask\"]\n\n config.output_attentions = True\n config.output_hidden_states = False\n\n heads_to_prune = {0: [0], 1: [1, 2]}\n config.pruned_heads = heads_to_prune\n\n model = model_class(config=config)\n model.eval()\n\n outputs = model(**inputs_dict)\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)\n self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)\n\n directory = \"pruned_model\"\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n model.save_pretrained(directory)\n model = model_class.from_pretrained(directory)\n shutil.rmtree(directory)\n\n outputs = model(**inputs_dict)\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)\n self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)\n self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)\n\n heads_to_prune = {0: [0], 2: [1, 2]}\n model.prune_heads(heads_to_prune)\n\n outputs = model(**inputs_dict)\n attentions = outputs[-1]\n\n self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads -1)\n self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)\n self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)\n self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)\n\n self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})\n\n\n def test_hidden_states_output(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n config.output_hidden_states = True\n config.output_attentions = False\n model = model_class(config)\n model.eval()\n outputs = model(**inputs_dict)\n hidden_states = outputs[-1]\n self.assertEqual(model.config.output_attentions, False)\n self.assertEqual(model.config.output_hidden_states, True)\n 
self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)\n            self.assertListEqual(\n                list(hidden_states[0].shape[-2:]),\n                [self.model_tester.seq_length, self.model_tester.hidden_size])\n\n    def test_resize_tokens_embeddings(self):\n        original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n        if not self.test_resize_embeddings:\n            return\n\n        for model_class in self.all_model_classes:\n            config = copy.deepcopy(original_config)\n            model = model_class(config)\n\n            model_vocab_size = config.vocab_size\n            # Retrieve the embeddings and clone them\n            model_embed = model.resize_token_embeddings(model_vocab_size)\n            cloned_embeddings = model_embed.weight.clone()\n\n            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size\n            model_embed = model.resize_token_embeddings(model_vocab_size + 10)\n            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)\n            # Check that it actually resizes the embeddings matrix\n            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)\n\n            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size\n            model_embed = model.resize_token_embeddings(model_vocab_size - 15)\n            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)\n            # Check that it actually resizes the embeddings matrix\n            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)\n\n            # Check that adding and removing tokens has not modified the first part of the embedding matrix.\n            models_equal = True\n            for p1, p2 in zip(cloned_embeddings, model_embed.weight):\n                if p1.data.ne(p2.data).sum() > 0:\n                    models_equal = False\n\n            self.assertTrue(models_equal)\n\n    def test_tie_model_weights(self):\n        if not self.test_torchscript:\n            return\n\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n        def check_same_values(layer_1, layer_2):\n            equal = True\n            for p1, p2 in zip(layer_1.weight, layer_2.weight):\n                if p1.data.ne(p2.data).sum() > 0:\n                    equal = False\n            return equal\n\n        for model_class in self.all_model_classes:\n            if not hasattr(model_class, 'tie_weights'):\n                continue\n\n            config.torchscript = True\n            model_not_tied = model_class(config)\n            params_not_tied = list(model_not_tied.parameters())\n\n            config_tied = copy.deepcopy(config)\n            config_tied.torchscript = False\n            model_tied = model_class(config_tied)\n            params_tied = list(model_tied.parameters())\n\n            # Check that the embedding layer and decoding layer are the same in size and in value\n            self.assertGreater(len(params_not_tied), len(params_tied))\n            # self.assertTrue(check_same_values(embeddings, decoding))\n\n            # # Check that after modification, they remain the same.\n            # embeddings.weight.data.div_(2)\n            # # Check that the embedding layer and decoding layer are the same in size and in value\n            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)\n            # self.assertTrue(check_same_values(embeddings, decoding))\n\n            # # Check that after modification, they remain the same.\n            # decoding.weight.data.div_(4)\n            # # Check that the embedding layer and decoding layer are the same in size and in value\n            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)\n            # self.assertTrue(check_same_values(embeddings, decoding))\n\n            # Check that after resize they remain tied.\n            model_tied.resize_token_embeddings(config.vocab_size + 10)\n            params_tied_2 = list(model_tied.parameters())\n            self.assertGreater(len(params_not_tied), len(params_tied))\n            
self.assertEqual(len(params_tied_2), len(params_tied))\n\n # decoding.weight.data.mul_(20)\n # # Check that the embedding layer and decoding layer are the same in size and in value\n # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)\n # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))\n\n\n class GPTModelTester(CommonModelTester):\n\n def __init__(self,\n parent,\n batch_size=13,\n seq_length=7,\n is_training=True,\n use_position_ids=True,\n use_token_type_ids=True,\n use_labels=True,\n vocab_size=99,\n n_positions=33,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n n_choices=3,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n scope=None,\n config_class=None,\n base_model_class=None,\n lm_head_model_class=None,\n double_head_model_class=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_position_ids = use_position_ids\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.n_positions = n_positions\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.n_choices = n_choices\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.scope = scope\n self.config_class = config_class\n self.base_model_class = base_model_class\n self.lm_head_model_class = lm_head_model_class\n self.double_head_model_class = double_head_model_class\n self.all_model_classes = (base_model_class, lm_head_model_class, double_head_model_class)\n\n def prepare_config_and_inputs(self):\n total_num_tokens = self.vocab_size\n input_ids = ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_num_tokens)\n\n position_ids = None\n if self.use_position_ids:\n position_ids = ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.n_positions)\n\n token_type_ids = None\n if self.use_token_type_ids:\n total_voc = self.vocab_size\n token_type_ids = ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_voc)\n\n mc_labels = None\n lm_labels = None\n mc_token_ids = None\n if self.use_labels:\n mc_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n lm_labels = ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.num_labels)\n mc_token_ids = ids_tensor([self.batch_size, self.n_choices], self.seq_length)\n\n config = self.config_class(\n vocab_size_or_config_json_file=self.vocab_size,\n n_positions=self.n_positions,\n n_embd=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n initializer_range=self.initializer_range)\n\n return (config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids)\n\n def create_and_check_base_model(self, config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids):\n model = self.base_model_class(config)\n model.eval()\n\n outputs = model(input_ids, position_ids, token_type_ids)\n outputs = model(input_ids, position_ids)\n outputs = model(input_ids)\n\n hidden_state = outputs[0]\n self.parent.assertListEqual(\n list(hidden_state.size()),\n [self.batch_size, self.n_choices, self.seq_length, self.hidden_size])\n\n\n def create_and_check_lm_head(self, config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids):\n model 
= self.lm_head_model_class(config)\n model.eval()\n outputs = model(input_ids, position_ids, token_type_ids, lm_labels)\n loss, lm_logits = outputs[:2]\n\n total_voc = self.vocab_size\n self.parent.assertListEqual(\n list(lm_logits.size()),\n [self.batch_size, self.n_choices, self.seq_length, total_voc])\n self.parent.assertListEqual(\n list(loss.size()),\n [])\n\n def create_and_check_presents(self, config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids):\n for model_class in self.all_model_classes:\n model = model_class(config)\n model.eval()\n outputs = model(input_ids)\n presents = outputs[-1]\n self.parent.assertEqual(self.num_hidden_layers, len(presents))\n self.parent.assertListEqual(\n list(presents[0].size()),\n [2, self.batch_size * self.n_choices, self.num_attention_heads,\n self.seq_length, self.hidden_size // self.num_attention_heads])\n\n def create_and_check_double_heads(self, config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids):\n model = self.double_head_model_class(config)\n model.eval()\n outputs = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels,\n token_type_ids=token_type_ids, position_ids=position_ids)\n lm_loss, mc_loss, lm_logits, mc_logits = outputs[:4]\n loss = [lm_loss, mc_loss]\n\n total_voc = self.vocab_size\n self.parent.assertListEqual(\n list(lm_logits.size()),\n [self.batch_size, self.n_choices, self.seq_length, total_voc])\n self.parent.assertListEqual(\n list(mc_logits.size()),\n [self.batch_size, self.n_choices])\n self.parent.assertListEqual(\n [list(l.size()) for l in loss],\n [[], []])\n\n def create_and_check_model_from_pretrained(self):\n cache_dir = \"/tmp/transformers_test/\"\n for model_name in list(self.base_model_class.pretrained_model_archive_map.keys())[:1]:\n model = self.base_model_class.from_pretrained(model_name, cache_dir=cache_dir)\n shutil.rmtree(cache_dir)\n self.parent.assertIsNotNone(model)\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids) = config_and_inputs\n inputs_dict = {'input_ids': input_ids}\n return config, inputs_dict\n\n def run_common_tests(self, test_presents=False):\n config_and_inputs = self.prepare_config_and_inputs()\n self.create_and_check_base_model(*config_and_inputs)\n\n config_and_inputs = self.prepare_config_and_inputs()\n self.create_and_check_lm_head(*config_and_inputs)\n\n config_and_inputs = self.prepare_config_and_inputs()\n self.create_and_check_double_heads(*config_and_inputs)\n\n if test_presents:\n config_and_inputs = self.prepare_config_and_inputs()\n self.create_and_check_presents(*config_and_inputs)\n\n def run_slow_tests(self):\n self.create_and_check_model_from_pretrained()\n\n\nclass ConfigTester(object):\n def __init__(self, parent, config_class=None, **kwargs):\n self.parent = parent\n self.config_class = config_class\n self.inputs_dict = kwargs\n\n def create_and_test_config_common_properties(self):\n config = self.config_class(**self.inputs_dict)\n self.parent.assertTrue(hasattr(config, 'vocab_size'))\n self.parent.assertTrue(hasattr(config, 'hidden_size'))\n self.parent.assertTrue(hasattr(config, 'num_attention_heads'))\n self.parent.assertTrue(hasattr(config, 'num_hidden_layers'))\n\n def create_and_test_config_to_json_string(self):\n config = self.config_class(**self.inputs_dict)\n obj = json.loads(config.to_json_string())\n for key, value in 
self.inputs_dict.items():\n self.parent.assertEqual(obj[key], value)\n\n def create_and_test_config_to_json_file(self):\n config_first = self.config_class(**self.inputs_dict)\n json_file_path = os.path.join(os.getcwd(), \"config_\" + str(uuid.uuid4()) + \".json\")\n config_first.to_json_file(json_file_path)\n config_second = self.config_class.from_json_file(json_file_path)\n os.remove(json_file_path)\n self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())\n\n def run_common_tests(self):\n self.create_and_test_config_common_properties()\n self.create_and_test_config_to_json_string()\n self.create_and_test_config_to_json_file()\n\n\nglobal_rng = random.Random()\n\n\ndef ids_tensor(shape, vocab_size, rng=None, name=None):\n \"\"\"Creates a random int32 tensor of the shape within the vocab size.\"\"\"\n if rng is None:\n rng = global_rng\n\n total_dims = 1\n for dim in shape:\n total_dims *= dim\n\n values = []\n for _ in range(total_dims):\n values.append(rng.randint(0, vocab_size - 1))\n\n return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()\n\n\nclass ModelUtilsTest(unittest.TestCase):\n def test_model_from_pretrained(self):\n logging.basicConfig(level=logging.INFO)\n for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:\n config = BertConfig.from_pretrained(model_name)\n self.assertIsNotNone(config)\n self.assertIsInstance(config, PretrainedConfig)\n\n model = BertModel.from_pretrained(model_name)\n model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)\n self.assertIsNotNone(model)\n self.assertIsInstance(model, PreTrainedModel)\n for value in loading_info.values():\n self.assertEqual(len(value), 0)\n\n config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)\n model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)\n self.assertEqual(model.config.output_attentions, True)\n self.assertEqual(model.config.output_hidden_states, True)\n self.assertEqual(model.config, config)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.jit.save", "torch.jit.load", "torch.ones", "torch.jit.trace", "numpy.abs", "torch.isnan", "numpy.isnan", "torch.tensor", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
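A note on the `ids_tensor` helper defined near the end of the test file above: despite its docstring, it returns `torch.long` (int64) values, which is what embedding layers expect. Below is a minimal standalone sketch of it so it can be tried outside the test suite; it assumes only that torch is installed, and the batch/sequence/vocab sizes are made-up illustration values.

import random

import torch

global_rng = random.Random()

def ids_tensor(shape, vocab_size, rng=None):
    """Create a random tensor of token ids (dtype torch.long) with values in [0, vocab_size)."""
    rng = rng or global_rng
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = [rng.randint(0, vocab_size - 1) for _ in range(total_dims)]
    return torch.tensor(values, dtype=torch.long).view(shape).contiguous()

# A fake batch of 13 sequences of length 7 over a 99-token vocabulary.
input_ids = ids_tensor([13, 7], vocab_size=99, rng=random.Random(42))
assert input_ids.shape == (13, 7) and int(input_ids.max()) < 99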
fermiPy/dmpipe
[ "e5b3f950d18d5077f7abf46f53fcf59e97bb3301" ]
[ "dmpipe/dm_plotting.py" ]
[ "#!/usr/bin/env python\n#\n\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nTop level scripts to make castro plot and limits plots in mass / sigmav space\n\"\"\"\n\n\nimport os\nfrom os.path import splitext\nimport numpy as np\n\nfrom astropy.table import Table\n\nfrom fermipy.utils import init_matplotlib_backend, load_yaml\nfrom fermipy.jobs.utils import is_not_null\nfrom fermipy.jobs.link import Link\nfrom fermipy.jobs.scatter_gather import ScatterGather\nfrom fermipy.jobs.slac_impl import make_nfs_path\n\nfrom dmpipe.dm_spectral_utils import DMCastroData, DMSpecTable\nfrom dmpipe.dm_plotting_utils import plot_dm_castro\nfrom dmpipe.dm_plotting_utils import plot_dm_spectra_by_mass, plot_dm_spectra_by_channel\nfrom dmpipe.dm_plotting_utils import plot_limits_from_arrays, plot_mc_truth\n\nfrom dmpipe.name_policy import NameFactory\nfrom dmpipe import defaults\n\ninit_matplotlib_backend()\nNAME_FACTORY = NameFactory(basedir='.')\n\n\ndef is_decay_profile(profile):\n    \"\"\" Check if a profile string is for DM decay \"\"\"\n    tokens = profile.split('_')\n    return tokens[-1] in ['point', 'dmap', 'dradial']\n\ndef is_ann_profile(profile):\n    \"\"\" Check if a profile string is for DM annihilation \"\"\"\n    tokens = profile.split('_')\n    return tokens[-1] in ['point', 'map', 'radial']\n\ndef select_channels(channels, profile):\n    \"\"\" Select the relevant channels (for decay or annihilation) for a given profile \"\"\"\n    sed_ok_decay = is_decay_profile(profile)\n    sed_ok_ann = is_ann_profile(profile)\n    ochans = []\n    for chan in channels:\n        chan_is_decay = chan.find('_decay') >= 0\n        if chan_is_decay:\n            if sed_ok_decay:\n                ochans.append(chan)\n        else:\n            if sed_ok_ann:\n                ochans.append(chan)\n    return ochans\n\n\ndef get_ul_bands(table, prefix):\n    \"\"\" Get the upper limit bands from a table\n\n    Parameters\n    ----------\n\n    table : `astropy.table.Table`\n        Table to get the limits from.\n\n    prefix : str\n        Prefix to append to the column names for the limits\n\n\n    Returns\n    -------\n\n    output : dict\n        A dictionary with the limits bands\n\n    \"\"\"\n    o = dict(q02=np.squeeze(table[\"%s_q02\" % prefix]),\n             q16=np.squeeze(table[\"%s_q16\" % prefix]),\n             q84=np.squeeze(table[\"%s_q84\" % prefix]),\n             q97=np.squeeze(table[\"%s_q97\" % prefix]),\n             median=np.squeeze(table[\"%s_median\" % prefix]))\n    return o\n\n\nclass PlotDMSpectra(Link):\n    \"\"\"Small class to plot the DM spectra from pre-computed tables.\n\n    \"\"\"\n    appname = 'dmpipe-plot-dm-spectra'\n    linkname_default = 'plot-dm-spectra'\n    usage = '%s [options]' % (appname)\n    description = \"Plot the DM spectra stored in pre-computed tables\"\n\n    default_options = dict(infile=defaults.generic['infile'],\n                           outfile=defaults.generic['outfile'],\n                           chan=defaults.common['chan'],\n                           mass=defaults.common['mass'],\n                           spec_type=defaults.common['spec_type'])\n\n    __doc__ += Link.construct_docstring(default_options)\n\n    def run_analysis(self, argv):\n        \"\"\"Run this analysis\"\"\"\n        args = self._parser.parse_args(argv)\n\n        dm_spec_table = DMSpecTable.create_from_fits(args.infile)\n        dm_plot_by_mass = plot_dm_spectra_by_mass(\n            dm_spec_table, chan=args.chan, spec_type=args.spec_type)\n        dm_plot_by_chan = plot_dm_spectra_by_channel(\n            dm_spec_table, mass=args.mass, spec_type=args.spec_type)\n\n        if args.outfile:\n            dm_plot_by_mass[0].savefig(\n                args.outfile.replace(\n                    '.png', '_%s.png' %\n                    args.chan))\n            dm_plot_by_chan[0].savefig(\n                args.outfile.replace(\n                    '.png', '_%1.FGeV.png' %\n                    args.mass))\n\n\nclass PlotLimits(Link):\n    \"\"\"Small class to Plot DM limits on 
<sigma v> versus mass.\n\n \"\"\"\n appname = 'dmpipe-plot-limits'\n linkname_default = 'plot-limits'\n usage = '%s [options]' % (appname)\n description = \"Plot DM limits on <sigma v> versus mass\"\n\n default_options = dict(infile=defaults.generic['infile'],\n outfile=defaults.generic['outfile'],\n chan=defaults.common['chan'],\n bands=defaults.collect['bands'],\n sim=defaults.sims['sim'])\n\n __doc__ += Link.construct_docstring(default_options)\n\n def run_analysis(self, argv):\n \"\"\"Run this analysis\"\"\"\n args = self._parser.parse_args(argv)\n\n if args.chan.find('_decay') >= 0:\n decay = True\n limit_col = 'll_0.95'\n ylims = (1e+22, 1e+28)\n else:\n decay = False\n limit_col = 'ul_0.95'\n ylims = (1e-28, 1e-22)\n\n if is_not_null(args.infile):\n tab_m = Table.read(args.infile, hdu=\"masses\")\n tab_s = Table.read(args.infile, hdu=args.chan)\n xvals = tab_m['masses'][0]\n yvals = tab_s[limit_col][0]\n ldict = dict(limits=(xvals, yvals))\n else:\n ldict = {}\n\n if is_not_null(args.bands):\n tab_b = Table.read(args.bands, hdu=args.chan)\n tab_bm = Table.read(args.bands, hdu=\"masses\")\n bands = get_ul_bands(tab_b, limit_col)\n bands['masses'] = tab_bm['masses'][0]\n else:\n bands = None\n\n if is_not_null(args.sim):\n sim_srcs = load_yaml(args.sim)\n injected_src = sim_srcs.get('injected_source', None)\n else:\n injected_src = None\n\n xlims = (1e1, 1e4)\n\n dm_plot = plot_limits_from_arrays(ldict, xlims, ylims, bands, decay=decay)\n\n if injected_src is not None:\n mc_model = injected_src['source_model']\n plot_mc_truth(dm_plot[1], mc_model)\n\n if args.outfile:\n dm_plot[0].savefig(args.outfile)\n return None\n return dm_plot\n\n\nclass PlotMLEs(Link):\n \"\"\"Small class to Plot DM maximum likelihood estimate <sigma v> versus mass.\n\n \"\"\"\n appname = 'dmpipe-plot-mles'\n linkname_default = 'plot-mles'\n usage = '%s [options]' % (appname)\n description = \"Plot DM maximum likelihood estimate on <sigma v> versus mass\"\n\n default_options = dict(infile=defaults.generic['infile'],\n outfile=defaults.generic['outfile'],\n chan=defaults.common['chan'],\n bands=defaults.collect['bands'],\n sim=defaults.sims['sim'])\n\n __doc__ += Link.construct_docstring(default_options)\n\n def run_analysis(self, argv):\n \"\"\"Run this analysis\"\"\"\n args = self._parser.parse_args(argv)\n\n if args.chan.find('_decay') >= 0:\n limit_col = 'll_0.95'\n ylims = (1e+22, 1e+28)\n else:\n limit_col = 'ul_0.95'\n ylims = (1e-28, 1e-22)\n\n if is_not_null(args.infile):\n tab_m = Table.read(args.infile, hdu=\"masses\")\n tab_s = Table.read(args.infile, hdu=args.chan)\n xvals = tab_m['masses'][0]\n yvals = tab_s[limit_col][0]\n ldict = dict(limits=(xvals, yvals))\n else:\n ldict = {}\n\n if is_not_null(args.bands):\n tab_b = Table.read(args.bands, hdu=args.chan)\n tab_bm = Table.read(args.bands, hdu=\"masses\")\n bands = get_ul_bands(tab_b, 'mles')\n bands['masses'] = tab_bm['masses'][0]\n else:\n bands = None\n\n if is_not_null(args.sim):\n sim_srcs = load_yaml(args.sim)\n injected_src = sim_srcs.get('injected_source', None)\n else:\n injected_src = None\n\n xlims = (1e1, 1e4)\n\n dm_plot = plot_limits_from_arrays(ldict, xlims, ylims, bands)\n\n if injected_src is not None:\n mc_model = injected_src['source_model']\n plot_mc_truth(dm_plot[1], mc_model)\n\n if args.outfile:\n dm_plot[0].savefig(args.outfile)\n return None\n return dm_plot\n\n\n\n\nclass PlotDM(Link):\n \"\"\"Small class to plot the likelihood vs <sigma v> and DM particle mass\n\n \"\"\"\n appname = 'dmpipe-plot-dm'\n linkname_default = 
'plot-dm'\n usage = \"%s [options]\" % (appname)\n description = \"Plot the likelihood vs <sigma v> and DM particle mass\"\n\n default_options = dict(infile=defaults.generic['infile'],\n outfile=defaults.generic['outfile'],\n chan=defaults.common['chan'],\n global_min=defaults.common['global_min'])\n\n __doc__ += Link.construct_docstring(default_options)\n\n def run_analysis(self, argv):\n \"\"\"Run this analysis\"\"\"\n args = self._parser.parse_args(argv)\n exttype = splitext(args.infile)[-1]\n if exttype in ['.fits']:\n dm_castro = DMCastroData.create_from_fitsfile(args.infile, args.chan)\n elif exttype in ['.yaml']:\n dm_castro = DMCastroData.create_from_yamlfile(args.infile, args.chan)\n else:\n raise ValueError(\"Can not read file type %s for SED\" % exttype)\n\n dm_plot = plot_dm_castro(dm_castro, global_min=args.global_min)\n if args.outfile:\n dm_plot[0].savefig(args.outfile)\n return None\n return dm_plot\n\n\nclass PlotLimits_SG(ScatterGather):\n \"\"\"Small class to generate configurations for `PlotLimits`\n\n This does a triple nested loop over targets, profiles and j-factor priors\n \"\"\"\n appname = 'dmpipe-plot-limits-sg'\n usage = \"%s [options]\" % (appname)\n description = \"Make castro plots for set of targets\"\n clientclass = PlotLimits\n\n job_time = 60\n\n default_options = dict(ttype=defaults.common['ttype'],\n targetlist=defaults.common['targetlist'],\n channels=defaults.common['channels'],\n astro_priors=defaults.common['astro_priors'],\n dry_run=defaults.common['dry_run'])\n\n __doc__ += Link.construct_docstring(default_options)\n\n def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n if targets_yaml is None:\n return job_configs\n\n astro_priors = args['astro_priors']\n channels = args['channels']\n\n base_config = dict(bands=None,\n sim=sim)\n\n targets = load_yaml(targets_yaml)\n for target_name, target_list in list(targets.items()):\n for targ_prof in target_list:\n prof_chans = select_channels(channels, targ_prof)\n for astro_prior in astro_priors:\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n profile=targ_prof,\n astro_prior=astro_prior,\n fullpath=True)\n input_path = NAME_FACTORY.dmlimitsfile(**name_keys)\n for chan in prof_chans:\n targ_key = \"%s:%s:%s:%s\" % (\n target_name, targ_prof, astro_prior, chan)\n\n output_path = input_path.replace(\n '.fits', '_%s.png' % chan)\n logfile = make_nfs_path(\n output_path.replace('.png', '.log'))\n job_config = base_config.copy()\n job_config.update(dict(infile=input_path,\n outfile=output_path,\n astro_prior=astro_prior,\n logfile=logfile,\n chan=chan))\n job_configs[targ_key] = job_config\n\n return job_configs\n\n\nclass PlotStackedLimits_SG(ScatterGather):\n \"\"\"Small class to generate configurations for `PlotStackedLimits`\n\n This does a double nested loop over rosters and j-factor priors\n \"\"\"\n appname = 'dmpipe-plot-stacked-limits-sg'\n usage = \"%s [options]\" % (appname)\n description = \"Make castro plots for set of targets\"\n clientclass = PlotLimits\n\n job_time = 60\n\n default_options = dict(ttype=defaults.common['ttype'],\n rosterlist=defaults.common['rosterlist'],\n bands=defaults.collect['bands'],\n channels=defaults.common['channels'],\n astro_priors=defaults.common['astro_priors'],\n sim=defaults.sims['sim'],\n nsims=defaults.sims['nsims'],\n seed=defaults.sims['seed'],\n dry_run=defaults.common['dry_run'])\n\n __doc__ += 
Link.construct_docstring(default_options)\n\n def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n ttype = args['ttype']\n (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)\n if roster_yaml is None:\n return job_configs\n\n roster_dict = load_yaml(roster_yaml)\n\n astro_priors = args['astro_priors']\n channels = args['channels']\n\n for roster_name in list(roster_dict.keys()):\n rost_chans = select_channels(channels, roster_name)\n for astro_prior in astro_priors:\n name_keys = dict(target_type=ttype,\n roster_name=roster_name,\n astro_prior=astro_prior,\n sim_name=sim,\n fullpath=True)\n for chan in rost_chans:\n targ_key = \"%s:%s:%s\" % (roster_name, astro_prior, chan)\n if sim is not None:\n seedlist = list(range(\n args['seed'], args['seed'] + args['nsims']))\n sim_path = os.path.join('config', 'sim_%s.yaml' % sim)\n else:\n seedlist = [None]\n sim_path = None\n\n for seed in seedlist:\n if seed is not None:\n name_keys['seed'] = \"%06i\" % seed # pylint: disable=bad-string-format-type\n input_path = NAME_FACTORY.sim_stackedlimitsfile(\n **name_keys)\n full_targ_key = \"%s_%06i\" % (targ_key, seed) # pylint: disable=bad-string-format-type\n else:\n input_path = NAME_FACTORY.stackedlimitsfile(\n **name_keys)\n full_targ_key = targ_key\n\n output_path = input_path.replace(\n '.fits', '_%s.png' % chan)\n logfile = make_nfs_path(\n output_path.replace('.png', '.log'))\n job_config = dict(infile=input_path,\n outfile=output_path,\n astro_prior=astro_prior,\n logfile=logfile,\n sim=sim_path,\n chan=chan)\n job_configs[full_targ_key] = job_config\n\n return job_configs\n\n\nclass PlotDM_SG(ScatterGather):\n \"\"\"Small class to generate configurations for `PlotDM`\n\n This does a quadruple nested loop over targets, profiles,\n j-factor priors and channels\n \"\"\"\n appname = 'dmpipe-plot-dm-sg'\n usage = \"%s [options]\" % (appname)\n description = \"Make castro plots for set of targets\"\n clientclass = PlotDM\n\n job_time = 60\n\n default_options = dict(ttype=defaults.common['ttype'],\n targetlist=defaults.common['targetlist'],\n channels=defaults.common['channels'],\n astro_priors=defaults.common['astro_priors'],\n global_min=defaults.common['global_min'],\n dry_run=defaults.common['dry_run'])\n\n __doc__ += Link.construct_docstring(default_options)\n\n def build_job_configs(self, args):\n \"\"\"Hook to build job configurations\n \"\"\"\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, _) = NAME_FACTORY.resolve_targetfile(args)\n if targets_yaml is None:\n return job_configs\n\n targets = load_yaml(targets_yaml)\n\n astro_priors = args['astro_priors']\n channels = args['channels']\n global_min = args['global_min']\n\n for target_name, target_list in list(targets.items()):\n for targ_prof in target_list:\n prof_chans = select_channels(channels, targ_prof)\n for astro_prior in astro_priors:\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n profile=targ_prof,\n astro_prior=astro_prior,\n fullpath=True)\n input_path = NAME_FACTORY.dmlikefile(**name_keys)\n for chan in prof_chans:\n targ_key = \"%s:%s:%s:%s\" % (\n target_name, targ_prof, astro_prior, chan)\n output_path = input_path.replace(\n '.fits', '_%s.png' % chan)\n logfile = make_nfs_path(\n output_path.replace('.png', '.log'))\n job_config = dict(infile=input_path,\n outfile=output_path,\n astro_prior=astro_prior,\n logfile=logfile,\n global_min=global_min,\n chan=chan)\n job_configs[targ_key] = job_config\n\n return job_configs\n\n\nclass 
PlotStackedDM_SG(ScatterGather):\n    \"\"\"Small class to generate configurations for `PlotDM`\n\n    This does a triple loop over rosters, j-factor priors and channels\n    \"\"\"\n    appname = 'dmpipe-plot-stacked-dm-sg'\n    usage = \"%s [options]\" % (appname)\n    description = \"Make castro plots for set of targets\"\n    clientclass = PlotDM\n\n    job_time = 60\n\n    default_options = dict(ttype=defaults.common['ttype'],\n                           rosterlist=defaults.common['rosterlist'],\n                           channels=defaults.common['channels'],\n                           astro_priors=defaults.common['astro_priors'],\n                           sim=defaults.sims['sim'],\n                           nsims=defaults.sims['nsims'],\n                           seed=defaults.sims['seed'],\n                           global_min=defaults.common['global_min'],\n                           dry_run=defaults.common['dry_run'])\n\n    __doc__ += Link.construct_docstring(default_options)\n\n    def build_job_configs(self, args):\n        \"\"\"Hook to build job configurations\n        \"\"\"\n        job_configs = {}\n\n        ttype = args['ttype']\n        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)\n        if roster_yaml is None:\n            return job_configs\n\n        roster_dict = load_yaml(roster_yaml)\n\n        astro_priors = args['astro_priors']\n        channels = args['channels']\n        global_min = args['global_min']\n\n        for roster_name in list(roster_dict.keys()):\n            rost_chans = select_channels(channels, roster_name)\n            for astro_prior in astro_priors:\n                name_keys = dict(target_type=ttype,\n                                 roster_name=roster_name,\n                                 astro_prior=astro_prior,\n                                 sim_name=sim,\n                                 fullpath=True)\n\n                for chan in rost_chans:\n                    targ_key = \"%s:%s:%s\" % (roster_name, astro_prior, chan)\n\n                    if sim is not None:\n                        seedlist = list(range(\n                            args['seed'], args['seed'] + args['nsims']))\n                    else:\n                        seedlist = [None]\n\n                    for seed in seedlist:\n                        if seed is not None:\n                            name_keys['seed'] = \"%06i\" % seed # pylint: disable=bad-string-format-type\n                            input_path = NAME_FACTORY.sim_resultsfile(\n                                **name_keys)\n                            full_targ_key = \"%s_%06i\" % (targ_key, seed) # pylint: disable=bad-string-format-type\n                        else:\n                            input_path = NAME_FACTORY.resultsfile(**name_keys)\n                            full_targ_key = targ_key\n\n                        output_path = input_path.replace(\n                            '.fits', '_%s.png' % chan)\n                        logfile = make_nfs_path(\n                            output_path.replace('.png', '.log'))\n                        job_config = dict(infile=input_path,\n                                          outfile=output_path,\n                                          astro_prior=astro_prior,\n                                          logfile=logfile,\n                                          global_min=global_min,\n                                          chan=chan)\n                        job_configs[full_targ_key] = job_config\n\n        return job_configs\n\n\n\nclass PlotControlLimits_SG(ScatterGather):\n    \"\"\"Small class to generate configurations for `PlotLimits`\n\n    This does a quadruple loop over rosters, j-factor priors, channels, and expectation bands\n    \"\"\"\n    appname = 'dmpipe-plot-control-limits-sg'\n    usage = \"%s [options]\" % (appname)\n    description = \"Make limits plots for positive controls\"\n    clientclass = PlotLimits\n\n    job_time = 60\n\n    default_options = dict(ttype=defaults.common['ttype'],\n                           rosterlist=defaults.common['targetlist'],\n                           channels=defaults.common['channels'],\n                           astro_priors=defaults.common['astro_priors'],\n                           sim=defaults.sims['sim'],\n                           dry_run=defaults.common['dry_run'])\n\n    __doc__ += Link.construct_docstring(default_options)\n\n    def build_job_configs(self, args):\n        \"\"\"Hook to build job configurations\n        \"\"\"\n        job_configs = {}\n\n        ttype = args['ttype']\n\n        try:\n            os.makedirs(os.path.join(ttype, 'results'))\n        except OSError:\n            pass\n\n        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)\n        if roster_yaml is None:\n            return job_configs\n\n        roster_dict = load_yaml(roster_yaml)\n\n        astro_priors = args['astro_priors']\n        channels = args['channels']\n\n        sim_path = os.path.join('config', 'sim_%s.yaml' % 
sim)\n\n        for roster_name in list(roster_dict.keys()):\n            rost_chans = select_channels(channels, roster_name)\n            for astro_prior in astro_priors:\n                name_keys = dict(target_type=ttype,\n                                 roster_name=roster_name,\n                                 astro_prior=astro_prior,\n                                 sim_name=sim,\n                                 seed='summary',\n                                 fullpath=True)\n                bands_path = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)\n\n                for chan in rost_chans:\n                    targ_key = \"%s:%s:%s:%s\" % (roster_name, astro_prior, sim, chan)\n                    output_path = os.path.join(ttype, 'results', \"control_%s_%s_%s_%s.png\" % (roster_name, astro_prior, sim, chan))\n                    logfile = make_nfs_path(output_path.replace('.png', '.log'))\n                    job_config = dict(bands=bands_path,\n                                      outfile=output_path,\n                                      sim=sim_path,\n                                      logfile=logfile,\n                                      chan=chan)\n                    job_configs[targ_key] = job_config\n        return job_configs\n\n\nclass PlotControlMLEs_SG(ScatterGather):\n    \"\"\"Small class to generate configurations for `PlotMLEs`\n\n    This does a quadruple loop over rosters, j-factor priors, channels, and expectation bands\n    \"\"\"\n    appname = 'dmpipe-plot-control-mles-sg'\n    usage = \"%s [options]\" % (appname)\n    description = \"Make mle plots for positive controls\"\n    clientclass = PlotMLEs\n\n    job_time = 60\n\n    default_options = dict(ttype=defaults.common['ttype'],\n                           rosterlist=defaults.common['targetlist'],\n                           channels=defaults.common['channels'],\n                           astro_priors=defaults.common['astro_priors'],\n                           sim=defaults.sims['sim'],\n                           dry_run=defaults.common['dry_run'])\n\n    __doc__ += Link.construct_docstring(default_options)\n\n    def build_job_configs(self, args):\n        \"\"\"Hook to build job configurations\n        \"\"\"\n        job_configs = {}\n\n        ttype = args['ttype']\n\n        try:\n            os.makedirs(os.path.join(ttype, 'results'))\n        except OSError:\n            pass\n\n        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)\n        if roster_yaml is None:\n            return job_configs\n\n        roster_dict = load_yaml(roster_yaml)\n\n        astro_priors = args['astro_priors']\n        channels = args['channels']\n\n        sim_path = os.path.join('config', 'sim_%s.yaml' % sim)\n\n        for roster_name in list(roster_dict.keys()):\n            rost_chans = select_channels(channels, roster_name)\n            for astro_prior in astro_priors:\n                name_keys = dict(target_type=ttype,\n                                 roster_name=roster_name,\n                                 astro_prior=astro_prior,\n                                 sim_name=sim,\n                                 seed='summary',\n                                 fullpath=True)\n                bands_path = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)\n\n                for chan in rost_chans:\n                    targ_key = \"%s:%s:%s:%s\" % (roster_name, astro_prior, sim, chan)\n                    output_path = os.path.join(ttype, 'results', \"control_mle_%s_%s_%s_%s.png\" % (roster_name, astro_prior, sim, chan))\n                    logfile = make_nfs_path(output_path.replace('.png', '.log'))\n                    job_config = dict(bands=bands_path,\n                                      outfile=output_path,\n                                      sim=sim_path,\n                                      logfile=logfile,\n                                      chan=chan)\n                    job_configs[targ_key] = job_config\n        return job_configs\n\n\n\nclass PlotFinalLimits_SG(ScatterGather):\n    \"\"\"Small class to generate configurations for `PlotLimits`\n\n    This does a quadruple loop over rosters, j-factor priors, channels, and expectation bands\n    \"\"\"\n    appname = 'dmpipe-plot-final-limits-sg'\n    usage = \"%s [options]\" % (appname)\n    description = \"Make final limits plots\"\n    clientclass = PlotLimits\n\n    job_time = 60\n\n    default_options = dict(ttype=defaults.common['ttype'],\n                           rosterlist=defaults.common['rosterlist'],\n                           channels=defaults.common['channels'],\n                           astro_priors=defaults.common['astro_priors'],\n                           sims=defaults.sims['sims'],\n                           dry_run=defaults.common['dry_run'])\n\n    __doc__ += Link.construct_docstring(default_options)\n\n    def build_job_configs(self, args):\n        \"\"\"Hook to build job 
configurations\n        \"\"\"\n        job_configs = {}\n\n        ttype = args['ttype']\n        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)\n        if roster_yaml is None:\n            return job_configs\n        if sim is not None:\n            raise ValueError(\"Sim argument set when plotting data results\")\n\n        roster_dict = load_yaml(roster_yaml)\n\n        astro_priors = args['astro_priors']\n        channels = args['channels']\n\n        sims = args['sims']\n        for roster_name in list(roster_dict.keys()):\n            rost_chans = select_channels(channels, roster_name)\n            for astro_prior in astro_priors:\n                name_keys = dict(target_type=ttype,\n                                 roster_name=roster_name,\n                                 astro_prior=astro_prior,\n                                 fullpath=True)\n                input_path = NAME_FACTORY.stackedlimitsfile(**name_keys)\n                for sim in sims:\n                    name_keys.update(sim_name=sim,\n                                     seed='summary')\n                    bands_path = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)\n\n                    for chan in rost_chans:\n                        targ_key = \"%s:%s:%s:%s\" % (roster_name, astro_prior, sim, chan)\n                        output_path = os.path.join(ttype, 'results', \"final_%s_%s_%s_%s.png\" % (roster_name, astro_prior, sim, chan))\n                        logfile = make_nfs_path(output_path.replace('.png', '.log'))\n                        job_config = dict(infile=input_path,\n                                          outfile=output_path,\n                                          bands=bands_path,\n                                          logfile=logfile,\n                                          chan=chan)\n                        job_configs[targ_key] = job_config\n\n        return job_configs\n\n\n\n\n\n\ndef register_classes():\n    \"\"\"Register these classes with the `LinkFactory` \"\"\"\n    PlotDMSpectra.register_class()\n    PlotLimits.register_class()\n    PlotLimits_SG.register_class()\n    PlotMLEs.register_class()\n    PlotDM.register_class()\n    PlotDM_SG.register_class()\n    PlotStackedDM_SG.register_class()\n    PlotStackedLimits_SG.register_class()\n    PlotControlLimits_SG.register_class()\n    PlotControlMLEs_SG.register_class()\n    PlotFinalLimits_SG.register_class()\n" ]
[ [ "numpy.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
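The profile/channel bookkeeping at the top of dm_plotting.py above is pure string logic, so it can be exercised without the fermipy stack. Here is a minimal standalone restatement; the profile and channel names are invented for illustration (the real ones come from the target yaml files):

def is_decay_profile(profile):
    """A profile targets decay if its suffix is one of these tokens."""
    return profile.split('_')[-1] in ['point', 'dmap', 'dradial']

def is_ann_profile(profile):
    """A profile targets annihilation if its suffix is one of these tokens."""
    return profile.split('_')[-1] in ['point', 'map', 'radial']

def select_channels(channels, profile):
    """Keep decay channels for decay profiles and the rest for annihilation profiles."""
    ok_decay, ok_ann = is_decay_profile(profile), is_ann_profile(profile)
    return [c for c in channels if (ok_decay if '_decay' in c else ok_ann)]

channels = ['bb', 'tautau', 'bb_decay']
print(select_channels(channels, 'draco_dmap'))   # ['bb_decay']
print(select_channels(channels, 'draco_map'))    # ['bb', 'tautau']
print(select_channels(channels, 'draco_point'))  # all three: 'point' allows both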
Aspirisha/l5kit
[ "40ed7576f803e83fc3f0714e6458635f9f6bfe60" ]
[ "l5kit/l5kit/data/map_api.py" ]
[ "from enum import IntEnum\nfrom functools import lru_cache\nfrom typing import Iterator, no_type_check, Sequence, Union\n\nimport numpy as np\nimport pymap3d as pm\n\nfrom l5kit.configs.config import load_metadata\nfrom l5kit.data import DataManager\n\nfrom ..geometry import transform_points\nfrom .proto.road_network_pb2 import GeoFrame, GlobalId, MapElement, MapFragment\n\n\nCACHE_SIZE = int(1e5)\nENCODING = \"utf-8\"\n\n\nclass InterpolationMethod(IntEnum):\n    INTER_METER = 0 # fixed interpolation at a given step in meters\n    INTER_ENSURE_LEN = 1 # ensure we always get the same number of elements\n\n\nclass TLFacesColors(IntEnum):\n    RED = 0\n    GREEN = 1\n    YELLOW = 2\n\n\nclass MapAPI:\n    def __init__(self, protobuf_map_path: str, world_to_ecef: np.ndarray):\n        \"\"\"\n        Interface to the raw protobuf map file with the following features:\n        - access to an element using its ID is O(1);\n        - access to coordinates in world ref system for a set of elements is O(1) after first access (lru cache)\n        - objects support iteration using the __getitem__ protocol\n\n        Args:\n            protobuf_map_path (str): path to the protobuf file\n            world_to_ecef (np.ndarray): transformation matrix from world coordinates to ECEF (dataset dependent)\n        \"\"\"\n        self.protobuf_map_path = protobuf_map_path\n        self.ecef_to_world = np.linalg.inv(world_to_ecef)\n\n        with open(protobuf_map_path, \"rb\") as infile:\n            mf = MapFragment()\n            mf.ParseFromString(infile.read())\n\n        self.elements = mf.elements\n        self.ids_to_el = {self.id_as_str(el.id): idx for idx, el in enumerate(self.elements)} # store a look-up table\n\n        self.bounds_info = self.get_bounds() # store bounds for semantic elements for fast look-up\n\n    @staticmethod\n    def from_cfg(data_manager: DataManager, cfg: dict) -> \"MapAPI\":\n        \"\"\"Build a MapAPI object starting from a config file and a data manager\n\n        :param data_manager: a data manager object to resolve paths\n        :param cfg: the config dict\n        :return: a MapAPI object\n        \"\"\"\n        raster_cfg = cfg[\"raster_params\"]\n        dataset_meta_key = raster_cfg[\"dataset_meta_key\"]\n\n        semantic_map_filepath = data_manager.require(raster_cfg[\"semantic_map_key\"])\n        dataset_meta = load_metadata(data_manager.require(dataset_meta_key))\n        world_to_ecef = np.array(dataset_meta[\"world_to_ecef\"], dtype=np.float64)\n\n        return MapAPI(semantic_map_filepath, world_to_ecef)\n\n    @staticmethod\n    @no_type_check\n    def id_as_str(element_id: GlobalId) -> str:\n        \"\"\"\n        Get the element id as a string.\n        Element ids are stored as a variable len sequence of bytes in the protobuf\n\n        Args:\n            element_id (GlobalId): the GlobalId in the protobuf\n\n        Returns:\n            str: the id as a str\n        \"\"\"\n        return element_id.id.decode(ENCODING)\n\n    @staticmethod\n    def _undo_e7(value: float) -> float:\n        \"\"\"\n        Latitude and longitude are stored as value*1e7 in the protobuf for efficiency and guaranteed accuracy.\n        Convert them back to float.\n\n        Args:\n            value (float): the scaled value\n\n        Returns:\n            float: the unscaled value\n        \"\"\"\n        return value / 1e7\n\n    @no_type_check\n    def unpack_deltas_cm(self, dx: Sequence[int], dy: Sequence[int], dz: Sequence[int], frame: GeoFrame) -> np.ndarray:\n        \"\"\"\n        Get coords in world reference system (local ENU->ECEF->world).\n        See the protobuf annotations for additional information about how coordinates are stored\n\n        Args:\n            dx (Sequence[int]): X displacement in centimeters in local ENU\n            dy (Sequence[int]): Y displacement in centimeters in local ENU\n            dz (Sequence[int]): Z displacement in centimeters in local ENU\n            frame (GeoFrame): geo-location 
information for the local ENU. It contains lat and long origin of the frame\n\n Returns:\n np.ndarray: array of shape (Nx3) with XYZ coordinates in world ref system\n\n \"\"\"\n x = np.cumsum(np.asarray(dx) / 100)\n y = np.cumsum(np.asarray(dy) / 100)\n z = np.cumsum(np.asarray(dz) / 100)\n frame_lat, frame_lng = self._undo_e7(frame.origin.lat_e7), self._undo_e7(frame.origin.lng_e7)\n xyz = np.stack(pm.enu2ecef(x, y, z, frame_lat, frame_lng, 0), axis=-1)\n xyz = transform_points(xyz, self.ecef_to_world)\n return xyz\n\n @staticmethod\n @no_type_check\n def is_lane(element: MapElement) -> bool:\n \"\"\"\n Check whether an element is a valid lane\n\n Args:\n element (MapElement): a proto element\n\n Returns:\n bool: True if the element is a valid lane\n \"\"\"\n return bool(element.element.HasField(\"lane\"))\n\n @lru_cache(maxsize=CACHE_SIZE)\n def get_lane_coords(self, element_id: str) -> dict:\n \"\"\"\n Get XYZ coordinates in world ref system for a lane given its id\n lru_cached for O(1) access\n\n Args:\n element_id (str): lane element id\n\n Returns:\n dict: a dict with the two boundaries coordinates as (Nx3) XYZ arrays\n \"\"\"\n element = self[element_id]\n assert self.is_lane(element)\n\n lane = element.element.lane\n left_boundary = lane.left_boundary\n right_boundary = lane.right_boundary\n\n xyz_left = self.unpack_deltas_cm(\n left_boundary.vertex_deltas_x_cm,\n left_boundary.vertex_deltas_y_cm,\n left_boundary.vertex_deltas_z_cm,\n lane.geo_frame,\n )\n xyz_right = self.unpack_deltas_cm(\n right_boundary.vertex_deltas_x_cm,\n right_boundary.vertex_deltas_y_cm,\n right_boundary.vertex_deltas_z_cm,\n lane.geo_frame,\n )\n\n return {\"xyz_left\": xyz_left, \"xyz_right\": xyz_right}\n\n @staticmethod\n def interpolate(xyz: np.ndarray, step: float, method: InterpolationMethod) -> np.ndarray:\n \"\"\"\n Interpolate points based on cumulative distances from the first one. 
Two modes are available:\n INTER_METER: interpolate using step as a meter value over cumulative distances (variable len result)\n INTER_ENSURE_LEN: interpolate using a variable step such that we always get step values\n Args:\n xyz (np.ndarray): XYZ coords\n step (float): param for the interpolation\n method (InterpolationMethod): method to use to interpolate\n\n Returns:\n np.ndarray: the new interpolated coordinates\n \"\"\"\n cum_dist = np.cumsum(np.linalg.norm(np.diff(xyz, axis=0), axis=-1))\n cum_dist = np.insert(cum_dist, 0, 0)\n\n if method == InterpolationMethod.INTER_ENSURE_LEN:\n step = int(step)\n assert step > 1, \"step must be at least 2 with INTER_ENSURE_LEN\"\n steps = np.linspace(cum_dist[0], cum_dist[-1], step)\n\n elif method == InterpolationMethod.INTER_METER:\n assert step > 0, \"step must be greater than 0 with INTER_FIXED\"\n steps = np.arange(cum_dist[0], cum_dist[-1], step)\n else:\n raise NotImplementedError(f\"interpolation method should be in {InterpolationMethod.__members__}\")\n\n xyz_inter = np.empty((len(steps), 3), dtype=xyz.dtype)\n xyz_inter[:, 0] = np.interp(steps, xp=cum_dist, fp=xyz[:, 0])\n xyz_inter[:, 1] = np.interp(steps, xp=cum_dist, fp=xyz[:, 1])\n xyz_inter[:, 2] = np.interp(steps, xp=cum_dist, fp=xyz[:, 2])\n return xyz_inter\n\n @lru_cache(maxsize=CACHE_SIZE)\n def get_lane_traffic_control_ids(self, element_id: str) -> set:\n lane = self[element_id].element.lane\n return set([MapAPI.id_as_str(la_tc) for la_tc in lane.traffic_controls])\n\n @lru_cache(maxsize=CACHE_SIZE)\n def get_lane_as_interpolation(self, element_id: str, step: float, method: InterpolationMethod) -> dict:\n \"\"\"\n Perform an interpolation of the left and right lanes and compute the midlane.\n See interpolate for details about the different interpolation methods\n\n Args:\n element_id (str): lane id\n step (float): step param for the method\n method (InterpolationMethod): one of the accepted methods\n\n Returns:\n dict: same as `get_lane_coords` but overwrite xyz values for the lanes\n \"\"\"\n lane_dict = self.get_lane_coords(element_id)\n xyz_left = lane_dict[\"xyz_left\"]\n xyz_right = lane_dict[\"xyz_right\"]\n\n lane_dict[\"xyz_left\"] = self.interpolate(xyz_left, step, method)\n lane_dict[\"xyz_right\"] = self.interpolate(xyz_right, step, method)\n\n # to compute midlane we average between left and right bounds\n # but to do that we need them to have the same numbers of points\n # if that's not the case (interpolation is not INTER_ENSURE_LEN) we interpolate again with that mode\n if method != InterpolationMethod.INTER_ENSURE_LEN:\n mid_steps = max(len(xyz_left), len(xyz_right))\n # recompute lanes using fixed length\n xyz_left = self.interpolate(xyz_left, mid_steps, InterpolationMethod.INTER_ENSURE_LEN)\n xyz_right = self.interpolate(xyz_right, mid_steps, InterpolationMethod.INTER_ENSURE_LEN)\n\n else:\n xyz_left = lane_dict[\"xyz_left\"]\n xyz_right = lane_dict[\"xyz_right\"]\n\n xyz_midlane = (xyz_left + xyz_right) / 2\n\n # interpolate xyz for midlane with the selected interpolation\n lane_dict[\"xyz_midlane\"] = self.interpolate(xyz_midlane, step, method)\n return lane_dict\n\n @staticmethod\n @no_type_check\n def is_crosswalk(element: MapElement) -> bool:\n \"\"\"\n Check whether an element is a valid crosswalk\n\n Args:\n element (MapElement): a proto element\n\n Returns:\n bool: True if the element is a valid crosswalk\n \"\"\"\n if not element.element.HasField(\"traffic_control_element\"):\n return False\n traffic_element = 
element.element.traffic_control_element\n return bool(traffic_element.HasField(\"pedestrian_crosswalk\") and traffic_element.points_x_deltas_cm)\n\n @lru_cache(maxsize=CACHE_SIZE)\n def get_crosswalk_coords(self, element_id: str) -> dict:\n \"\"\"\n Get XYZ coordinates in world ref system for a crosswalk given its id\n lru_cached for O(1) access\n\n Args:\n element_id (str): crosswalk element id\n\n Returns:\n dict: a dict with the polygon coordinates as an (Nx3) XYZ array\n \"\"\"\n element = self[element_id]\n assert self.is_crosswalk(element)\n traffic_element = element.element.traffic_control_element\n\n xyz = self.unpack_deltas_cm(\n traffic_element.points_x_deltas_cm,\n traffic_element.points_y_deltas_cm,\n traffic_element.points_z_deltas_cm,\n traffic_element.geo_frame,\n )\n\n return {\"xyz\": xyz}\n\n def is_traffic_light(self, element_id: str) -> bool:\n \"\"\"\n Check if the element is a traffic light\n Args:\n element_id (str): the id (utf-8 encode) of the element\n\n Returns:\n True if the element is a traffic light\n \"\"\"\n element = self[element_id]\n if not element.element.HasField(\"traffic_control_element\"):\n return False\n traffic_el = element.element.traffic_control_element\n return traffic_el.HasField(\"traffic_light\") is True\n\n @lru_cache(maxsize=CACHE_SIZE)\n def is_traffic_face(self, element_id: str) -> bool:\n \"\"\"\n Check if the element is a traffic light face (of any color)\n\n Args:\n element_id (str): the id (utf-8 encode) of the element\n Returns:\n True if the element is a traffic light face, False otherwise\n \"\"\"\n\n for color in TLFacesColors:\n color_name = color.name\n if self.is_traffic_face_color(element_id, color_name.lower()):\n return True\n return False\n\n def is_traffic_face_color(self, element_id: str, color: str) -> bool:\n \"\"\"\n Check if the element is a traffic light face of the given color\n\n Args:\n element_id (str): the id (utf-8 encode) of the element\n color (str): the color to check\n Returns:\n True if the element is a traffic light face with the given color\n \"\"\"\n element = self[element_id]\n if not element.element.HasField(\"traffic_control_element\"):\n return False\n traffic_el = element.element.traffic_control_element\n if (\n traffic_el.HasField(f\"signal_{color}_face\")\n or traffic_el.HasField(f\"signal_left_arrow_{color}_face\")\n or traffic_el.HasField(f\"signal_right_arrow_{color}_face\")\n or traffic_el.HasField(f\"signal_upper_left_arrow_{color}_face\")\n or traffic_el.HasField(f\"signal_upper_right_arrow_{color}_face\")\n ):\n return True\n return False\n\n @lru_cache(maxsize=CACHE_SIZE)\n def get_color_for_face(self, face_id: str) -> str:\n \"\"\"\n Utility function. 
It calls `is_traffic_face_color` for a set of colors until it gets an answer.\n        If no color is found, then `face_id` is not the id of a traffic light face (and we raise ValueError).\n\n        Args:\n            face_id (str): the element id\n        Returns:\n            str: the color as string for this traffic face\n        \"\"\"\n        for color in TLFacesColors:\n            color_name = color.name\n            if self.is_traffic_face_color(face_id, color_name.lower()):\n                return color_name\n        raise ValueError(f\"Face {face_id} has no valid color among {TLFacesColors.__members__}\")\n\n    def get_bounds(self) -> dict:\n        \"\"\"\n        For each element of interest returns bounds [[min_x, min_y],[max_x, max_y]] and proto ids\n        Coords are computed by the MapAPI and, as such, are in the world ref system.\n\n        Returns:\n            dict: keys are classes of elements, values are dict with `bounds` and `ids` keys\n        \"\"\"\n        lanes_ids = []\n        crosswalks_ids = []\n\n        lanes_bounds = np.empty((0, 2, 2), dtype=np.float64) # [(X_MIN, Y_MIN), (X_MAX, Y_MAX)]\n        crosswalks_bounds = np.empty((0, 2, 2), dtype=np.float64) # [(X_MIN, Y_MIN), (X_MAX, Y_MAX)]\n\n        for element in self.elements:\n            element_id = MapAPI.id_as_str(element.id)\n\n            if self.is_lane(element):\n                lane = self.get_lane_coords(element_id)\n                x_min = min(np.min(lane[\"xyz_left\"][:, 0]), np.min(lane[\"xyz_right\"][:, 0]))\n                y_min = min(np.min(lane[\"xyz_left\"][:, 1]), np.min(lane[\"xyz_right\"][:, 1]))\n                x_max = max(np.max(lane[\"xyz_left\"][:, 0]), np.max(lane[\"xyz_right\"][:, 0]))\n                y_max = max(np.max(lane[\"xyz_left\"][:, 1]), np.max(lane[\"xyz_right\"][:, 1]))\n\n                lanes_bounds = np.append(lanes_bounds, np.asarray([[[x_min, y_min], [x_max, y_max]]]), axis=0)\n                lanes_ids.append(element_id)\n\n            if self.is_crosswalk(element):\n                crosswalk = self.get_crosswalk_coords(element_id)\n                x_min, y_min = np.min(crosswalk[\"xyz\"], axis=0)[:2]\n                x_max, y_max = np.max(crosswalk[\"xyz\"], axis=0)[:2]\n\n                crosswalks_bounds = np.append(\n                    crosswalks_bounds, np.asarray([[[x_min, y_min], [x_max, y_max]]]), axis=0,\n                )\n                crosswalks_ids.append(element_id)\n\n        return {\n            \"lanes\": {\"bounds\": lanes_bounds, \"ids\": lanes_ids},\n            \"crosswalks\": {\"bounds\": crosswalks_bounds, \"ids\": crosswalks_ids},\n        }\n\n    @no_type_check\n    def __getitem__(self, item: Union[int, str, bytes]) -> MapElement:\n        if isinstance(item, str):\n            return self.elements[self.ids_to_el[item]]\n        elif isinstance(item, int):\n            return self.elements[item]\n        elif isinstance(item, bytes):\n            return self.elements[self.ids_to_el[item.decode(ENCODING)]]\n        else:\n            raise TypeError(\"only str, bytes and int are allowed in API __getitem__\")\n\n    def __len__(self) -> int:\n        return len(self.elements)\n\n    def __iter__(self) -> Iterator:\n        for i in range(len(self)):\n            yield self[i]\n" ]
[ [ "numpy.linspace", "numpy.min", "numpy.linalg.inv", "numpy.asarray", "numpy.arange", "numpy.max", "numpy.diff", "numpy.insert", "numpy.interp", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
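MapAPI.interpolate above is a pure numpy routine, so its behaviour is easy to check in isolation. The following is a standalone sketch of its INTER_ENSURE_LEN branch, with no protobuf map required (the toy polyline is invented for illustration):

import numpy as np

def interpolate_ensure_len(xyz: np.ndarray, n_points: int) -> np.ndarray:
    """Resample a polyline to exactly n_points spaced evenly along its cumulative arc length."""
    cum_dist = np.cumsum(np.linalg.norm(np.diff(xyz, axis=0), axis=-1))
    cum_dist = np.insert(cum_dist, 0, 0)
    steps = np.linspace(cum_dist[0], cum_dist[-1], n_points)
    xyz_inter = np.empty((n_points, 3), dtype=xyz.dtype)
    for axis in range(3):  # interpolate X, Y, Z independently against arc length
        xyz_inter[:, axis] = np.interp(steps, xp=cum_dist, fp=xyz[:, axis])
    return xyz_inter

# A 3-vertex polyline along X, resampled to 5 evenly spaced vertices.
line = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [3.0, 0.0, 0.0]])
print(interpolate_ensure_len(line, 5)[:, 0])  # [0.   0.75 1.5  2.25 3.  ]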
rafelafrance/nitfix
[ "4f895ce84ae8d93f2df7fa3772146dd5d9e02643" ]
[ "nitfix/ingest_nfn_data.py" ]
[ "\"\"\"Extract, transform, and load data related to Notes from Nature data.\"\"\"\n\nimport os\nimport re\nimport string\n\nimport pandas as pd\n\nimport lib.db as db\nimport lib.util as util\n\n\ndef ingest_nfn_data():\n \"\"\"Ingest data related to the taxonomy.\"\"\"\n cxn = db.connect()\n\n exps = [get_expedition(e) for e in util.EXPEDITIONS]\n nfn = pd.concat(exps, ignore_index=True).fillna('')\n nfn = fixup_data(nfn)\n nfn = update_collector_data(nfn)\n\n create_nfn_table(cxn, nfn)\n\n\ndef get_expedition(file_name):\n \"\"\"Get NitFix expedition data.\"\"\"\n csv_path = os.fspath(util.EXPEDITION_DATA / file_name)\n nfn = pd.read_csv(csv_path, dtype=str).fillna('')\n nfn['workflow_id'] = file_name[:4]\n\n columns = {}\n for old in nfn.columns:\n new = old.lower()\n new = new.replace('⁰', 'deg')\n new = new.replace(\"''\", 'sec')\n new = new.replace(\"'\", 'min')\n new = re.sub(r'\\W+', '_', new)\n new = re.sub(r'^_|_$', '', new)\n columns[old] = new\n if 'subject_qr_code' in columns:\n columns['subject_qr_code'] = 'sample_id'\n if 'subject_sample_id' in columns:\n columns['subject_sample_id'] = 'sample_id'\n nfn.rename(columns=columns, copy=False, inplace=True)\n\n return nfn\n\n\ndef fixup_data(nfn):\n \"\"\"Merge duplicate sample IDs into one record.\"\"\"\n aggs = {c: agg_concat for c in nfn.columns}\n dup_ids = nfn.sample_id.duplicated(keep=False)\n dups = nfn.loc[dup_ids].groupby('sample_id').agg(aggs)\n\n nfn = pd.concat([nfn[~dup_ids], dups])\n nfn = nfn.set_index('sample_id')\n return nfn\n\n\ndef agg_concat(group):\n \"\"\"Concatenate the group into a string of unique values.\"\"\"\n group = [g for g in group if g]\n return '|'.join(set(group))\n\n\ndef update_collector_data(nfn):\n \"\"\"Normalize the collector data as much as possible.\"\"\"\n nfn['collection_no'] = nfn.apply(get_collection_no, axis=1)\n nfn['collected_by'] = nfn.apply(\n lambda x: x.collected_by_first_collector_last_name_only\n or x.primary_collector_last_first_middle, axis=1)\n nfn['last_name'] = nfn.collected_by.apply(get_last_name)\n return nfn\n\n\ndef get_last_name(collected_by):\n \"\"\"Extract the last name from the collected by field.\"\"\"\n if not collected_by:\n return ''\n\n collected_by = collected_by.split(',')\n if not collected_by or not collected_by[0]:\n return ''\n\n last_name = collected_by[0]\n\n while (last_name[-1] in string.punctuation\n or (len(last_name) > 4 and last_name[-2] == ' ')):\n if last_name[-1] in string.punctuation:\n last_name = last_name[:-1]\n if len(last_name) > 4 and last_name[-2] == ' ':\n last_name = last_name[:-2]\n\n return last_name\n\n\ndef get_collection_no(row):\n \"\"\"Get the collection number from an expedition row.\"\"\"\n if row.get('collector_number'):\n return row.collector_number\n num = row.get('collector_number_numeric_only', '')\n verb = row.get('collector_number_verbatim', '')\n if verb and len(num) < 2:\n return row.collector_number_verbatim\n return row.collector_number_numeric_only\n\n\ndef create_nfn_table(cxn, nfn):\n \"\"\"Create Notes from Nature data table.\"\"\"\n nfn.to_sql('nfn_data', cxn, if_exists='replace')\n\n cxn.execute(\"\"\"\n CREATE UNIQUE INDEX nfn_data_sample_id ON nfn_data (sample_id);\n \"\"\")\n\n\nif __name__ == '__main__':\n ingest_nfn_data()\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
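The collector-name cleanup in ingest_nfn_data.py above is self-contained string handling and can be tried without a database. Below is a standalone sketch of get_last_name with a few invented inputs; note that, as in the original, a value made up entirely of punctuation would exhaust the string and raise an IndexError:

import string

def get_last_name(collected_by):
    """Extract the last name from a 'Last, First' collector string."""
    if not collected_by:
        return ''
    parts = collected_by.split(',')
    if not parts or not parts[0]:
        return ''
    last_name = parts[0]
    # Strip trailing punctuation and dangling single-letter initials like ' J.'
    while (last_name[-1] in string.punctuation
           or (len(last_name) > 4 and last_name[-2] == ' ')):
        if last_name[-1] in string.punctuation:
            last_name = last_name[:-1]
        if len(last_name) > 4 and last_name[-2] == ' ':
            last_name = last_name[:-2]
    return last_name

print(get_last_name('Smith, John A.'))  # Smith
print(get_last_name('Smith J.'))        # Smith
print(get_last_name(''))                # (empty string)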