Dataset schema (one record per repository; the per-file fields are parallel sequences):

  repo_name          string (6 to 130 characters)
  hexsha             sequence of commit SHAs, one per file
  file_path          sequence of file paths within the repository
  code               sequence of file contents
  apis               sequence of per-file lists of the API calls each file uses
  possible_versions  list of per-file dicts mapping library (matplotlib, numpy, pandas, scipy, tensorflow) to the versions the file appears compatible with
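For orientation, here is a minimal sketch of iterating over records with this schema. It assumes the dump is serialized as JSON Lines; the file name "records.jsonl" is hypothetical.

import json

with open("records.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # hexsha, file_path, code, and apis are parallel, per-file sequences.
        for path, sha, source, apis in zip(record["file_path"], record["hexsha"],
                                           record["code"], record["apis"]):
            print(record["repo_name"], path, sha[:8], "uses", len(apis), "APIs")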
xierensong/learnPython
[ "33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455", "33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455", "33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455", "33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455" ]
[ "case/tfP.py", "example/Artificial_Intelligence_with_Python_Code/Chapter 5/code/k_nearest_neighbors.py", "case1130/graph.py", "case/array.py" ]
[ "import tensorflow as tf\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=5)\nmodel.evaluate(x_test, y_test)", "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import NearestNeighbors\n\n# Input data\nX = np.array([[2.1, 1.3], [1.3, 3.2], [2.9, 2.5], [2.7, 5.4], [3.8, 0.9], \n [7.3, 2.1], [4.2, 6.5], [3.8, 3.7], [2.5, 4.1], [3.4, 1.9],\n [5.7, 3.5], [6.1, 4.3], [5.1, 2.2], [6.2, 1.1]])\n\n# Number of nearest neighbors\nk = 5\n\n# Test datapoint \ntest_datapoint = [4.3, 2.7]\n\n# Plot input data \nplt.figure()\nplt.title('Input data')\nplt.scatter(X[:,0], X[:,1], marker='o', s=75, color='black')\n\n# Build K Nearest Neighbors model\nknn_model = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X)\ndistances, indices = knn_model.kneighbors([test_datapoint])\n\nprint(indices)\nprint(distances)\n\n# Print the 'k' nearest neighbors\nprint(\"\\nK Nearest Neighbors:\")\nfor rank, index in enumerate(indices[0][:k], start=1):\n print(str(rank) + \" ==>\", X[index])\n\n# Visualize the nearest neighbors along with the test datapoint \nplt.figure()\nplt.title('Nearest neighbors')\nplt.scatter(X[:, 0], X[:, 1], marker='o', s=75, color='k')\nplt.scatter(X[indices][0][:][:, 0], X[indices][0][:][:, 1], \n marker='o', s=250, color='k', facecolors='none')\nplt.scatter(test_datapoint[0], test_datapoint[1],\n marker='x', s=75, color='k')\n\nplt.show()\n", "import networkx as nx\nimport operator\n\nimport matplotlib.pyplot as plt\n\ng = nx.read_weighted_edgelist('data/edgelist24.csv')\ndegree = nx.degree(g)\nnumNodes = nx.number_of_nodes(g)\nnumEdges = nx.number_of_edges(g)\nminDegree = min([item[1] for item in degree])\nmaxDegree = max([item[1] for item in degree])\n\nprint(degree)\nprint(numNodes)\nprint(numEdges)\nprint(minDegree)\nprint(maxDegree)\n\ndegreeSorted = sorted(degree, key=operator.itemgetter(1), reverse=True)\nprint(degreeSorted[0:9])\n\nnx.draw(g)\nplt.show()\nplt.savefig('path.png')\n", "# coding=utf-8\n\nimport numpy as np\n\na_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n\nprint('a list', a_list)\n\na_array = np.array(a_list)\n\nprint('a array', a_array)\n\na_new_list = a_array.tolist()\n\nprint('a new list', a_new_list)" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dropout" ], [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "sklearn.neighbors.NearestNeighbors", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.savefig" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adeline-cs/GTR
[ "889b0cda8a3c2b061371c4a63ea871821ddcd3d7" ]
[ "lib/evaluators.py" ]
[ "from __future__ import print_function, absolute_import\nimport time\nfrom time import gmtime, strftime\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport torch\nimport numpy as np\nfrom random import randint\nfrom PIL import Image\nimport sys\nfrom . import metric\nfrom metric import Accuracy, EditDistance, RecPostProcess\nfrom tqdm import tqdm\n\nclass BaseEvaluator(object):\n def __init__(self, model, metric, use_cuda = True):\n super(BaseEvaluator, self).__init__()\n self.model = model\n self.metric = metric\n self.use_cuda = use_cuda\n self.device = torch.device('cuda' if use_cuda else 'cpu')\n self.cos_sim = torch.nn.CosineSimilarity(dim=1, eps=1e-6)\n\n def evaluate(self, data_loader, step = 1, print_freq =1, tfLogger = None, dataset = None, vis_dir = None):\n self.model.eval()\n # batch_time =\n # data_time =\n\n # forward the network\n image, outputs, targets, embeds, losses = [], {}, [], [], []\n file_names = []\n end = time.time()\n for i, input in enumerate(tqdm(data_loader)):\n data_time.update(time.time()-end)\n input_dict = self._parse_data(input)\n output_dict = self._forward(input_dict)\n batch_size = input_dict['images'].size(0)\n total_loss_batch = 0\n for k, loss in output_dict['images'].item():\n loss = loss.mean(dim = 0, keepdim = True)\n total_loss_batch += loss.item() * batch_size\n image.append(input_dict['images'])\n targets.append(input_dict['rec_tragets'])\n embeds.append(input_dict['rec_embeds'])\n losses.append(total_loss_batch)\n ## the normal situation is without lexicon, especially for IIIT5k, IC03, IC13\n if global_args.evaluate_with_lexicon:\n file_names = input_dict['file_name']\n for k, v in output_dict['output'].item():\n if k not in outputs:\n outputs[k] = []\n outputs[k].append(v.cpu())\n batch_time.update(time.time()-end)\n\n if (i+1) % print_freq == 0:\n print('[{}]\\t'\n 'Evaluation : {}/{}\\t'\n 'Time: {:.3f} ({:.3f})\\t'\n 'Data: {:.3f} ({:.3f})\\t'\n .format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n i+1, len(data_loader),\n batch_time.val, batch_time.avg,\n data_time.val, data_time.avg\n\n ))\n if not global_args.keep_ratio():\n image = torch.cat(image)\n num_sample = image.size(0)\n else:\n num_sample = sum([subimage.size(0) for subimage in image])\n targets = torch.cat(targets)\n losses = np.sum(losses)/(1.0*num_sample)\n for k , v in outputs.items():\n outputs[k] = torch.cat(outputs[k])\n\n # save info for recognition\n if 'pred_rec' in outputs:\n # evaluation with metric\n if global_args.evaluate_with_lexicon:\n eval_res = metrics_factory[self.metric + '_with_lexicon'](outputs['pred_rec'], targets, dataset,\n file_names)\n print('lexicon0: {0}, {1:.3f}'.format(self.metric, eval_res[0]))\n print('lexicon50: {0}, {1:.3f}'.format(self.metric, eval_res[1]))\n print('lexicon1k: {0}, {1:.3f}'.format(self.metric, eval_res[2]))\n print('lexiconfull: {0}, {1:.3f}'.format(self.metric, eval_res[3]))\n eval_res = eval_res[0]\n else:\n eval_res = metrics_factory[self.metric](outputs['pred_rec'], targets, dataset)\n print('lexicon0: {0}: {1:.3f}'.format(self.metric, eval_res))\n pred_list, targ_list, score_list = RecPostProcess(outputs['pred_rec'], targets, outputs['pred_rec_score'],\n dataset)\n with open(\"embed_v1_results.txt\", \"w\", encoding=\"utf-8\") as f:\n for pred, targ in zip(pred_list, targ_list):\n f.write(\"{} {}\\n\".format(pred, targ))\n if 'pred_embed' in outputs:\n output_cos_sim = self.cos_sim(outputs['pred_embed'], torch.cat(embeds).cpu())\n output_cos_sim = torch.mean(torch.abs(output_cos_sim))\n 
print(\"Emebedding vector cos similarity: {:3f}\".format(output_cos_sim.item()))\n if tfLogger is not None:\n # (1) Log the scalar values\n info = {\n 'loss': losses,\n self.metric: eval_res,\n }\n for tag, value in info.items():\n tfLogger.scalar_summary(tag, value, step)\n\n # ====== Visualization ======#\n if vis_dir is not None:\n # recognition_vis(images, outputs['pred_rec'], targets, score_list, dataset, vis_dir)\n stn_vis(images, outputs['rectified_images'], outputs['ctrl_points'], outputs['pred_rec'],\n targets, score_list, outputs['pred_score'] if 'pred_score' in outputs else None, dataset, vis_dir)\n return eval_res\n\n # NotImplementedError, ValueError will represent what , the framework of python\n def _parse_data(self, input):\n raise NotImplementedError\n def _forward(self, input):\n raise NotImplementedError\n\nclass Evaluator(BaseEvaluator):\n def _parse_data(self, input):\n input_dict = {}\n if global_args.evaluate_with_lexicon:\n imgs, label_encs, lengths, file_name = inputs\n else:\n imgs, label_encs, lengths, embeds_ = inputs\n with torch.no_grad():\n images = imgs.to(self.device)\n if label_encs is None:\n labels = label_encs.to(self.device)\n if embeds_ is not None:\n embeds_ = embeds_.to(self.device)\n input_dict['images'] = images\n input_dict['rec_tragets'] = labels\n input_dict['rec_lengths'] = lengths\n input_dict['rec_embeds'] = embeds\n if global_args.evaluate_with_lexicon:\n input_dict['file_name'] = file_name\n return input_dict\n def _forward(self, input_dict):\n self.model.eval()\n with torch.no_grad:\n output_dict = self.model(input_dict)\n return output_dict" ]
[ [ "torch.abs", "torch.cat", "torch.nn.CosineSimilarity", "torch.no_grad", "torch.device", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kevint324/tensorboard
[ "cbc5b1f2d74236d89baa9d4810c166e4cee973a9" ]
[ "tensorboard/plugins/core/core_plugin_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the TensorBoard core endpoints.\"\"\"\n\n\nimport collections.abc\nimport contextlib\nimport io\nimport json\nimport os\nfrom unittest import mock\nimport zipfile\n\nimport tensorflow as tf\nfrom werkzeug import test as werkzeug_test\nfrom werkzeug import wrappers\n\nfrom tensorboard.backend import application\nfrom tensorboard.backend.event_processing import data_provider\nfrom tensorboard.backend.event_processing import (\n plugin_event_multiplexer as event_multiplexer,\n)\nfrom tensorboard.data import provider\nfrom tensorboard.plugins import base_plugin\nfrom tensorboard.plugins.core import core_plugin\nfrom tensorboard.util import test_util\n\nFAKE_INDEX_HTML = b\"<!doctype html><title>fake-index</title>\"\nFAKE_INDEX_JS = b\"console.log('hello');\"\nNO_CACHE_CONTROL_VALUE = \"no-cache, must-revalidate\"\nONE_DAY_CACHE_CONTROL_VALUE = \"private, max-age=86400\"\n\n\nclass FakeFlags(object):\n def __init__(\n self,\n bind_all=False,\n host=None,\n inspect=False,\n version_tb=False,\n logdir=\"\",\n logdir_spec=\"\",\n event_file=\"\",\n db=\"\",\n path_prefix=\"\",\n generic_data=\"true\",\n grpc_data_provider=\"\",\n reuse_port=False,\n ):\n self.bind_all = bind_all\n self.host = host\n self.inspect = inspect\n self.version_tb = version_tb\n self.logdir = logdir\n self.logdir_spec = logdir_spec\n self.event_file = event_file\n self.db = db\n self.path_prefix = path_prefix\n self.generic_data = generic_data\n self.grpc_data_provider = grpc_data_provider\n self.reuse_port = reuse_port\n\n\nclass CorePluginFlagsTest(tf.test.TestCase):\n def testFlag(self):\n loader = core_plugin.CorePluginLoader()\n loader.fix_flags(FakeFlags(version_tb=True))\n loader.fix_flags(FakeFlags(inspect=True, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=True, event_file=\"/tmp/event.out\"))\n loader.fix_flags(FakeFlags(inspect=False, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=False, db=\"sqlite:foo\"))\n # User can pass both, although the behavior is not clearly defined.\n loader.fix_flags(\n FakeFlags(inspect=False, logdir=\"/tmp\", db=\"sqlite:foo\")\n )\n\n logdir_or_db_req = r\"A logdir or db must be specified\"\n one_of_event_or_logdir_req = (\n r\"Must specify either --logdir.*but not both.$\"\n )\n event_or_logdir_req = r\"Must specify either --logdir or --event_file.$\"\n\n with self.assertRaisesRegex(ValueError, event_or_logdir_req):\n loader.fix_flags(FakeFlags(inspect=True))\n with self.assertRaisesRegex(ValueError, one_of_event_or_logdir_req):\n loader.fix_flags(\n FakeFlags(\n inspect=True, logdir=\"/tmp\", event_file=\"/tmp/event.out\"\n )\n )\n with self.assertRaisesRegex(ValueError, logdir_or_db_req):\n loader.fix_flags(FakeFlags(inspect=False))\n with self.assertRaisesRegex(ValueError, logdir_or_db_req):\n loader.fix_flags(\n FakeFlags(inspect=False, 
event_file=\"/tmp/event.out\")\n )\n\n def testPathPrefix_stripsTrailingSlashes(self):\n loader = core_plugin.CorePluginLoader()\n for path_prefix in (\"/hello\", \"/hello/\", \"/hello//\", \"/hello///\"):\n flag = FakeFlags(\n inspect=False, logdir=\"/tmp\", path_prefix=path_prefix\n )\n loader.fix_flags(flag)\n self.assertEqual(\n flag.path_prefix,\n \"/hello\",\n \"got %r (input %r)\" % (flag.path_prefix, path_prefix),\n )\n\n def testPathPrefix_mustStartWithSlash(self):\n loader = core_plugin.CorePluginLoader()\n flag = FakeFlags(inspect=False, logdir=\"/tmp\", path_prefix=\"noslash\")\n with self.assertRaises(base_plugin.FlagsError) as cm:\n loader.fix_flags(flag)\n msg = str(cm.exception)\n self.assertIn(\"must start with slash\", msg)\n self.assertIn(repr(\"noslash\"), msg)\n\n\nclass CorePluginTest(tf.test.TestCase):\n def setUp(self):\n super().setUp()\n self.multiplexer = event_multiplexer.EventMultiplexer()\n self.logdir = self.get_temp_dir()\n provider = data_provider.MultiplexerDataProvider(\n self.multiplexer, self.logdir\n )\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=self.logdir,\n data_provider=provider,\n window_title=\"title foo\",\n )\n self.plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n def _add_run(self, run_name):\n run_path = os.path.join(self.logdir, run_name)\n with test_util.FileWriter(run_path) as writer:\n writer.add_test_summary(\"foo\")\n self.multiplexer.AddRunsFromDirectory(self.logdir)\n self.multiplexer.Reload()\n\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testRoutesProvided(self):\n \"\"\"Tests that the plugin offers the correct routes.\"\"\"\n routes = self.plugin.get_plugin_apps()\n self.assertIsInstance(routes[\"/data/logdir\"], collections.abc.Callable)\n self.assertIsInstance(routes[\"/data/runs\"], collections.abc.Callable)\n\n def testIndex_returnsActualHtml(self):\n \"\"\"Test the format of the root / endpoint.\"\"\"\n response = self.server.get(\"/\")\n self.assertEqual(200, response.status_code)\n self.assertStartsWith(response.headers.get(\"Content-Type\"), \"text/html\")\n html = response.get_data()\n self.assertEqual(\n html,\n b'<!doctype html><meta name=\"tb-relative-root\" content=\"./\">'\n + FAKE_INDEX_HTML,\n )\n\n def test_js_no_cache(self):\n response = self.server.get(\"/index.js?foo=bar\")\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n NO_CACHE_CONTROL_VALUE, response.headers.get(\"Cache-Control\")\n )\n\n def test_js_cache(self):\n response = self.server.get(\"/index.js?_file_hash=meow\")\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n ONE_DAY_CACHE_CONTROL_VALUE, response.headers.get(\"Cache-Control\")\n )\n\n def test_html_no_cache(self):\n response = self.server.get(\"/index.html?_file_hash=meow\")\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n NO_CACHE_CONTROL_VALUE, response.headers.get(\"Cache-Control\")\n )\n\n def testDataPaths_disableAllCaching(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n for path in (\"/data/runs\", \"/data/logdir\"):\n response = self.server.get(path)\n self.assertEqual(200, response.status_code, msg=path)\n self.assertEqual(\"0\", 
response.headers.get(\"Expires\"), msg=path)\n\n def testEnvironmentForWindowTitle(self):\n \"\"\"Test that the environment route correctly returns the window\n title.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"window_title\"], \"title foo\")\n\n def testEnvironmentForLogdir(self):\n \"\"\"Test that the environment route correctly returns the logdir.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], self.get_temp_dir())\n\n def testEnvironmentWithExperimentMetadata(self):\n class FakeDataProvider(object):\n def experiment_metadata(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return provider.ExperimentMetadata(\n data_location=\"/tmp/logs\",\n experiment_name=\"Experiment #5 (実験#5)\",\n experiment_description=\"Take five (😊)\",\n creation_time=1234.5,\n )\n\n self.context = base_plugin.TBContext(\n flags=FakeFlags(generic_data=\"true\"),\n data_provider=FakeDataProvider(),\n )\n\n self.plugin = core_plugin.CorePlugin(self.context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], \"/tmp/logs\")\n self.assertEqual(parsed_object[\"window_title\"], None)\n self.assertEqual(\n parsed_object[\"experiment_name\"], \"Experiment #5 (実験#5)\"\n )\n self.assertEqual(\n parsed_object[\"experiment_description\"], \"Take five (😊)\"\n )\n self.assertEqual(parsed_object[\"creation_time\"], 1234.5)\n\n def testEnvironmentDebugOffByDefault(self):\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertNotIn(\"debug\", parsed_object)\n\n def testEnvironmentDebugOnExplicitly(self):\n multiplexer = event_multiplexer.EventMultiplexer()\n logdir = self.get_temp_dir()\n provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=logdir,\n data_provider=provider,\n window_title=\"title foo\",\n )\n plugin = core_plugin.CorePlugin(context, include_debug_info=True)\n app = application.TensorBoardWSGI([plugin])\n server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(server, \"/data/environment\")\n self.assertIn(\"debug\", parsed_object)\n\n def testLogdir(self):\n \"\"\"Test the format of the data/logdir endpoint.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/logdir\")\n self.assertEqual(parsed_object, {\"logdir\": self.get_temp_dir()})\n\n def testRuns(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n self._add_run(\"run1\")\n run_json = self._get_json(self.server, \"/data/runs\")\n self.assertEqual(run_json, [\"run1\"])\n\n def testRunsAppendOnly(self):\n \"\"\"Test that new runs appear after old ones in /data/runs.\"\"\"\n fake_wall_times = {\n \"run1\": 1234.0,\n \"avocado\": 2345.0,\n \"zebra\": 3456.0,\n \"ox\": 4567.0,\n \"mysterious\": None,\n \"enigmatic\": None,\n }\n\n def FirstEventTimestamp_stub(run_name):\n matches = [\n candidate_name\n for candidate_name in fake_wall_times\n if run_name.endswith(candidate_name)\n ]\n self.assertEqual(len(matches), 1, \"%s (%s)\" % (matches, run_name))\n wall_time = fake_wall_times[matches[0]]\n if wall_time is None:\n raise ValueError(\"No event timestamp could be found\")\n else:\n return wall_time\n\n with 
mock.patch.object(\n self.multiplexer, \"FirstEventTimestamp\"\n ) as mock_first_event_timestamp:\n mock_first_event_timestamp.side_effect = FirstEventTimestamp_stub\n # Start with a single run.\n self._add_run(\"run1\")\n\n # Add one run: it should come last.\n self._add_run(\"avocado\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\"],\n )\n\n # Add another run: it should come last, too.\n self._add_run(\"zebra\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\"],\n )\n\n # And maybe there's a run for which we somehow have no timestamp.\n self._add_run(\"mysterious\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"mysterious\"],\n )\n\n # Add another timestamped run: it should come before the timestamp-less one.\n self._add_run(\"ox\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"mysterious\"],\n )\n\n # Add another timestamp-less run, lexicographically before the other one:\n # it should come after all timestamped runs but first among timestamp-less.\n self._add_run(\"enigmatic\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"enigmatic\", \"mysterious\"],\n )\n\n def testNotifications(self):\n \"\"\"Test the format of the /data/notifications endpoint.\"\"\"\n notifications_json = self._get_json(self.server, \"/data/notifications\")\n self.assertEqual(notifications_json, {\"notifications\": []})\n\n\nclass CorePluginPathPrefixTest(tf.test.TestCase):\n def _send_request(self, path_prefix, pathname):\n multiplexer = event_multiplexer.EventMultiplexer()\n logdir = self.get_temp_dir()\n provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=logdir,\n data_provider=provider,\n window_title=\"\",\n flags=FakeFlags(path_prefix=path_prefix),\n )\n plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([plugin], path_prefix=path_prefix)\n server = werkzeug_test.Client(app, wrappers.BaseResponse)\n return server.get(pathname)\n\n def _assert_index(self, response, expected_tb_relative_root):\n self.assertEqual(200, response.status_code)\n self.assertStartsWith(response.headers.get(\"Content-Type\"), \"text/html\")\n html = response.get_data()\n\n expected_meta = (\n '<!doctype html><meta name=\"tb-relative-root\" content=\"%s\">'\n % expected_tb_relative_root\n ).encode()\n self.assertEqual(\n html,\n expected_meta + FAKE_INDEX_HTML,\n )\n\n def testIndex_no_path_prefix(self):\n self._assert_index(self._send_request(\"\", \"/\"), \"./\")\n self._assert_index(self._send_request(\"\", \"/index.html\"), \"./\")\n\n def testIndex_path_prefix_foo(self):\n self._assert_index(self._send_request(\"/foo\", \"/foo/\"), \"./\")\n self._assert_index(self._send_request(\"/foo\", \"/foo/index.html\"), \"./\")\n\n def testIndex_path_prefix_foo_exp_route(self):\n self._assert_index(\n self._send_request(\"/foo\", \"/foo/experiment/123/\"), \"../../\"\n )\n\n def testIndex_path_prefix_foo_incorrect_route(self):\n self.assertEqual(\n 404, (self._send_request(\"/foo\", \"/foo/meow/\").status_code)\n )\n self.assertEqual(404, (self._send_request(\"/foo\", \"/\").status_code))\n self.assertEqual(\n 404, (self._send_request(\"/foo\", \"/index.html\").status_code)\n )\n\n # Missing trailing \"/\" 
causes redirection.\n self.assertEqual(301, (self._send_request(\"/foo\", \"/foo\").status_code))\n self.assertEqual(\n 301, (self._send_request(\"/foo\", \"/foo/experiment/123\").status_code)\n )\n\n def testIndex_path_prefix_foo_bar(self):\n self._assert_index(self._send_request(\"/foo/bar\", \"/foo/bar/\"), \"./\")\n self._assert_index(\n self._send_request(\"/foo/bar\", \"/foo/bar/index.html\"), \"./\"\n )\n\n def testIndex_path_prefix_foo_bar_exp_route(self):\n self._assert_index(\n self._send_request(\"/foo/bar\", \"/foo/bar/experiment/123/\"), \"../../\"\n )\n\n\ndef get_test_assets_zip_provider():\n memfile = io.BytesIO()\n with zipfile.ZipFile(\n memfile, mode=\"w\", compression=zipfile.ZIP_DEFLATED\n ) as zf:\n zf.writestr(\"index.html\", FAKE_INDEX_HTML)\n zf.writestr(\"index.js\", FAKE_INDEX_JS)\n return lambda: contextlib.closing(io.BytesIO(memfile.getvalue()))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
veya2ztn/mltool
[ "4ed151152845ebe3de128e1f53c478581c1492e4", "4ed151152845ebe3de128e1f53c478581c1492e4", "4ed151152845ebe3de128e1f53c478581c1492e4" ]
[ "ModelArchi/GANModel/SNdcgan.py", "torch_complex/complex_operation.py", "cplxmodule/tests/test_utils.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import autograd\nimport time as t\nimport os\nfrom itertools import chain\nfrom torchvision import utils\nfrom .spectral_normalization import SpectralNorm\n\nclass WassersteinLoss(torch.nn.Module):\n def forward(self, x , target):\n loss = -target.mean()*x.mean()\n return loss\n\nclass Generator(torch.nn.Module):\n def __init__(self, channels):\n super().__init__()\n self.main_module = nn.Sequential(\n\n nn.ConvTranspose2d(in_channels=100, out_channels=1024, kernel_size=4, stride=1, padding=0),\n nn.BatchNorm2d(num_features=1024),\n nn.ReLU(True),\n\n\n nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(num_features=512),\n nn.ReLU(True),\n\n\n nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(num_features=256),\n nn.ReLU(True),\n\n\n nn.ConvTranspose2d(in_channels=256, out_channels=channels, kernel_size=4, stride=2, padding=1))\n\n\n self.output = nn.Tanh()\n\n def forward(self, x):\n x = self.main_module(x)\n return self.output(x)\n\nclass Discriminator(torch.nn.Module):\n def __init__(self, channels,version=\"DCGAN_M\"):\n super().__init__()\n self.version = version\n self.main_module = nn.Sequential(\n\n SpectralNorm(nn.Conv2d(in_channels=1, out_channels=256, kernel_size=3, stride=1, padding=1)),\n nn.LeakyReLU(0.2, inplace=True),\n\n\n SpectralNorm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1)),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2, inplace=True),\n\n\n SpectralNorm(nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=4, stride=2, padding=1)),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.2, inplace=True),\n\n\n )\n\n if version == \"DCGAN_L\":\n self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)))\n self.metric = torch.nn.BCEWithLogitsLoss()\n elif version == \"WGAN_GP\":\n self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)))\n self.metric = WassersteinLoss()\n else:\n self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)),\n nn.Sigmoid())\n if version == \"DCGAN\":self.metric = torch.nn.BCELoss()\n elif version == \"DCGAN_M\":self.metric = torch.nn.MSELoss()\n else:\n raise NotImplementedError\n def forward(self, x, target=None):\n x = self.main_module(x)\n x = self.output(x)\n return x.reshape(x.size(0),x.size(1)) #(b,1)\n\n def calculate_gradient_penalty(self, real_images, fake_images,GP_lambda= 10):\n batch_size = len(real_images)\n device = next(self.parameters()).device\n\n eta = torch.FloatTensor(batch_size,1,1,1).uniform_(0,1)\n eta = eta.expand(batch_size, real_images.size(1), real_images.size(2), real_images.size(3))\n eta = eta.to(device)\n\n interpolated = eta * real_images + ((1 - eta) * fake_images)\n interpolated = interpolated.to(device)\n interpolated = eta * real_images + ((1 - eta) * fake_images)\n\n interpolated = Variable(interpolated, requires_grad=True)\n prob_interpolated = self(interpolated)\n gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,\n grad_outputs=torch.ones(prob_interpolated.size()).to(device),\n create_graph=True, retain_graph=True)[0]\n grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * GP_lambda\n return grad_penalty\n\nclass Binary_Checker(nn.Module):\n def 
__init__(self):\n super().__init__()\n self.weight = nn.Parameter(torch.Tensor(1))\n def forward(self,x):\n\n shape=tuple(range(1,len(x.shape)))\n return (x**2).mean(shape).unsqueeze(1)\n\nclass DCGAN_MODEL(object):\n def __init__(self, args):\n print(\"DCGAN model initalization.\")\n self.G = Generator(args.channels)\n if args.GAN_TYPE == \"ForceBINARY\":\n self.D = Binary_Checker()\n else:\n self.D = Discriminator(args.channels,args.GAN_TYPE)\n self.D.version = args.GAN_TYPE\n self.C = args.channels\n self.check_cuda(True)\n\n def check_cuda(self, cuda_flag=False):\n print(cuda_flag)\n if cuda_flag:\n self.cuda_index = 0\n self.cuda = True\n self.D.cuda(self.cuda_index)\n self.G.cuda(self.cuda_index)\n print(\"Cuda enabled flag: {}\".format(self.cuda))\n else:\n self.cuda = False\n\n def save_to(self,path,mode=\"full\"):\n checkpoint = self.all_state_dict(mode=mode)\n torch.save(checkpoint,path)\n\n def all_state_dict(self,epoch=None,mode=\"full\"):\n checkpoint={}\n checkpoint['epoch'] = epoch\n checkpoint['D_state_dict'] = self.D.state_dict()\n checkpoint['G_state_dict'] = self.G.state_dict()\n if mode != \"light\":\n if hasattr(self,\"I2C\"):checkpoint['C_state_dict'] = self.I2C.state_dict()\n if hasattr(self,\"D_optimizer\"):checkpoint['D_optimizer'] = self.d_optimizer.state_dict()\n if hasattr(self,\"G_optimizer\"):checkpoint['G_optimizer'] = self.g_optimizer.state_dict()\n if hasattr(self,\"C_optimizer\"):checkpoint['C_optimizer'] = self.c_optimizer.state_dict()\n return checkpoint\n", "import numpy as np\nimport torch\nimport torch.nn.functional as F\n\ndef complex_mul(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:\n '''\n :param tensor_1(2) [...,2] for real part and image part\n '''\n if mode == 'cc':\n assert tensor_1.shape[-1]==2\n assert tensor_2.shape[-1]==2\n real1,imag1=tensor_1[...,0],tensor_1[...,1]\n real2,imag2=tensor_2[...,0],tensor_2[...,1]\n return torch.stack([real1 * real2 - imag1 * imag2, real1 * imag2 + imag1 * real2], dim = -1)\n elif mode=='cr':\n assert tensor_1.shape[-1]==2\n real1,imag1=tensor_1[...,0],tensor_1[...,1]\n real2 =tensor_2\n return torch.stack([real1 * real2, imag1 * real2], dim = -1)\n elif mode=='rc':\n assert tensor_2.shape[-1]==2\n real1,imag1=tensor_2[...,0],tensor_2[...,1]\n real2 =tensor_1\n return torch.stack([real1 * real2, imag1 * real2], dim = -1)\n else:\n raise NotImplementedError\n\ndef complex_mm(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:\n if mode == 'cc':\n assert tensor_1.shape[-1]==2\n assert tensor_2.shape[-1]==2\n real1,imag1=tensor_1[...,0],tensor_1[...,1]\n real2,imag2=tensor_2[...,0],tensor_2[...,1]\n return torch.stack([torch.matmul(real1, real2) - torch.matmul(imag1, imag2),\n torch.matmul(real1, imag2) + torch.matmul(imag1, real2)], dim = -1)\n elif mode=='cr':\n assert tensor_1.shape[-1]==2\n real1,imag1=tensor_1[...,0],tensor_1[...,1]\n real2 =tensor_2\n return torch.stack([real1.mm(real2), imag1.mm(real2)], dim = -1)\n elif mode=='rc':\n assert tensor_1.shape[-1]==2\n real1,imag1=tensor_2[...,0],tensor_2[...,1]\n real2 =tensor_1\n return torch.stack([real1.mm(real2), imag1.mm(real2)], dim = -1)\n else:\n raise NotImplementedError\n\ndef complex_mv(matrix: torch.Tensor,vector: torch.Tensor,mode='cc')-> torch.Tensor:\n if mode == 'cc':\n assert matrix.shape[-1]==2\n assert vector.shape[-1]==2\n real1,imag1=matrix[...,0],matrix[...,1]\n real2,imag2=vector[...,0],vector[...,1]\n return torch.stack([real1.mv(real2) - imag1.mv(imag2), real1.mv(imag2) + 
imag1.mv(real2)], dim = -1)\n    elif mode=='cr':\n        assert matrix.shape[-1]==2\n        real1,imag1=matrix[...,0],matrix[...,1]\n        real2 =vector\n        return torch.stack([real1.mv(real2), imag1.mv(real2)], dim = -1)\n    else:\n        raise NotImplementedError\n\ndef complex_div(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:\n    if mode == 'cc':\n        assert tensor_1.shape[-1]==2\n        assert tensor_2.shape[-1]==2\n        a,b=tensor_1[...,0],tensor_1[...,1]\n        c,d=tensor_2[...,0],tensor_2[...,1]\n        Denominator = c**2+d**2\n        return torch.stack([(a * c + b * d)/Denominator, (b*c-a*d)/Denominator], dim = -1)\n    elif mode=='cr':\n        assert tensor_1.shape[-1]==2\n        a,b=tensor_1[...,0],tensor_1[...,1]\n        c =tensor_2\n        return torch.stack([a/c,b/c], dim = -1)\n    else:\n        raise NotImplementedError\n\ndef complex_conj(tensor_1: torch.Tensor)-> torch.Tensor:\n    assert tensor_1.shape[-1]==2\n    real1,imag1=tensor_1[...,0],tensor_1[...,1]\n    imag1=-imag1\n    return torch.stack([real1,imag1], dim = -1)\n\ndef complex_polar(tensor: torch.Tensor)-> torch.Tensor:\n    assert tensor.shape[-1]==2\n    real,imag=tensor[...,0],tensor[...,1]\n    radius = torch.norm(tensor,dim=-1)\n    angles = torch.atan2(imag,real)  # phase of real + 1j*imag\n    return torch.stack([radius,angles],dim=-1)\n\ndef complex_exp(tensor: torch.Tensor,angle_unit=1)-> torch.Tensor:\n    assert tensor.shape[-1]==2\n    factor,angles=tensor[...,0],tensor[...,1]\n    radius = torch.exp(factor)\n    angles = angles*angle_unit\n    direct = torch.stack([angles.cos(),angles.sin()],dim=-1)\n    return complex_mul(direct,radius,'cr')\n\ndef complex_polar_ln(tensor: torch.Tensor):\n    assert tensor.shape[-1]==2\n    real,imag=tensor[...,0],tensor[...,1]\n    radius = torch.norm(tensor,dim=-1).log()\n    angles = torch.atan2(imag,real)\n    return radius,angles\n\ndef complex_tch2np(tch: torch.Tensor)->np.ndarray:\n    assert tch.shape[-1]==2\n    out=tch.detach().numpy()\n    return out[...,0]+1j*out[...,1]\n\ndef complex_np2tch(npx:np.ndarray)-> torch.Tensor:\n    real = torch.Tensor(np.real(npx))\n    imag = torch.Tensor(np.imag(npx))\n    return torch.stack([real,imag],dim=-1)\n\ndef complex_conv2d(inputs,filters,bias=None,**kargs):\n    assert len(inputs.shape)==5\n    assert len(filters.shape)==5\n    assert inputs.shape[-1]==2\n    assert filters.shape[-1]==2\n\n    convfun = lambda x,w,b:F.conv2d(x,w,b,**kargs)\n    x_r,x_i=inputs[...,0],inputs[...,1]\n    w_r,w_i=filters[...,0],filters[...,1]\n    b_r=b_i=None\n    if bias is not None:\n        assert bias.shape[-1]==2\n        b_r,b_i = bias[...,0],bias[...,1]\n\n    o_r = convfun(x_r,w_r,b_r) - convfun(x_i,w_i,None)\n    o_i = convfun(x_r,w_i,b_i) + convfun(x_i,w_r,None)\n\n    ### another implement\n    ## but with very slow performance\n    # o_r = F.conv3d(_inputs*torch.Tensor([1,-1]),_filter,stride=(stride,stride,1),padding=(padding,padding,0))\n    # o_i = F.conv3d(_inputs,_filter.flip(-1),stride=(stride,stride,1),padding=(padding,padding,0))\n    return torch.stack([o_r, o_i], dim = -1)\n\ndef complex_conv1d(inputs,filters,bias=None,**kargs):\n    assert len(inputs.shape)==4\n    assert len(filters.shape)==4\n    assert inputs.shape[-1]==2\n    assert filters.shape[-1]==2\n\n    convfun = lambda x,w,b:F.conv1d(x,w,b,**kargs)\n    x_r,x_i=inputs[...,0],inputs[...,1]\n    w_r,w_i=filters[...,0],filters[...,1]\n    b_r=b_i=None\n    if bias is not None:\n        assert bias.shape[-1]==2\n        b_r,b_i = bias[...,0],bias[...,1]\n\n    o_r = convfun(x_r,w_r,b_r) - convfun(x_i,w_i,None)\n    o_i = convfun(x_r,w_i,b_i) + convfun(x_i,w_r,None)\n\n    return torch.stack([o_r, o_i], dim = -1)\n\n# def complex_tanh(tensor:torch.Tensor)-> torch.Tensor:\n#     #tensor = F.softplus(tensor) # avoid inf\n#     x,y = 
tensor.split(1,dim=-1)\n# x = 2*x\n# y = 2*y\n# real = x.tanh()/(y.cos()/x.cosh() +1)\n# imag = y.sin()/(y.cos() + x.cosh() + 1e-8)\n# #real = x.sinh()/n\n# #imag = y.sin()/n\n# return torch.cat([real, imag], dim = -1)\nclass ComplexTanh(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. ctx is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n ctx.save_for_backward(input)\n x,y = input.split(1,dim=-1)\n x = 2*x\n y = 2*y\n real = x.tanh()/(y.cos()/x.cosh() +1)\n imag = y.sin()/(y.cos() + x.cosh() + 1e-8)\n return torch.cat([real, imag], dim = -1)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n f(x,y) = tanh(z) = u(x,y)+1j*v(x,y)\n grad_matrix =| \\partial u |\\partial u |\n | ---------- |---------- |\n | \\partial x |\\partial y |\n | --- | --- |\n | \\partial v |\\partial v |\n | ---------- |---------- |\n | \\partial x |\\partial y |\n\n \"\"\"\n\n input, = ctx.saved_tensors\n x,y = input.split(1,dim=-1)\n x = 2*x\n y = 2*y\n ys = y.sin()\n yc = y.cos()\n xch= x.cosh()\n xth= x.tanh()\n n = (1+yc/xch)**2\n ux = 2 +2*yc/xch-2*xth**2\n uy = 2*(ys/xch)*xth\n ux = ux/n\n uy = uy/n\n vx = -uy\n vy = ux\n\n u,v= grad_output.split(1,dim=-1)\n\n real = u*ux+v*vx\n imag =-u*uy-v*vy # miners is required by complex number.\n return torch.cat([real,imag],-1)\ncomplex_tanh = ComplexTanh.apply\n\ndef complex_sigmoid(tensor:torch.Tensor)-> torch.Tensor:\n x,y = tensor.split(1,dim=-1)\n x = torch.exp(-x)\n a = 1+x*y.cos()\n b = x*y.sin()\n n = a**2+b**2+ 1e-8\n return torch.cat([a/n, b/n], dim = -1)\n\n\ndef complexize(tensor: torch.Tensor)-> torch.Tensor:\n '''\n real to complex\n '''\n if tensor.shape[-1] == 2:return tensor\n imag = torch.zeros_like(tensor)\n return torch.stack([tensor,imag],-1)\n", "import torch\nimport pytest\nimport numpy as np\n\nfrom numpy.testing import assert_allclose\n\n\[email protected]\ndef random_state():\n return np.random.RandomState(None) # (1249563438)\n\n\ndef test_window_view(random_state):\n from cplxmodule.utils import window_view\n\n np_x = random_state.randn(2, 3, 1024, 2, 2)\n tr_x = torch.tensor(np_x)\n\n dim, size, stride = -3, 5, 2\n dim = (tr_x.dim() + dim) if dim < 0 else dim\n\n tr_x_view = window_view(tr_x, dim, size, stride)\n for i in range(tr_x_view.shape[dim]):\n slice_ = np.r_[i * stride:i * stride + size]\n a = tr_x_view.index_select(dim, torch.tensor(i)).squeeze(dim)\n b = tr_x.index_select(dim, torch.tensor(slice_))\n assert_allclose(a, b)\n\n assert_allclose(window_view(tr_x, dim, size, stride, at=-1),\n tr_x.unfold(dim, size, stride))\n\n\ndef test_complex_view(random_state):\n from cplxmodule.utils import complex_view\n from cplxmodule import Cplx\n\n # test if complex view is properly constructed\n shape = 3, 4, 5, 7, 3\n a = random_state.randn(*shape) + 1j * random_state.randn(*shape)\n real, imag = map(torch.from_numpy, (a.real, a.imag))\n for dim in range(a.ndim + 1):\n tr_x = torch.stack([real, imag], dim=dim)\n z = Cplx(*complex_view(tr_x, dim, squeeze=True))\n\n assert_allclose(z.numpy(), a)\n\n for shape in [3, 4, 5, 6, 7, 8]:\n tr_x = 
torch.tensor(random_state.randn(16, 10, shape))\n\n if shape % 2:\n with pytest.warns(RuntimeWarning, match=\"Odd dimension\"):\n real, imag = complex_view(tr_x, -1, squeeze=False)\n\n # odd\n assert_allclose(real, tr_x[..., 0:-1:2].clone())\n assert_allclose(imag, tr_x[..., 1:-1:2].clone())\n\n else:\n real, imag = complex_view(tr_x, -1, squeeze=False)\n\n # slice\n assert_allclose(real, tr_x[..., 0::2].clone())\n assert_allclose(imag, tr_x[..., 1::2].clone())\n\n # reshape\n input = tr_x.reshape(*tr_x.shape[:-1], -1, 2)\n assert_allclose(real, input[..., 0])\n assert_allclose(imag, input[..., 1])\n # end if\n # end for\n" ]
[ [ "torch.nn.ConvTranspose2d", "torch.Tensor", "torch.nn.Conv2d", "torch.nn.Tanh", "torch.nn.Sigmoid", "torch.nn.BCELoss", "torch.nn.BCEWithLogitsLoss", "torch.save", "torch.nn.LeakyReLU", "torch.FloatTensor", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.autograd.Variable" ], [ "torch.norm", "numpy.imag", "torch.cat", "torch.nn.functional.conv2d", "torch.nn.functional.conv1d", "torch.zeros_like", "torch.exp", "torch.matmul", "numpy.real", "torch.stack", "torch.atan" ], [ "torch.stack", "numpy.random.RandomState", "numpy.testing.assert_allclose", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
augeas/NeverMindTheMolluscs
[ "829185ad1d9239368d5b6f3572fc07b7825deb49" ]
[ "oliva.py" ]
[ "import numpy as np\n\n\nclass Oliva(object):\n def __init__(self,width=640, skip=12, act_diff=0.015, act_decay=0.1,\n act_prod=0.1, sat=0.25, in_diff=0.0, in_decay=0.014, in_mm=0.1,\n h_decay=0.1, hormone=0.5):\n \n self.width = width\n self.cells = np.zeros((2,2,self.width))\n \n self.skip = skip\n \n self.act_diff = act_diff\n self.act_decay = act_decay\n self.act_prod = act_prod\n self.sat = sat\n \n self.in_diff = in_diff\n self.in_decay = in_decay\n self.in_mm = in_mm\n self.h_decay = h_decay\n self.h_fac = 1-self.h_decay\n self.hormone = hormone\n \n self.tick = False\n \n self.cells[0,1,:] = 0.1\n \n self.fluct = self.act_decay * (0.96 +\n 0.08 *np.random.random(self.width))\n \n seeds = np.random.choice(np.arange(self.width),30,replace=False)\n self.cells[0,0,seeds] = 1.0\n \n self.act_diff_const = 1.0 - self.act_decay -2*self.act_diff\n self.in_diff_const = 1.0 - self.in_decay -2*self.in_diff\n\n\n def step(self):\n if self.tick:\n old = 1\n new = 0\n else:\n old = 0\n new = 1\n \n l_bound = np.copy(self.cells[old,:,0])\n r_bound = np.copy(self.cells[old,:,-1])\n \n act_sq = np.square(self.cells[old,0,:])\n auto_cat = self.fluct * act_sq / (1 + self.sat * act_sq)\n \n left_cells = np.roll(self.cells[old,:,:],-1,axis=1)\n right_cells = np.roll(self.cells[old,:,:],1,axis=1)\n\n left_cells[:,0] = l_bound\n right_cells[:,-1] = r_bound\n \n self.cells[new,0,:] = self.cells[old,0,:] * self.act_diff_const + self.act_diff * (left_cells[0,:] + right_cells[0,:]) + auto_cat / (self.in_mm + self.cells[old,1,:])\n \n self.cells[new,1,:] = self.cells[old,1,:] * self.in_diff_const + self.in_diff * (left_cells[1,:] + right_cells[1,:]) + auto_cat\n \n hormone_prod = (self.cells[old,0,:] * self.h_decay).sum()\n \n self.hormone = self.hormone * self.h_fac + hormone_prod / self.width\n \n self.in_diff_const = 1.0 - 2 * self.in_diff - self.in_decay / self.hormone\n \n self.tick = not self.tick\n \n \n def __iter__(self):\n return self\n \n \n def __next__(self):\n self.step()\n if self.tick:\n out = np.copy(self.cells[0,:,:])\n else:\n out = np.copy(self.cells[1,:,:])\n for i in range(self.skip):\n self.step()\n return out\n \n " ]
[ [ "numpy.square", "numpy.random.random", "numpy.arange", "numpy.copy", "numpy.zeros", "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MElody9120/UESTC-FinalRepo
[ "f7271e76090d92866a4c9346da19e4b7464f5f0b" ]
[ "BikeShare/bikeShareData/parseCSV.py" ]
[ "# Author Melody\n# Data 2021-06-03 16:27:40\n\nimport pandas as pd\nimport geopy.distance\n\n\n# Pytest is an automated testing module on Python,Use Pytest to test the legitimacy on Bikeshare Data\n# import pytest as pt\n\n# Coords is a data structures to save How Bikeshare Date,Coord just like a List\n\n\ndef getStartEndCoords():\n # load CSV File by using pandas API\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", index_col='Ending Station ID', low_memory=False)\n # Created a list, here in the interest of saving time,as the csv will not change\n # Numbers represents the Bikeshare stations ID\n allStations = [3005, 3006, 3007, 3008, 3009, 3010, 3011, 3014, 3016, 3018, 3019, 3020, 3021, 3022, 3023, 3024,\n 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3042,\n 3045, 3046, 3047, 3048, 3049, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3062, 3063,\n 3064, 3065, 3066, 3067, 3068, 3069, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 4108]\n # geoCodeEnd meaning geography ID just like 3005 -> 7th & Flower\n geoCodeEnd = []\n for station in allStations:\n temp = []\n # freq -> frequency,frequency must be an INT Type\n freq = df.at[float(station), \"Ending Station Latitude\"].size\n assert type(freq) == int\n lat = (df.at[float(station), \"Ending Station Latitude\"]).values[0]\n lng = (df.at[float(station), \"Ending Station Longitude\"]).values[0]\n # Determining data legitimacy, NaN -> Not a Number\n if str(lat) != 'NaN' and str(lng) != 'NaN':\n temp.append([lat, lng])\n temp.append(freq)\n geoCodeEnd.append(temp)\n dfS = pd.read_csv(filename, sep=\",\", index_col='Starting Station ID', low_memory=False)\n geoCodeStart = []\n for station in allStations:\n tempS = []\n freqS = dfS.at[float(station), \"Starting Station Latitude\"].size\n assert type(freqS) == int\n lat = (dfS.at[float(station), \"Starting Station Latitude\"]).values[0]\n lng = (dfS.at[float(station), \"Starting Station Longitude\"]).values[0]\n if str(lat) != 'NaN' and str(lng) != 'NaN':\n tempS.append([lat, lng])\n tempS.append(freqS)\n geoCodeStart.append(tempS)\n return geoCodeEnd, allStations, geoCodeStart\n\n\n# This will calculate the usage of the users\ndef getRegularRiders():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n a = df.loc[df['Passholder Type'] != \"Walk-up\"]\n return len(a.index) / 182 # 182 is the number of days in the given period minus weekends and holidays\n\n\n# This will count the Average distance of the users\ndef avgDistance():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n df = df[[\"Duration\", \"Starting Station Latitude\", \"Starting Station Longitude\", \"Ending Station Latitude\",\n \"Ending Station Longitude\"]]\n # Distance calculation modified method from StackOverflow\n sum = 0\n count = 0\n time = 0\n for index, row in df.iterrows():\n lat1 = row[\"Starting Station Latitude\"]\n lat2 = row[\"Ending Station Latitude\"]\n lon1 = row[\"Starting Station Longitude\"]\n lon2 = row[\"Ending Station Longitude\"]\n if str(lat1) != 'nan' and str(lat2) != 'nan' and str(lon1) != 'nan' and str(lon2) != 'nan':\n coords_1 = (lat1, lon1)\n coords_2 = (lat2, lon2)\n dist = geopy.distance.geodesic(coords_1, coords_2).miles\n if dist > 0:\n time = time + row[\"Duration\"]\n sum = sum + dist\n count = count + 1\n return (sum / count), (time / 
count)\n\n\n# This will calculate User`s passes type\ndef pieChartPassHolder():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n df = df[[\"Passholder Type\"]]\n w = len((df.loc[df['Passholder Type'] == \"Walk-up\"]).index)\n f = len((df.loc[df['Passholder Type'] == \"Flex Pass\"]).index)\n m = len((df.loc[df['Passholder Type'] == \"Monthly Pass\"]).index)\n s = len((df.loc[df['Passholder Type'] == \"Staff Annual\"]).index)\n return [w, f, m, s]\n\n\n# This will calculate whether the user is a one-way or round trip\ndef pieChartTripRoute():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n df = df[[\"Trip Route Category\"]]\n o = len((df.loc[df[\"Trip Route Category\"] == \"One Way\"]).index)\n r = len((df.loc[df[\"Trip Route Category\"] == \"Round Trip\"]).index)\n return [o, r]\n\n\n# this will show a line to represent the Relationship between temperature and number of bicycles lent\ndef lineByMonth():\n totals = {7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", converters={'Start Time': pd.to_datetime}, low_memory=False)\n for index, row in df.iterrows():\n totals[((row[\"Start Time\"]).month)] += 1\n return totals\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
CFM-MSG/Code_LEORN
[ "fabea1e1ded973a4db692e51e2df442bde55f626" ]
[ "lib/models/frame_modules/frame_pool.py" ]
[ "import torch\nfrom torch import nn\n\n\nclass FrameAvgPool(nn.Module):\n\n def __init__(self, cfg):\n super(FrameAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n vis_h = torch.relu(self.vis_conv(visual_input))\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n return vis_h # batchsize * 512 * 16\n\n\nclass MultiFeatureAvgPool_C(nn.Module):\n def __init__(self, cfg):\n super(MultiFeatureAvgPool_C, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n # self.global_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_conv = nn.Conv1d(hidden_size + input_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n # self.norm1 = nn.BatchNorm1d(hidden_size)\n self.norm = nn.BatchNorm1d(hidden_size)\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n assert isinstance(visual_input, list)\n rcnn_feature = visual_input[0].transpose(1, 2)\n global_feature = visual_input[1].transpose(1, 2)\n # global_feature = self.global_conv(global_feature)\n # global_feature = self.norm1(global_feature)\n # global_feature = torch.relu(global_feature)\n\n vis_h = torch.cat([rcnn_feature, global_feature], dim=1)\n vis_h = self.vis_conv(vis_h)\n vis_h = torch.relu(vis_h)\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n vis_h = self.norm(vis_h)\n\n return vis_h # batchsize * 512 * 16\n\n\nclass MultiFeatureAvgPool(nn.Module):\n def __init__(self, cfg):\n super(MultiFeatureAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n self.global_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_conv = nn.Conv1d(hidden_size + hidden_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n # self.norm1 = nn.BatchNorm1d(hidden_size)\n # self.norm2 = nn.BatchNorm1d(hidden_size)\n self.norm = nn.BatchNorm1d(hidden_size)\n # self.__init_fuse_conv__(hidden_size)\n\n def __init_fuse_conv__(self, hidden_size):\n weight1 = torch.eye(hidden_size, hidden_size)\n weight2 = torch.zeros(hidden_size, hidden_size)\n weight = torch.cat([weight1, weight2], dim=1).unsqueeze(2)\n weight = nn.Parameter(weight)\n bias = nn.Parameter(torch.zeros(hidden_size))\n self.vis_conv.weight = weight\n self.vis_conv.bias = bias\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n assert isinstance(visual_input, list)\n rcnn_feature = visual_input[0].transpose(1, 2)\n global_feature = visual_input[1].transpose(1, 2)\n global_feature = self.global_conv(global_feature)\n global_feature = torch.relu(global_feature)\n\n vis_h = torch.cat([rcnn_feature, global_feature], dim=1)\n vis_h = self.vis_conv(vis_h)\n vis_h = self.norm(vis_h)\n vis_h = torch.relu(vis_h)\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n return vis_h # batchsize * 512 * 16\n\nclass MultiFeaturePoolAvg(nn.Module):\n def __init__(self, cfg):\n super(MultiFeatureAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n self.global_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_conv = 
nn.Conv1d(hidden_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n self.fuse_conv = nn.Conv1d(hidden_size + hidden_size, hidden_size, 1, 1)\n self.__init_fuse_conv__(hidden_size)\n\n def __init_fuse_conv__(self, hidden_size):\n weight1 = torch.eye(hidden_size, hidden_size)\n weight2 = torch.zeros(hidden_size, hidden_size)\n weight = torch.cat([weight1, weight2], dim=1).unsqueeze(2)\n weight = nn.Parameter(weight)\n bias = nn.Parameter(torch.zeros(hidden_size))\n self.fuse_conv.weight = weight\n self.fuse_conv.bias = bias\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n assert isinstance(visual_input, list)\n rcnn_feature = visual_input[0].transpose(1, 2)\n global_feature = visual_input[1].transpose(1, 2)\n\n global_feature = self.global_conv(global_feature)\n global_feature = torch.relu(global_feature)\n global_feature = self.avg_pool(global_feature)\n\n vis_h = self.vis_conv(rcnn_feature)\n vis_h = torch.relu(vis_h)\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n vis_h = torch.cat([vis_h, global_feature], dim=1)\n vis_h = torch.relu(self.fuse_conv(vis_h))\n return vis_h # batchsize * 512 * 16\n\nclass FrameMaxPool(nn.Module):\n\n def __init__(self, input_size, hidden_size, stride):\n super(FrameMaxPool, self).__init__()\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.max_pool = nn.MaxPool1d(stride)\n\n def forward(self, visual_input):\n vis_h = torch.relu(self.vis_conv(visual_input))\n vis_h = self.max_pool(vis_h)\n return vis_h\n\n\nclass SequentialFrameAttentionPool(nn.Module):\n\n def __init__(self, cfg):\n super(SequentialFrameAttentionPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n self.hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n self.stride = cfg.STRIDE # 16\n self.sqn = cfg.SQN_NUM\n # self.sqn = 2\n att_hidden_size = 256\n\n self.vis_conv = nn.Conv1d(input_size, self.hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, self.stride)\n\n self.global_emb_fn = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size) for i in range(self.sqn)])\n self.guide_emb_fn = nn.Sequential(*[\n nn.Linear(2 * self.hidden_size, self.hidden_size),\n nn.ReLU()\n ])\n\n self.att_fn1 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn2 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn3 = nn.Linear(att_hidden_size, 1)\n self.softmax = nn.Softmax(dim=1)\n # self.drop = nn.Dropout()\n\n self.vis_out_conv = nn.Conv1d(self.hidden_size * self.sqn, self.hidden_size, 1, 1)\n\n def forward(self, visual_input):\n B, _, v_len = visual_input.shape\n vis_h = torch.relu(self.vis_conv(visual_input))\n\n avg_vis = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n seg_list = []\n att_seg_list = []\n for i in range(v_len // self.stride):\n vis_seg = vis_h[:, :, self.stride * i: self.stride * (i + 1)].transpose(1, 2) # batchsize * 16 * 512\n avg_seg = avg_vis[:, :, i]\n prev_se = avg_seg.new_zeros(B, self.hidden_size)\n\n sqn_list = []\n att_list = []\n for m in range(self.sqn):\n v_n = self.global_emb_fn[m](avg_seg)\n g_n = torch.relu(self.guide_emb_fn(torch.cat([v_n, prev_se], dim=1))) # batchsize * 512\n\n att = torch.tanh(self.att_fn1(g_n).unsqueeze(1).expand(-1, self.stride, -1) + self.att_fn2(vis_seg))\n att = self.att_fn3(att)\n\n att = self.softmax(att) # batchsize * 16 * 1\n # TODO 使用sigmoid还是softmax\n # att = torch.sigmoid(att) * 2 - 1\n\n prev_se = torch.sum(vis_seg * att, dim=1) # batchsize * 512\n sqn_list.append(prev_se)\n att_list.append(att)\n\n vis_new = 
torch.cat(sqn_list, dim=1)\n seg_list.append(vis_new)\n att_seg_list.append(torch.cat(att_list, dim=2)) # batchsize * 16 * sqn\n\n vis_out = torch.relu(self.vis_out_conv(torch.stack(seg_list, dim=2)))\n att_out = torch.stack(att_seg_list, dim=1) # batchsize * 16 * 16 * sqn\n\n return vis_out, att_out\n\n\nclass SequentialFrameWordAttentionPool(nn.Module):\n\n def __init__(self, cfg):\n super(SequentialFrameWordAttentionPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n self.hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n self.stride = cfg.STRIDE # 16\n # self.sqn = cfg.SQN_NUM\n self.sqn = 3\n att_hidden_size = 256\n\n self.vis_conv = nn.Conv1d(input_size, self.hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, self.stride)\n\n self.global_emb_fn = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size) for i in range(self.sqn)])\n self.guide_emb_fn = nn.Sequential(*[\n nn.Linear(2 * self.hidden_size, self.hidden_size),\n nn.ReLU()\n ])\n\n self.att_fn1 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn2 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn3 = nn.Linear(att_hidden_size, 1)\n self.softmax = nn.Softmax(dim=1)\n # self.drop = nn.Dropout()\n\n self.vis_out_conv = nn.Conv1d(self.hidden_size, self.hidden_size, 1, 1)\n\n self.text_linear = nn.Linear(self.hidden_size, self.hidden_size)\n\n def forward(self, visual_input, text_feature):\n B, _, v_len = visual_input.shape\n vis_h = torch.relu(self.vis_conv(visual_input))\n\n avg_vis = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n text_att = self.text_linear(text_feature) # batchsize * 512\n\n seg_list = []\n att_seg_list = []\n for i in range(v_len // self.stride):\n vis_seg = vis_h[:, :, self.stride * i: self.stride * (i + 1)].transpose(1, 2) # batchsize * 16 * 512\n avg_seg = avg_vis[:, :, i].squeeze()\n prev_se = avg_seg.new_zeros(B, self.hidden_size)\n\n sqn_list = []\n att_list = []\n for m in range(self.sqn):\n v_n = self.global_emb_fn[m](avg_seg)\n g_n = torch.relu(self.guide_emb_fn(torch.cat([v_n, prev_se], dim=1))) # batchsize * 512\n\n att = torch.tanh(self.att_fn1(g_n).unsqueeze(1).expand(-1, 16, -1) + self.att_fn2(vis_seg))\n att = self.att_fn3(att)\n att = self.softmax(att) # batchsize * 16 * 1\n\n prev_se = torch.sum(vis_seg * att, dim=1) # batchsize * 512\n sqn_list.append(prev_se)\n att_list.append(att)\n\n vis_for_att = torch.stack(sqn_list, dim=1) # batch * sqn * hidden_size\n fuse_att = torch.softmax(torch.matmul(vis_for_att, text_att.unsqueeze(2)), dim=1) # batch * sqn * 1\n\n vis_new = torch.sum(vis_for_att * fuse_att, dim=1)\n seg_list.append(vis_new)\n att_seg_list.append(torch.cat(att_list, dim=2)) # batchsize * 16 * sqn\n # TODO 使用加权后的attention还是原始的attention\n\n vis_out = torch.relu(self.vis_out_conv(torch.stack(seg_list, dim=2)))\n att_out = torch.stack(att_seg_list, dim=1) # batchsize * 16 * 16 * sqn\n\n return vis_out, att_out\n\n\nclass WordAttentionPool(nn.Module):\n\n def __init__(self, cfg):\n super(WordAttentionPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n self.stride = cfg.STRIDE # 16\n\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.text_linear = nn.Linear(hidden_size, hidden_size)\n\n def forward(self, visual_input, text_feature):\n _, _, v_len = visual_input.shape # batchsize * 4096 * 256\n\n vis_att = torch.relu(self.vis_conv(visual_input)) # batchsize * 512 * 256\n text_att = torch.relu(self.text_linear(text_feature)) # batch * 512\n\n att = 
torch.matmul(text_att.unsqueeze(1), vis_att).transpose(1, 2) # batchsize * 256 * 1\n\n seg_list = []\n for i in range(v_len // self.stride):\n vis_seg = visual_input[:, :, self.stride * i: self.stride * (i + 1)].transpose(1,\n 2) # batchsize * 16 * 4096\n att_seg = torch.softmax(att[:, self.stride * i: self.stride * (i + 1), :], dim=1) # batchsize * 16 * 1\n vis_new = torch.sum(vis_seg * att_seg, dim=1) # batchsize * 4096\n seg_list.append(vis_new)\n\n vis_out = torch.relu(self.vis_conv(torch.stack(seg_list, dim=2))) # batchsize * 512 * 16\n\n return vis_out\n\n\nclass MovementFlowAvgPool(nn.Module):\n def __init__(self, cfg):\n super(MovementFlowAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE # 16\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_flow_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n\n self.fusion_conv = nn.Conv1d(hidden_size * 2, hidden_size, 1, 1)\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n B, H, l = visual_input.size()\n vis_flow = torch.zeros(B, H, l).type_as(visual_input)\n for i in range(l - 1):\n vis_flow[:, :, i] = visual_input[:, :, i + 1] - visual_input[:, :, i]\n vis_flow[:, :, l - 1] = vis_flow[:, :, l - 2]\n vis_h = torch.relu(self.vis_conv(visual_input))\n vis_flow_h = torch.relu(self.vis_conv(vis_flow))\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n vis_flow_h = self.avg_pool(vis_flow_h)\n\n vis_h = torch.relu(self.fusion_conv(torch.cat([vis_h, vis_flow_h], dim=1)))\n\n return vis_h # batchsize * 512 * 16\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Softmax", "torch.nn.Parameter", "torch.softmax", "torch.cat", "torch.zeros", "torch.eye", "torch.sum", "torch.nn.MaxPool1d", "torch.relu", "torch.nn.Linear", "torch.nn.Conv1d", "torch.stack", "torch.nn.ReLU", "torch.nn.AvgPool1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
crtrentz/MONAI
[ "355db48e46047a18e3bb9dbd83f424a8ad0a2622" ]
[ "tests/test_png_rw.py" ]
[ "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\nfrom skimage import io\n\nfrom monai.data import write_png\n\n\nclass TestPngWrite(unittest.TestCase):\n def test_write_gray(self):\n out_dir = tempfile.mkdtemp()\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 3, 1)\n img_save_val = 255 * img\n # saving with io.imsave (h, w, 1) will only give us (h,w) while reading it back.\n img_save_val = img_save_val[:, :, 0].astype(np.uint8)\n write_png(img, image_name, scale=True)\n out = io.imread(image_name)\n np.testing.assert_allclose(out, img_save_val)\n shutil.rmtree(out_dir)\n\n def test_write_rgb(self):\n out_dir = tempfile.mkdtemp()\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 3, 3)\n img_save_val = (255 * img).astype(np.uint8)\n write_png(img, image_name, scale=True)\n out = io.imread(image_name)\n np.testing.assert_allclose(out, img_save_val)\n shutil.rmtree(out_dir)\n\n def test_write_output_shape(self):\n out_dir = tempfile.mkdtemp()\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 2, 3)\n write_png(img, image_name, (4, 4), scale=True)\n out = io.imread(image_name)\n np.testing.assert_allclose(out.shape, (4, 4, 3))\n shutil.rmtree(out_dir)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.rand", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ckst123/KoBERT-events
[ "68eb22845b179bcaf13771fea776be3d9772306f" ]
[ "run.py" ]
[ "from data_loader import load_data, tokenizer\nfrom models import BertForMultipleSequenceClassification\n\nfrom transformers import AutoConfig\nimport torch\nfrom tqdm.auto import tqdm\nfrom transformers import get_scheduler\nfrom transformers import AdamW\nfrom sklearn.metrics import accuracy_score, f1_score\n\nlabel_list = ['확진자수','완치자수','사망여부','집단감염','백신관련','방역지침','경제지원','마스크','국제기구','병원관련']\n\ndef train(model, optimizer, lr_scheduler, train_dataloader, num_epochs, num_training_steps, device):\n \n progress_bar = tqdm(range(num_training_steps))\n\n model.train()\n for epoch in range(num_epochs):\n for batch in train_dataloader:\n batch = {k: v.to(device) for k, v in batch.items()}\n outputs = model(**batch)\n loss = outputs.loss\n loss.backward()\n \n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n\n\ndef eval(model, eval_dataloader, metric, device):\n model.eval()\n preds = []\n targets = []\n probs = []\n for batch in eval_dataloader:\n batch = {k: v.to(device) for k, v in batch.items()}\n with torch.no_grad():\n outputs = model(**batch)\n \n logits = outputs.logits\n predictions = torch.stack([torch.argmax(logit, dim=-1) for logit in logits], dim=1)\n preds.append(predictions)\n targets.append(batch[\"labels\"])\n\n\n preds = torch.cat(preds, dim=0).cpu().numpy()\n targets = torch.cat(targets, dim=0).cpu().numpy()\n N, M = preds.shape\n for i in range(M):\n print(\"%s results\" % label_list[i])\n acc = accuracy_score(targets[:,i], preds[:,i])\n f1 = f1_score(targets[:,i], preds[:,i], average='binary')\n\n print('accuracy', acc * 100)\n print('f1 score', f1 * 100)\n\n \n \n\n\ndef main():\n checkpoint = \"klue/bert-base\"\n train_dataloader, eval_dataloader = load_data()\n config = AutoConfig.from_pretrained(checkpoint)\n config.num_classes=[2] * 10\n model = BertForMultipleSequenceClassification.from_pretrained(checkpoint, config=config)\n \n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n model.to(device)\n\n\n optimizer = AdamW(model.parameters(), lr=5e-5)\n num_epochs = 3\n num_training_steps = num_epochs * len(train_dataloader)\n lr_scheduler = get_scheduler(\n \"linear\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=num_training_steps\n )\n\n train(model, optimizer, lr_scheduler, train_dataloader, num_epochs, num_training_steps, device)\n print()\n\n eval(model, eval_dataloader, 'metric', device)\n \n\nif __name__ == '__main__':\n main()" ]
[ [ "torch.cat", "torch.argmax", "torch.no_grad", "torch.cuda.is_available", "torch.device", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liytt85/gail-tf-pro
[ "ad92f41c26c34e8fabc536664fb11b44f25956cf" ]
[ "gailtf/baselines/ppo1/pposgd_simple.py" ]
[ "from gailtf.baselines.common import Dataset, explained_variance, fmt_row, zipsame\nfrom gailtf.baselines import logger\nimport gailtf.baselines.common.tf_util as U\nimport tensorflow as tf, numpy as np\nimport time, os, sys\nfrom gailtf.baselines.common.mpi_adam import MpiAdam\nfrom gailtf.baselines.common.mpi_moments import mpi_moments\nfrom mpi4py import MPI\nfrom collections import deque\nimport pickle as pkl\n\n# Sample one trajectory (until trajectory end)\ndef traj_episode_generator(pi, env, horizon, stochastic):\n t = 0\n ac = env.action_space.sample() # not used, just so we have the datatype\n new = True # marks if we're on first timestep of an episode\n\n ob = env.reset()\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n\n # Initialize history arrays\n obs = []; rews = []; news = []; acs = []\n\n while True:\n prevac = ac\n ac, vpred = pi.act(stochastic, ob)\n obs.append(ob)\n news.append(new)\n acs.append(ac)\n ob, rew, new, _ = env.step(ac)\n rews.append(rew)\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if t > 0 and (new or t % horizon == 0):\n # convert list into numpy array\n obs = np.array(obs)\n rews = np.array(rews)\n news = np.array(news)\n acs = np.array(acs)\n yield {\"ob\":obs, \"rew\":rews, \"new\":news, \"ac\":acs,\n \"ep_ret\":cur_ep_ret, \"ep_len\":cur_ep_len}\n ob = env.reset()\n cur_ep_ret = 0; cur_ep_len = 0; t = 0\n\n # Initialize history arrays\n obs = []; rews = []; news = []; acs = []\n t += 1\n\ndef traj_segment_generator(pi, env, horizon, stochastic):\n t = 0\n ac = env.action_space.sample() # not used, just so we have the datatype\n new = True # marks if we're on first timestep of an episode\n ob = env.reset()\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n obs = np.array([ob for _ in range(horizon)])\n rews = np.zeros(horizon, 'float32')\n vpreds = np.zeros(horizon, 'float32')\n news = np.zeros(horizon, 'int32')\n acs = np.array([ac for _ in range(horizon)])\n prevacs = acs.copy()\n\n while True:\n prevac = ac\n ac, vpred = pi.act(stochastic, ob)\n # Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct\n # terminal value\n if t > 0 and t % horizon == 0:\n yield {\"ob\" : obs, \"rew\" : rews, \"vpred\" : vpreds, \"new\" : news,\n \"ac\" : acs, \"prevac\" : prevacs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\" : ep_rets, \"ep_lens\" : ep_lens}\n # Be careful!!! 
if you change the downstream algorithm to aggregate\n # several of these batches, then be sure to do a deepcopy\n ep_rets = []\n ep_lens = []\n i = t % horizon\n obs[i] = ob\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = env.step(ac)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = env.reset()\n t += 1\n\ndef add_vtarg_and_adv(seg, gamma, lam):\n \"\"\"\n Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)\n \"\"\"\n new = np.append(seg[\"new\"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1\n vpred = np.append(seg[\"vpred\"], seg[\"nextvpred\"])\n T = len(seg[\"rew\"])\n seg[\"adv\"] = gaelam = np.empty(T, 'float32')\n rew = seg[\"rew\"]\n lastgaelam = 0\n for t in reversed(range(T)):\n nonterminal = 1-new[t+1]\n delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n seg[\"tdlamret\"] = seg[\"adv\"] + seg[\"vpred\"]\n\ndef learn(env, policy_func, *,\n timesteps_per_batch, # timesteps per actor per update\n clip_param, entcoeff, # clipping parameter epsilon, entropy coeff\n optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers\n gamma, lam, # advantage estimation\n max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint\n callback=None, # you can do anything in the callback, since it takes locals(), globals()\n adam_epsilon=1e-5,\n schedule='constant', # annealing for stepsize parameters (epsilon and adam)\n save_per_iter=100,\n ckpt_dir=None, task=\"train\",\n sample_stochastic=True,\n load_model_path=None, task_name=None, max_sample_traj=1500\n ):\n # Setup losses and stuff\n # ----------------------------------------\n ob_space = env.observation_space\n ac_space = env.action_space\n pi = policy_func(\"pi\", ob_space, ac_space) # Construct network for new policy\n oldpi = policy_func(\"oldpi\", ob_space, ac_space) # Network for old policy\n atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)\n ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return\n\n lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule\n clip_param = clip_param * lrmult # Annealed cliping parameter epislon\n\n ob = U.get_placeholder_cached(name=\"ob\")\n ac = pi.pdtype.sample_placeholder([None])\n\n kloldnew = oldpi.pd.kl(pi.pd)\n ent = pi.pd.entropy()\n meankl = U.mean(kloldnew)\n meanent = U.mean(ent)\n pol_entpen = (-entcoeff) * meanent\n\n ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold\n surr1 = ratio * atarg # surrogate from conservative policy iteration\n surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg #\n pol_surr = - U.mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)\n vf_loss = U.mean(tf.square(pi.vpred - ret))\n total_loss = pol_surr + pol_entpen + vf_loss\n losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]\n loss_names = [\"pol_surr\", \"pol_entpen\", \"vf_loss\", \"kl\", \"ent\"]\n\n var_list = pi.get_trainable_variables()\n lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])\n adam = MpiAdam(var_list, epsilon=adam_epsilon)\n\n assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)\n for (oldv, newv) in 
zipsame(oldpi.get_variables(), pi.get_variables())])\n compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)\n\n U.initialize()\n adam.sync()\n\n # Prepare for rollouts\n # ----------------------------------------\n seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)\n traj_gen = traj_episode_generator(pi, env, timesteps_per_batch, stochastic=sample_stochastic)\n\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n tstart = time.time()\n lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths\n rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards\n\n assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, \"Only one time constraint permitted\"\n\n if task == 'sample_trajectory':\n # not elegant, i know :(\n sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic)\n sys.exit()\n\n while True:\n if callback: callback(locals(), globals())\n if max_timesteps and timesteps_so_far >= max_timesteps:\n break\n elif max_episodes and episodes_so_far >= max_episodes:\n break\n elif max_iters and iters_so_far >= max_iters:\n break\n elif max_seconds and time.time() - tstart >= max_seconds:\n break\n\n if schedule == 'constant':\n cur_lrmult = 1.0\n elif schedule == 'linear':\n cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)\n else:\n raise NotImplementedError\n\n # Save model\n if iters_so_far % save_per_iter == 0 and ckpt_dir is not None:\n U.save_state(os.path.join(ckpt_dir, task_name), counter=iters_so_far)\n\n logger.log(\"********** Iteration %i ************\"%iters_so_far)\n\n seg = seg_gen.__next__()\n add_vtarg_and_adv(seg, gamma, lam)\n\n # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))\n ob, ac, atarg, tdlamret = seg[\"ob\"], seg[\"ac\"], seg[\"adv\"], seg[\"tdlamret\"]\n vpredbefore = seg[\"vpred\"] # predicted value function before udpate\n atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate\n d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)\n optim_batchsize = optim_batchsize or ob.shape[0]\n\n if hasattr(pi, \"ob_rms\"): pi.ob_rms.update(ob) # update running mean/std for policy\n\n assign_old_eq_new() # set old parameter values to new parameter values\n logger.log(\"Optimizing...\")\n logger.log(fmt_row(13, loss_names))\n # Here we do a bunch of optimization epochs over the data\n for _ in range(optim_epochs):\n losses = [] # list of tuples, each of which gives the loss for a minibatch\n for batch in d.iterate_once(optim_batchsize):\n *newlosses, g = lossandgrad(batch[\"ob\"], batch[\"ac\"], batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult)\n adam.update(g, optim_stepsize * cur_lrmult) \n losses.append(newlosses)\n logger.log(fmt_row(13, np.mean(losses, axis=0)))\n\n logger.log(\"Evaluating losses...\")\n losses = []\n for batch in d.iterate_once(optim_batchsize):\n newlosses = compute_losses(batch[\"ob\"], batch[\"ac\"], batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult)\n losses.append(newlosses) \n meanlosses,_,_ = mpi_moments(losses, axis=0)\n logger.log(fmt_row(13, meanlosses))\n for (lossval, name) in zipsame(meanlosses, loss_names):\n logger.record_tabular(\"loss_\"+name, lossval)\n logger.record_tabular(\"ev_tdlam_before\", explained_variance(vpredbefore, tdlamret))\n lrlocal = (seg[\"ep_lens\"], seg[\"ep_rets\"]) # local values\n listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples\n lens, rews = 
map(flatten_lists, zip(*listoflrpairs))\n lenbuffer.extend(lens)\n rewbuffer.extend(rews)\n logger.record_tabular(\"EpLenMean\", np.mean(lenbuffer))\n logger.record_tabular(\"EpRewMean\", np.mean(rewbuffer))\n logger.record_tabular(\"EpThisIter\", len(lens))\n episodes_so_far += len(lens)\n timesteps_so_far += sum(lens)\n iters_so_far += 1\n logger.record_tabular(\"EpisodesSoFar\", episodes_so_far)\n logger.record_tabular(\"TimestepsSoFar\", timesteps_so_far)\n logger.record_tabular(\"TimeElapsed\", time.time() - tstart)\n if MPI.COMM_WORLD.Get_rank()==0:\n logger.dump_tabular()\n\ndef sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic):\n\n assert load_model_path is not None\n U.load_state(load_model_path)\n sample_trajs = []\n for iters_so_far in range(max_sample_traj):\n logger.log(\"********** Iteration %i ************\"%iters_so_far)\n traj = traj_gen.__next__()\n ob, new, ep_ret, ac, rew, ep_len = traj['ob'], traj['new'], traj['ep_ret'], traj['ac'], traj['rew'], traj['ep_len']\n logger.record_tabular(\"ep_ret\", ep_ret)\n logger.record_tabular(\"ep_len\", ep_len)\n logger.record_tabular(\"immediate reward\", np.mean(rew))\n if MPI.COMM_WORLD.Get_rank()==0:\n logger.dump_tabular()\n traj_data = {\"ob\":ob, \"ac\":ac, \"rew\": rew, \"ep_ret\":ep_ret}\n sample_trajs.append(traj_data)\n\n sample_ep_rets = [traj[\"ep_ret\"] for traj in sample_trajs]\n logger.log(\"Average total return: %f\"%(sum(sample_ep_rets)/len(sample_ep_rets)))\n if sample_stochastic:\n task_name = 'stochastic.' + task_name\n else:\n task_name = 'deterministic.' + task_name\n pkl.dump(sample_trajs, open(task_name+\".pkl\", \"wb\"))\n\ndef flatten_lists(listoflists):\n return [el for list_ in listoflists for el in list_]\n" ]
[ [ "tensorflow.minimum", "tensorflow.assign", "tensorflow.placeholder", "numpy.append", "numpy.mean", "tensorflow.square", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
tianjianjiang/allennlp
[ "35b285585e0677b1025eac1c19b5eefe7e2a70db", "35b285585e0677b1025eac1c19b5eefe7e2a70db", "35b285585e0677b1025eac1c19b5eefe7e2a70db", "0839f5c263911ec5ff04a2ebe575493c7e0436ef", "35b285585e0677b1025eac1c19b5eefe7e2a70db", "35b285585e0677b1025eac1c19b5eefe7e2a70db", "35b285585e0677b1025eac1c19b5eefe7e2a70db", "0839f5c263911ec5ff04a2ebe575493c7e0436ef", "0839f5c263911ec5ff04a2ebe575493c7e0436ef" ]
[ "allennlp/training/metrics/entropy.py", "allennlp/modules/similarity_functions/linear.py", "allennlp/tests/data/dataset_readers/multiprocess_dataset_reader_test.py", "allennlp/data/token_indexers/spacy_indexer.py", "allennlp/nn/initializers.py", "allennlp/modules/span_extractors/bidirectional_endpoint_span_extractor.py", "allennlp/tests/modules/conditional_random_field_test.py", "allennlp/tests/training/metrics/unigram_recall_test.py", "allennlp/training/metrics/conll_coref_scores.py" ]
[ "from typing import Optional\n\nfrom overrides import overrides\nimport torch\n\nfrom allennlp.training.metrics.metric import Metric\n\n\[email protected](\"entropy\")\nclass Entropy(Metric):\n def __init__(self) -> None:\n self._entropy = 0.0\n self._count = 0\n\n @overrides\n def __call__(\n self, # type: ignore\n logits: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n ):\n \"\"\"\n Parameters\n ----------\n logits : ``torch.Tensor``, required.\n A tensor of unnormalized log probabilities of shape (batch_size, ..., num_classes).\n mask: ``torch.Tensor``, optional (default = None).\n A masking tensor of shape (batch_size, ...).\n \"\"\"\n logits, mask = self.unwrap_to_tensors(logits, mask)\n\n if mask is None:\n mask = torch.ones(logits.size()[:-1])\n\n log_probs = torch.nn.functional.log_softmax(logits, dim=-1)\n probabilities = torch.exp(log_probs) * mask.unsqueeze(-1)\n weighted_negative_likelihood = -log_probs * probabilities\n entropy = weighted_negative_likelihood.sum(-1)\n\n self._entropy += entropy.sum() / mask.sum()\n self._count += 1\n\n @overrides\n def get_metric(self, reset: bool = False):\n \"\"\"\n Returns\n -------\n The scalar average entropy.\n \"\"\"\n average_value = self._entropy / self._count if self._count > 0 else 0\n if reset:\n self.reset()\n return average_value\n\n @overrides\n def reset(self):\n self._entropy = 0.0\n self._count = 0\n", "import math\n\nfrom overrides import overrides\nimport torch\nfrom torch.nn.parameter import Parameter\n\nfrom allennlp.modules.similarity_functions.similarity_function import SimilarityFunction\nfrom allennlp.nn import Activation, util\n\n\[email protected](\"linear\")\nclass LinearSimilarity(SimilarityFunction):\n \"\"\"\n This similarity function performs a dot product between a vector of weights and some\n combination of the two input vectors, followed by an (optional) activation function. The\n combination used is configurable.\n\n If the two vectors are ``x`` and ``y``, we allow the following kinds of combinations: ``x``,\n ``y``, ``x*y``, ``x+y``, ``x-y``, ``x/y``, where each of those binary operations is performed\n elementwise. You can list as many combinations as you want, comma separated. For example, you\n might give ``x,y,x*y`` as the ``combination`` parameter to this class. The computed similarity\n function would then be ``w^T [x; y; x*y] + b``, where ``w`` is a vector of weights, ``b`` is a\n bias parameter, and ``[;]`` is vector concatenation.\n\n Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the\n similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can\n accomplish that with this class by using \"x*y\" for `combination`.\n\n Parameters\n ----------\n tensor_1_dim : ``int``\n The dimension of the first tensor, ``x``, described above. This is ``x.size()[-1]`` - the\n length of the vector that will go into the similarity computation. We need this so we can\n build weight vectors correctly.\n tensor_2_dim : ``int``\n The dimension of the second tensor, ``y``, described above. This is ``y.size()[-1]`` - the\n length of the vector that will go into the similarity computation. We need this so we can\n build weight vectors correctly.\n combination : ``str``, optional (default=\"x,y\")\n Described above.\n activation : ``Activation``, optional (default=linear (i.e. no activation))\n An activation function applied after the ``w^T * [x;y] + b`` calculation. 
Default is no\n activation.\n \"\"\"\n\n def __init__(\n self,\n tensor_1_dim: int,\n tensor_2_dim: int,\n combination: str = \"x,y\",\n activation: Activation = None,\n ) -> None:\n super().__init__()\n self._combination = combination\n combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])\n self._weight_vector = Parameter(torch.Tensor(combined_dim))\n self._bias = Parameter(torch.Tensor(1))\n self._activation = activation or Activation.by_name(\"linear\")()\n self.reset_parameters()\n\n def reset_parameters(self):\n std = math.sqrt(6 / (self._weight_vector.size(0) + 1))\n self._weight_vector.data.uniform_(-std, std)\n self._bias.data.fill_(0)\n\n @overrides\n def forward(self, tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:\n combined_tensors = util.combine_tensors(self._combination, [tensor_1, tensor_2])\n dot_product = torch.matmul(combined_tensors, self._weight_vector)\n return self._activation(dot_product + self._bias)\n", "from collections import Counter\nfrom multiprocessing import Queue, Process\nfrom queue import Empty\nfrom typing import Tuple\n\nimport numpy as np\n\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.data.dataset_readers import MultiprocessDatasetReader, SequenceTaggingDatasetReader\nfrom allennlp.data.dataset_readers.multiprocess_dataset_reader import QIterable\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.iterators import BasicIterator\nfrom allennlp.data.vocabulary import Vocabulary\n\n\ndef fingerprint(instance: Instance) -> Tuple[str, ...]:\n \"\"\"\n Get a hashable representation of a sequence tagging instance\n that can be put in a Counter.\n \"\"\"\n text_tuple = tuple(t.text for t in instance.fields[\"tokens\"].tokens) # type: ignore\n labels_tuple = tuple(instance.fields[\"tags\"].labels) # type: ignore\n return text_tuple + labels_tuple\n\n\nclass TestMultiprocessDatasetReader(AllenNlpTestCase):\n def setUp(self) -> None:\n super().setUp()\n\n # use SequenceTaggingDatasetReader as the base reader\n self.base_reader = SequenceTaggingDatasetReader(lazy=True)\n base_file_path = AllenNlpTestCase.FIXTURES_ROOT / \"data\" / \"sequence_tagging.tsv\"\n\n # Make 100 copies of the data\n raw_data = open(base_file_path).read()\n for i in range(100):\n file_path = self.TEST_DIR / f\"identical_{i}.tsv\"\n with open(file_path, \"w\") as f:\n f.write(raw_data)\n\n self.all_distinct_path = str(self.TEST_DIR / \"all_distinct.tsv\")\n with open(self.all_distinct_path, \"w\") as all_distinct:\n for i in range(100):\n file_path = self.TEST_DIR / f\"distinct_{i}.tsv\"\n line = f\"This###DT\\tis###VBZ\\tsentence###NN\\t{i}###CD\\t.###.\\n\"\n with open(file_path, \"w\") as f:\n f.write(line)\n all_distinct.write(line)\n\n self.identical_files_glob = str(self.TEST_DIR / \"identical_*.tsv\")\n self.distinct_files_glob = str(self.TEST_DIR / \"distinct_*.tsv\")\n\n # For some of the tests we need a vocab, we'll just use the base_reader for that.\n self.vocab = Vocabulary.from_instances(self.base_reader.read(str(base_file_path)))\n\n def test_multiprocess_read(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=4)\n\n all_instances = []\n\n for instance in reader.read(self.identical_files_glob):\n all_instances.append(instance)\n\n # 100 files * 4 sentences / file\n assert len(all_instances) == 100 * 4\n\n counts = Counter(fingerprint(instance) for instance in all_instances)\n\n # should have the exact same data 100 times\n assert len(counts) == 4\n assert 
counts[(\"cats\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"dogs\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"snakes\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"birds\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n\n def test_multiprocess_read_partial_does_not_hang(self):\n # Use a small queue size such that the processes generating the data will block.\n reader = MultiprocessDatasetReader(\n base_reader=self.base_reader, num_workers=4, output_queue_size=10\n )\n\n all_instances = []\n\n # Half of 100 files * 4 sentences / file\n i = 0\n for instance in reader.read(self.identical_files_glob):\n # Stop early such that the processes generating the data remain\n # active (given the small queue size).\n if i == 200:\n break\n i += 1\n all_instances.append(instance)\n\n # This should be trivially true. The real test here is that we exit\n # normally and don't hang due to the still active processes.\n assert len(all_instances) == 200\n\n def test_multiprocess_read_with_qiterable(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=4)\n\n all_instances = []\n qiterable = reader.read(self.identical_files_glob)\n assert isinstance(qiterable, QIterable)\n\n # Essentially QIterable.__iter__. Broken out here as we intend it to be\n # a public interface.\n qiterable.start()\n while qiterable.num_active_workers.value > 0 or qiterable.num_inflight_items.value > 0:\n while True:\n try:\n all_instances.append(qiterable.output_queue.get(block=False, timeout=1.0))\n with qiterable.num_inflight_items.get_lock():\n qiterable.num_inflight_items.value -= 1\n except Empty:\n break\n qiterable.join()\n\n # 100 files * 4 sentences / file\n assert len(all_instances) == 100 * 4\n\n counts = Counter(fingerprint(instance) for instance in all_instances)\n\n # should have the exact same data 100 times\n assert len(counts) == 4\n assert counts[(\"cats\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"dogs\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"snakes\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"birds\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n\n def test_multiprocess_read_in_subprocess_is_deterministic(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=1)\n q = Queue()\n\n def read():\n for instance in reader.read(self.distinct_files_glob):\n q.put(fingerprint(instance))\n\n # Ensure deterministic shuffling.\n np.random.seed(0)\n p = Process(target=read)\n p.start()\n p.join()\n\n # Convert queue to list.\n actual_fingerprints = []\n while not q.empty():\n actual_fingerprints.append(q.get(block=False))\n\n assert len(actual_fingerprints) == 100\n\n expected_fingerprints = []\n for instance in self.base_reader.read(self.all_distinct_path):\n expected_fingerprints.append(fingerprint(instance))\n\n np.random.seed(0)\n expected_fingerprints.sort()\n # This should be shuffled into exactly the same order as actual_fingerprints.\n np.random.shuffle(expected_fingerprints)\n\n assert actual_fingerprints == expected_fingerprints\n\n def test_multiple_epochs(self):\n reader = MultiprocessDatasetReader(\n base_reader=self.base_reader, num_workers=2, epochs_per_read=3\n )\n\n all_instances = []\n\n for instance in reader.read(self.identical_files_glob):\n 
all_instances.append(instance)\n\n # 100 files * 4 sentences per file * 3 epochs\n assert len(all_instances) == 100 * 4 * 3\n\n counts = Counter(fingerprint(instance) for instance in all_instances)\n\n # should have the exact same data 100 * 3 times\n assert len(counts) == 4\n assert counts[(\"cats\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n assert counts[(\"dogs\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n assert counts[(\"snakes\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n assert counts[(\"birds\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n\n def test_with_iterator(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=2)\n instances = reader.read(self.identical_files_glob)\n\n iterator = BasicIterator(batch_size=32)\n iterator.index_with(self.vocab)\n\n batches = [batch for batch in iterator(instances, num_epochs=1)]\n\n # 400 instances / batch_size 32 = 12 full batches + 1 batch of 16\n sizes = sorted([len(batch[\"tags\"]) for batch in batches])\n assert sizes == [16] + 12 * [32]\n", "from typing import Dict, List\n\nfrom overrides import overrides\nfrom spacy.tokens import Token as SpacyToken\nimport torch\nimport numpy\n\nfrom allennlp.common.util import pad_sequence_to_length\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.data.tokenizers.token import Token\nfrom allennlp.data.token_indexers.token_indexer import TokenIndexer\n\n\[email protected](\"spacy\")\nclass SpacyTokenIndexer(TokenIndexer[numpy.ndarray]):\n \"\"\"\n This :class:`SpacyTokenIndexer` represents tokens as word vectors\n from a spacy model. You might want to do this for two main reasons;\n easier integration with a spacy pipeline and no out of vocabulary\n tokens.\n\n Parameters\n ----------\n hidden_dim : ``int``, optional (default=``96``)\n The dimension of the vectors that spacy generates for\n representing words.\n token_min_padding_length : ``int``, optional (default=``0``)\n See :class:`TokenIndexer`.\n \"\"\"\n\n def __init__(self, hidden_dim: int = 96, token_min_padding_length: int = 0) -> None:\n self._hidden_dim = hidden_dim\n super().__init__(token_min_padding_length)\n\n @overrides\n def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):\n # We are using spacy to generate embeddings directly for our model,\n # so we don't need to capture the vocab - it is defined by the spacy\n # model we are using instead.\n pass\n\n @overrides\n def tokens_to_indices(\n self, tokens: List[SpacyToken], vocabulary: Vocabulary, index_name: str\n ) -> Dict[str, List[numpy.ndarray]]:\n\n if not all([isinstance(x, SpacyToken) for x in tokens]):\n raise ValueError(\n \"The spacy indexer requires you to use a Tokenizer which produces SpacyTokens.\"\n )\n indices: List[numpy.ndarray] = []\n for token in tokens:\n indices.append(token.vector)\n\n return {index_name: indices}\n\n def get_padding_token(self) -> numpy.ndarray:\n return numpy.zeros(self._hidden_dim, dtype=numpy.float32)\n\n @overrides\n def get_padding_lengths(self, token: numpy.ndarray) -> Dict[str, numpy.ndarray]:\n return {}\n\n @overrides\n def as_padded_tensor(\n self,\n tokens: Dict[str, List[numpy.ndarray]],\n desired_num_tokens: Dict[str, int],\n padding_lengths: Dict[str, int],\n ) -> Dict[str, torch.Tensor]:\n\n val = {\n key: torch.FloatTensor(\n pad_sequence_to_length(\n val, desired_num_tokens[key], default_value=self.get_padding_token\n )\n )\n for key, val in tokens.items()\n }\n 
return val\n", "\"\"\"\nAn initializer is just a PyTorch function.\nHere we implement a proxy class that allows us\nto register them and supply any additional function arguments\n(for example, the ``mean`` and ``std`` of a normal initializer)\nas named arguments to the constructor.\n\nThe available initialization functions are\n\n* `\"normal\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.normal_>`_\n* `\"uniform\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.uniform_>`_\n* `\"constant\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.constant_>`_\n* `\"eye\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.eye_>`_\n* `\"dirac\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.dirac_>`_\n* `\"xavier_uniform\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_uniform_>`_\n* `\"xavier_normal\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_normal_>`_\n* `\"kaiming_uniform\"\n <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_uniform_>`_\n* `\"kaiming_normal\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_normal_>`_\n* `\"orthogonal\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.orthogonal_>`_\n* `\"sparse\" <https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.sparse_>`_\n* :func:`\"block_orthogonal\" <block_orthogonal>`\n* :func:`\"uniform_unit_scaling\" <uniform_unit_scaling>`\n* :class:`\"pretrained\" <PretrainedModelInitializer>`\n\"\"\"\nimport logging\nimport re\nimport math\nfrom typing import Callable, List, Tuple, Type, Dict\nimport itertools\nfrom overrides import overrides\n\nimport torch\nimport torch.nn.init\n\nfrom allennlp.common import Registrable\nfrom allennlp.common.params import Params\nfrom allennlp.common.checks import ConfigurationError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Initializer(Registrable):\n \"\"\"\n An initializer is really just a bare pytorch function. This class\n is a proxy that allows us to implement ``Registerable`` for those functions.\n \"\"\"\n\n default_implementation = \"normal\"\n\n def __call__(self, tensor: torch.Tensor, **kwargs) -> None:\n \"\"\"\n This function is here just to make mypy happy. We expect initialization functions to\n follow this API; the builtin pytorch initialization functions follow this just fine, even\n though they don't subclass ``Initialization``. We're just making it explicit here, so mypy\n knows that initializers are callable like this.\n \"\"\"\n raise NotImplementedError\n\n\ndef uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = \"linear\"):\n \"\"\"\n An initaliser which preserves output variance for approximately gaussian\n distributed inputs. 
This boils down to initialising layers using a uniform\n distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where\n ``dim[0]`` is equal to the input dimension of the parameter and the ``scale``\n is a constant scaling factor which depends on the non-linearity used.\n\n See `Random Walk Initialisation for Training Very Deep Feedforward Networks\n <https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_\n for more information.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n The tensor to initialise.\n nonlinearity : ``str``, optional (default = \"linear\")\n The non-linearity which is performed after the projection that this\n tensor is involved in. This must be the name of a function contained\n in the ``torch.nn.functional`` package.\n\n Returns\n -------\n The initialised tensor.\n \"\"\"\n size = 1.0\n # Estimate the input size. This won't work perfectly,\n # but it covers almost all use cases where this initialiser\n # would be expected to be useful, i.e in large linear and\n # convolutional layers, as the last dimension will almost\n # always be the output size.\n for dimension in list(tensor.size())[:-1]:\n size *= dimension\n\n activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)\n max_value = math.sqrt(3 / size) * activation_scaling\n\n return tensor.data.uniform_(-max_value, max_value)\n\n\ndef block_orthogonal(tensor: torch.Tensor, split_sizes: List[int], gain: float = 1.0) -> None:\n \"\"\"\n An initializer which allows initializing model parameters in \"blocks\". This is helpful\n in the case of recurrent models which use multiple gates applied to linear projections,\n which can be computed efficiently if they are concatenated together. However, they are\n separate parameters which should be initialized independently.\n\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n A tensor to initialize.\n split_sizes : List[int], required.\n A list of length ``tensor.ndim()`` specifying the size of the\n blocks along that particular dimension. E.g. ``[10, 20]`` would\n result in the tensor being split into chunks of size 10 along the\n first dimension and 20 along the second.\n gain : float, optional (default = 1.0)\n The gain (scaling) applied to the orthogonal initialization.\n \"\"\"\n data = tensor.data\n sizes = list(tensor.size())\n if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):\n raise ConfigurationError(\n \"tensor dimensions must be divisible by their respective \"\n \"split_sizes. Found size: {} and split_sizes: {}\".format(sizes, split_sizes)\n )\n indexes = [list(range(0, max_size, split)) for max_size, split in zip(sizes, split_sizes)]\n # Iterate over all possible blocks within the tensor.\n for block_start_indices in itertools.product(*indexes):\n # A list of tuples containing the index to start at for this block\n # and the appropriate step size (i.e split_size[i] for dimension i).\n index_and_step_tuples = zip(block_start_indices, split_sizes)\n # This is a tuple of slices corresponding to:\n # tensor[index: index + step_size, ...]. This is\n # required because we could have an arbitrary number\n # of dimensions. 
The actual slices we need are the\n # start_index: start_index + step for each dimension in the tensor.\n block_slice = tuple(\n [slice(start_index, start_index + step) for start_index, step in index_and_step_tuples]\n )\n data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)\n\n\ndef zero(tensor: torch.Tensor) -> None:\n return tensor.data.zero_()\n\n\ndef lstm_hidden_bias(tensor: torch.Tensor) -> None:\n \"\"\"\n Initialize the biases of the forget gate to 1, and all other gates to 0,\n following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures\n \"\"\"\n # gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)\n tensor.data.zero_()\n hidden_size = tensor.shape[0] // 4\n tensor.data[hidden_size : (2 * hidden_size)] = 1.0\n\n\ndef _initializer_wrapper(init_function: Callable[..., None]) -> Type[Initializer]:\n class Init(Initializer):\n _initializer_wrapper = True\n\n def __init__(self, **kwargs):\n self._init_function = init_function\n self._kwargs = kwargs\n\n def __call__(self, tensor: torch.Tensor, **kwargs) -> None:\n self._init_function(tensor, **self._kwargs)\n\n def __repr__(self):\n return \"Init: %s, with params: %s\" % (self._init_function, self._kwargs)\n\n @classmethod\n def from_params(cls, params: Params): # type: ignore\n return cls(**params.as_dict())\n\n return Init\n\n\n# There are no classes to decorate, so we hack these into Registrable._registry\nRegistrable._registry[Initializer] = {\n \"normal\": _initializer_wrapper(torch.nn.init.normal_),\n \"uniform\": _initializer_wrapper(torch.nn.init.uniform_),\n \"orthogonal\": _initializer_wrapper(torch.nn.init.orthogonal_),\n \"constant\": _initializer_wrapper(torch.nn.init.constant_),\n \"dirac\": _initializer_wrapper(torch.nn.init.dirac_),\n \"xavier_normal\": _initializer_wrapper(torch.nn.init.xavier_normal_),\n \"xavier_uniform\": _initializer_wrapper(torch.nn.init.xavier_uniform_),\n \"kaiming_normal\": _initializer_wrapper(torch.nn.init.kaiming_normal_),\n \"kaiming_uniform\": _initializer_wrapper(torch.nn.init.kaiming_uniform_),\n \"sparse\": _initializer_wrapper(torch.nn.init.sparse_),\n \"eye\": _initializer_wrapper(torch.nn.init.eye_),\n \"block_orthogonal\": _initializer_wrapper(block_orthogonal),\n \"uniform_unit_scaling\": _initializer_wrapper(uniform_unit_scaling),\n \"zero\": _initializer_wrapper(zero),\n \"lstm_hidden_bias\": _initializer_wrapper(lstm_hidden_bias),\n}\n\n\[email protected](\"pretrained\")\nclass PretrainedModelInitializer(Initializer):\n \"\"\"\n An initializer which allows initializing parameters using a pretrained model. The\n initializer will load all of the weights from the ``weights_file_path`` and use the\n name of the new parameters to index into the pretrained parameters. 
Therefore,\n by default, the names of the new and pretrained parameters must be the same.\n However, this behavior can be overridden using the ``parameter_name_overrides``,\n which remaps the name of the new parameter to the key which should be used\n to index into the pretrained parameters.\n\n The initializer will load all of the weights from the ``weights_file_path``\n regardless of which parameters will actually be used to initialize the new model.\n So, if you need to initialize several parameters using a pretrained model, the most\n memory-efficient way to do this is to use one ``PretrainedModelInitializer`` per\n weights file and use a regex to match all of the new parameters which need to be\n initialized.\n\n The below entry in the :class:`InitializerApplicator` parameters will initialize\n ``linear_1.weight`` and ``linear_2.weight`` using a pretrained model.\n ``linear_1.weight`` will be initialized to the pretrained\n parameters called ``linear_1.weight``, but ``linear_2.weight`` will be initialized\n to the pretrained parameters called ``linear_3.weight``::\n\n [\"linear_1.weight|linear_2.weight\",\n {\n \"type\": \"pretrained\",\n \"weights_file_path\": \"best.th\",\n \"parameter_name_overrides\": {\n \"linear_2.weight\": \"linear_3.weight\"\n }\n }\n ]\n\n To initialize weights for all the parameters from a pretrained model (assuming their names\n remain unchanged), use the following instead:\n\n .. code-block:: js\n\n [\".*\",\n {\n \"type\": \"pretrained\",\n \"weights_file_path\": \"best.th\",\n \"parameter_name_overrides\": {}\n }\n ]\n\n Parameters\n ----------\n weights_file_path : ``str``, required\n The path to the weights file which has the pretrained model parameters.\n parameter_name_overrides : ``Dict[str, str]``, optional (default = None)\n The mapping from the new parameter name to the name which should be used\n to index into the pretrained model parameters. If a parameter name is not\n specified, the initializer will use the parameter's default name as the key.\n \"\"\"\n\n def __init__(\n self, weights_file_path: str, parameter_name_overrides: Dict[str, str] = None\n ) -> None:\n self.weights: Dict[str, torch.Tensor] = torch.load(weights_file_path)\n self.parameter_name_overrides = parameter_name_overrides or {}\n\n @overrides\n def __call__(self, tensor: torch.Tensor, parameter_name: str, **kwargs) -> None: # type: ignore\n # Select the new parameter name if it's being overridden\n if parameter_name in self.parameter_name_overrides:\n parameter_name = self.parameter_name_overrides[parameter_name]\n\n # If the size of the source and destination tensors are not the\n # same, then we need to raise an error\n source_weights = self.weights[parameter_name]\n if tensor.data.size() != source_weights.size():\n raise ConfigurationError(\n \"Incompatible sizes found for parameter %s. \"\n \"Found %s and %s\" % (parameter_name, tensor.data.size(), source_weights.size())\n )\n\n # Copy the parameters from the source to the destination\n tensor.data[:] = source_weights[:]\n\n\nclass InitializerApplicator:\n \"\"\"\n Applies initializers to the parameters of a Module based on regex matches. 
Any parameter not\n explicitly matching a regex will not be initialized, instead using whatever the default\n initialization was in the module's code.\n \"\"\"\n\n def __init__(\n self, initializers: List[Tuple[str, Initializer]] = None, prevent_regexes: List[str] = None\n ) -> None:\n \"\"\"\n Parameters\n ----------\n initializers : ``List[Tuple[str, Initializer]]``, optional (default = [])\n A list mapping parameter regexes to initializers. We will check each parameter against\n each regex in turn, and apply the initializer paired with the first matching regex, if\n any. If \"prevent\" is assigned to any regex, then it will override and prevent the matched\n parameters to be initialzed.\n \"\"\"\n self._initializers = initializers or []\n self._prevent_regex = None\n if prevent_regexes:\n self._prevent_regex = \"(\" + \")|(\".join(prevent_regexes) + \")\"\n\n def __call__(self, module: torch.nn.Module) -> None:\n \"\"\"\n Applies an initializer to all parameters in a module that match one of the regexes we were\n given in this object's constructor. Does nothing to parameters that do not match.\n\n Parameters\n ----------\n module : torch.nn.Module, required.\n The Pytorch module to apply the initializers to.\n \"\"\"\n logger.info(\"Initializing parameters\")\n unused_regexes = {initializer[0] for initializer in self._initializers}\n uninitialized_parameters = set()\n # Store which initialisers were applied to which parameters.\n for name, parameter in module.named_parameters():\n for initializer_regex, initializer in self._initializers:\n allow = self._prevent_regex is None or not bool(\n re.search(self._prevent_regex, name)\n )\n if allow and re.search(initializer_regex, name):\n logger.info(\"Initializing %s using %s initializer\", name, initializer_regex)\n initializer(parameter, parameter_name=name)\n unused_regexes.discard(initializer_regex)\n break\n else: # no break\n uninitialized_parameters.add(name)\n for regex in unused_regexes:\n logger.warning(\"Did not use initialization regex that was passed: %s\", regex)\n logger.info(\n \"Done initializing parameters; the following parameters are using their \"\n \"default initialization from their code\"\n )\n uninitialized_parameter_list = list(uninitialized_parameters)\n uninitialized_parameter_list.sort()\n for name in uninitialized_parameter_list:\n logger.info(\" %s\", name)\n\n @classmethod\n def from_params(cls, params: List[Tuple[str, Params]] = None) -> \"InitializerApplicator\":\n \"\"\"\n Converts a Params object into an InitializerApplicator. The json should\n be formatted as follows::\n\n [\n [\"parameter_regex_match1\",\n {\n \"type\": \"normal\"\n \"mean\": 0.01\n \"std\": 0.1\n }\n ],\n [\"parameter_regex_match2\", \"uniform\"]\n [\"prevent_init_regex\", \"prevent\"]\n ]\n\n where the first item in each tuple is the regex that matches to parameters, and the second\n item is a set of parameters that will be passed to ``Initialzer.from_params()``. These\n values can either be strings, in which case they correspond to the names of initializers,\n or dictionaries, in which case they must contain the \"type\" key, corresponding to the name\n of an initializer. In addition, they may contain auxiliary named parameters which will be\n fed to the initializer itself. To determine valid auxiliary parameters, please refer to the\n torch.nn.init documentation. Only \"prevent\" is a special type which does not have corresponding\n initializer. 
Any parameter matching its corresponding regex will be overridden to NOT initialize.\n\n Returns\n -------\n An InitializerApplicator containing the specified initializers.\n \"\"\"\n\n params = params or []\n\n def is_prevent(item):\n return item in (\"prevent\", {\"type\": \"prevent\"})\n\n prevent_regexes = [param[0] for param in params if is_prevent(param[1])]\n params = [param for param in params if param[1] if not is_prevent(param[1])]\n initializers = [\n (name, Initializer.from_params(init_params)) for name, init_params in params\n ]\n return InitializerApplicator(initializers, prevent_regexes)\n", "import torch\nfrom torch.nn.parameter import Parameter\nfrom overrides import overrides\n\nfrom allennlp.modules.span_extractors.span_extractor import SpanExtractor\nfrom allennlp.modules.token_embedders.embedding import Embedding\nfrom allennlp.nn import util\nfrom allennlp.common.checks import ConfigurationError\n\n\[email protected](\"bidirectional_endpoint\")\nclass BidirectionalEndpointSpanExtractor(SpanExtractor):\n \"\"\"\n Represents spans from a bidirectional encoder as a concatenation of two different\n representations of the span endpoints, one for the forward direction of the encoder\n and one from the backward direction. This type of representation encodes some subtlety,\n because when you consider the forward and backward directions separately, the end index\n of the span for the backward direction's representation is actually the start index.\n\n By default, this ``SpanExtractor`` represents spans as\n ``sequence_tensor[inclusive_span_end] - sequence_tensor[exclusive_span_start]``\n meaning that the representation is the difference between the the last word in the span\n and the word `before` the span started. Note that the start and end indices are with\n respect to the direction that the RNN is going in, so for the backward direction, the\n start/end indices are reversed.\n\n Additionally, the width of the spans can be embedded and concatenated on to the\n final combination.\n\n The following other types of representation are supported for both the forward and backward\n directions, assuming that ``x = span_start_embeddings`` and ``y = span_end_embeddings``.\n\n ``x``, ``y``, ``x*y``, ``x+y``, ``x-y``, ``x/y``, where each of those binary operations\n is performed elementwise. 
You can list as many combinations as you want, comma separated.\n For example, you might give ``x,y,x*y`` as the ``combination`` parameter to this class.\n The computed similarity function would then be ``[x; y; x*y]``, which can then be optionally\n concatenated with an embedded representation of the width of the span.\n\n Parameters\n ----------\n input_dim : ``int``, required.\n The final dimension of the ``sequence_tensor``.\n forward_combination : str, optional (default = \"y-x\").\n The method used to combine the ``forward_start_embeddings`` and ``forward_end_embeddings``\n for the forward direction of the bidirectional representation.\n See above for a full description.\n backward_combination : str, optional (default = \"x-y\").\n The method used to combine the ``backward_start_embeddings`` and ``backward_end_embeddings``\n for the backward direction of the bidirectional representation.\n See above for a full description.\n num_width_embeddings : ``int``, optional (default = None).\n Specifies the number of buckets to use when representing\n span width features.\n span_width_embedding_dim : ``int``, optional (default = None).\n The embedding size for the span_width features.\n bucket_widths : ``bool``, optional (default = False).\n Whether to bucket the span widths into log-space buckets. If ``False``,\n the raw span widths are used.\n use_sentinels : ``bool``, optional (default = ``True``).\n If ``True``, sentinels are used to represent exclusive span indices for the elements\n in the first and last positions in the sequence (as the exclusive indices for these\n elements are outside of the the sequence boundary). This is not strictly necessary,\n as you may know that your exclusive start and end indices are always within your sequence\n representation, such as if you have appended/prepended <START> and <END> tokens to your\n sequence.\n \"\"\"\n\n def __init__(\n self,\n input_dim: int,\n forward_combination: str = \"y-x\",\n backward_combination: str = \"x-y\",\n num_width_embeddings: int = None,\n span_width_embedding_dim: int = None,\n bucket_widths: bool = False,\n use_sentinels: bool = True,\n ) -> None:\n super().__init__()\n self._input_dim = input_dim\n self._forward_combination = forward_combination\n self._backward_combination = backward_combination\n self._num_width_embeddings = num_width_embeddings\n self._bucket_widths = bucket_widths\n\n if self._input_dim % 2 != 0:\n raise ConfigurationError(\n \"The input dimension is not divisible by 2, but the \"\n \"BidirectionalEndpointSpanExtractor assumes the embedded representation \"\n \"is bidirectional (and hence divisible by 2).\"\n )\n if num_width_embeddings is not None and span_width_embedding_dim is not None:\n self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)\n elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):\n raise ConfigurationError(\n \"To use a span width embedding representation, you must\"\n \"specify both num_width_buckets and span_width_embedding_dim.\"\n )\n else:\n self._span_width_embedding = None\n\n self._use_sentinels = use_sentinels\n if use_sentinels:\n self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))\n self._end_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))\n\n def get_input_dim(self) -> int:\n return self._input_dim\n\n def get_output_dim(self) -> int:\n unidirectional_dim = int(self._input_dim / 2)\n forward_combined_dim = util.get_combined_dim(\n self._forward_combination, 
[unidirectional_dim, unidirectional_dim]\n )\n backward_combined_dim = util.get_combined_dim(\n self._backward_combination, [unidirectional_dim, unidirectional_dim]\n )\n if self._span_width_embedding is not None:\n return (\n forward_combined_dim\n + backward_combined_dim\n + self._span_width_embedding.get_output_dim()\n )\n return forward_combined_dim + backward_combined_dim\n\n @overrides\n def forward(\n self,\n sequence_tensor: torch.FloatTensor,\n span_indices: torch.LongTensor,\n sequence_mask: torch.LongTensor = None,\n span_indices_mask: torch.LongTensor = None,\n ) -> torch.FloatTensor:\n\n # Both of shape (batch_size, sequence_length, embedding_size / 2)\n forward_sequence, backward_sequence = sequence_tensor.split(\n int(self._input_dim / 2), dim=-1\n )\n forward_sequence = forward_sequence.contiguous()\n backward_sequence = backward_sequence.contiguous()\n\n # shape (batch_size, num_spans)\n span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]\n\n if span_indices_mask is not None:\n span_starts = span_starts * span_indices_mask\n span_ends = span_ends * span_indices_mask\n # We want `exclusive` span starts, so we remove 1 from the forward span starts\n # as the AllenNLP ``SpanField`` is inclusive.\n # shape (batch_size, num_spans)\n exclusive_span_starts = span_starts - 1\n # shape (batch_size, num_spans, 1)\n start_sentinel_mask = (exclusive_span_starts == -1).long().unsqueeze(-1)\n\n # We want `exclusive` span ends for the backward direction\n # (so that the `start` of the span in that direction is exlusive), so\n # we add 1 to the span ends as the AllenNLP ``SpanField`` is inclusive.\n exclusive_span_ends = span_ends + 1\n\n if sequence_mask is not None:\n # shape (batch_size)\n sequence_lengths = util.get_lengths_from_binary_sequence_mask(sequence_mask)\n else:\n # shape (batch_size), filled with the sequence length size of the sequence_tensor.\n sequence_lengths = torch.ones_like(\n sequence_tensor[:, 0, 0], dtype=torch.long\n ) * sequence_tensor.size(1)\n\n # shape (batch_size, num_spans, 1)\n end_sentinel_mask = (\n (exclusive_span_ends >= sequence_lengths.unsqueeze(-1)).long().unsqueeze(-1)\n )\n\n # As we added 1 to the span_ends to make them exclusive, which might have caused indices\n # equal to the sequence_length to become out of bounds, we multiply by the inverse of the\n # end_sentinel mask to erase these indices (as we will replace them anyway in the block below).\n # The same argument follows for the exclusive span start indices.\n exclusive_span_ends = exclusive_span_ends * (1 - end_sentinel_mask.squeeze(-1))\n exclusive_span_starts = exclusive_span_starts * (1 - start_sentinel_mask.squeeze(-1))\n\n # We'll check the indices here at runtime, because it's difficult to debug\n # if this goes wrong and it's tricky to get right.\n if (exclusive_span_starts < 0).any() or (\n exclusive_span_ends > sequence_lengths.unsqueeze(-1)\n ).any():\n raise ValueError(\n f\"Adjusted span indices must lie inside the length of the sequence tensor, \"\n f\"but found: exclusive_span_starts: {exclusive_span_starts}, \"\n f\"exclusive_span_ends: {exclusive_span_ends} for a sequence tensor with lengths \"\n f\"{sequence_lengths}.\"\n )\n\n # Forward Direction: start indices are exclusive. 
Shape (batch_size, num_spans, input_size / 2)\n forward_start_embeddings = util.batched_index_select(\n forward_sequence, exclusive_span_starts\n )\n # Forward Direction: end indices are inclusive, so we can just use span_ends.\n # Shape (batch_size, num_spans, input_size / 2)\n forward_end_embeddings = util.batched_index_select(forward_sequence, span_ends)\n\n # Backward Direction: The backward start embeddings use the `forward` end\n # indices, because we are going backwards.\n # Shape (batch_size, num_spans, input_size / 2)\n backward_start_embeddings = util.batched_index_select(\n backward_sequence, exclusive_span_ends\n )\n # Backward Direction: The backward end embeddings use the `forward` start\n # indices, because we are going backwards.\n # Shape (batch_size, num_spans, input_size / 2)\n backward_end_embeddings = util.batched_index_select(backward_sequence, span_starts)\n\n if self._use_sentinels:\n # If we're using sentinels, we need to replace all the elements which were\n # outside the dimensions of the sequence_tensor with either the start sentinel,\n # or the end sentinel.\n float_end_sentinel_mask = end_sentinel_mask.float()\n float_start_sentinel_mask = start_sentinel_mask.float()\n forward_start_embeddings = (\n forward_start_embeddings * (1 - float_start_sentinel_mask)\n + float_start_sentinel_mask * self._start_sentinel\n )\n backward_start_embeddings = (\n backward_start_embeddings * (1 - float_end_sentinel_mask)\n + float_end_sentinel_mask * self._end_sentinel\n )\n\n # Now we combine the forward and backward spans in the manner specified by the\n # respective combinations and concatenate these representations.\n # Shape (batch_size, num_spans, forward_combination_dim)\n forward_spans = util.combine_tensors(\n self._forward_combination, [forward_start_embeddings, forward_end_embeddings]\n )\n # Shape (batch_size, num_spans, backward_combination_dim)\n backward_spans = util.combine_tensors(\n self._backward_combination, [backward_start_embeddings, backward_end_embeddings]\n )\n # Shape (batch_size, num_spans, forward_combination_dim + backward_combination_dim)\n span_embeddings = torch.cat([forward_spans, backward_spans], -1)\n\n if self._span_width_embedding is not None:\n # Embed the span widths and concatenate to the rest of the representations.\n if self._bucket_widths:\n span_widths = util.bucket_values(\n span_ends - span_starts, num_total_buckets=self._num_width_embeddings\n )\n else:\n span_widths = span_ends - span_starts\n\n span_width_embeddings = self._span_width_embedding(span_widths)\n return torch.cat([span_embeddings, span_width_embeddings], -1)\n\n if span_indices_mask is not None:\n return span_embeddings * span_indices_mask.float().unsqueeze(-1)\n return span_embeddings\n", "import itertools\nimport math\n\nfrom pytest import approx, raises\nimport torch\n\nfrom allennlp.modules import ConditionalRandomField\nfrom allennlp.modules.conditional_random_field import allowed_transitions\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.testing import AllenNlpTestCase\n\n\nclass TestConditionalRandomField(AllenNlpTestCase):\n def setUp(self):\n super().setUp()\n self.logits = torch.Tensor(\n [\n [[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3, 0.1], [0, 0, 0.9, 10, 1]],\n [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, 0.3, 0.1], [0, 0, 0.9, 1, 1]],\n ]\n )\n self.tags = torch.LongTensor([[2, 3, 4], [3, 2, 2]])\n\n self.transitions = torch.Tensor(\n [\n [0.1, 0.2, 0.3, 0.4, 0.5],\n [0.8, 0.3, 0.1, 0.7, 0.9],\n [-0.3, 2.1, -5.6, 3.4, 4.0],\n [0.2, 0.4, 
0.6, -0.3, -0.4],\n [1.0, 1.0, 1.0, 1.0, 1.0],\n ]\n )\n\n self.transitions_from_start = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.6])\n self.transitions_to_end = torch.Tensor([-0.1, -0.2, 0.3, -0.4, -0.4])\n\n # Use the CRF Module with fixed transitions to compute the log_likelihood\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n\n def score(self, logits, tags):\n \"\"\"\n Computes the likelihood score for the given sequence of tags,\n given the provided logits (and the transition weights in the CRF model)\n \"\"\"\n # Start with transitions from START and to END\n total = self.transitions_from_start[tags[0]] + self.transitions_to_end[tags[-1]]\n # Add in all the intermediate transitions\n for tag, next_tag in zip(tags, tags[1:]):\n total += self.transitions[tag, next_tag]\n # Add in the logits for the observed tags\n for logit, tag in zip(logits, tags):\n total += logit[tag]\n return total\n\n def test_forward_works_without_mask(self):\n log_likelihood = self.crf(self.logits, self.tags).item()\n\n # Now compute the log-likelihood manually\n manual_log_likelihood = 0.0\n\n # For each instance, manually compute the numerator\n # (which is just the score for the logits and actual tags)\n # and the denominator\n # (which is the log-sum-exp of the scores for the logits across all possible tags)\n for logits_i, tags_i in zip(self.logits, self.tags):\n numerator = self.score(logits_i.detach(), tags_i.detach())\n all_scores = [\n self.score(logits_i.detach(), tags_j)\n for tags_j in itertools.product(range(5), repeat=3)\n ]\n denominator = math.log(sum(math.exp(score) for score in all_scores))\n # And include them in the manual calculation.\n manual_log_likelihood += numerator - denominator\n\n # The manually computed log likelihood should equal the result of crf.forward.\n assert manual_log_likelihood.item() == approx(log_likelihood)\n\n def test_forward_works_with_mask(self):\n # Use a non-trivial mask\n mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n\n log_likelihood = self.crf(self.logits, self.tags, mask).item()\n\n # Now compute the log-likelihood manually\n manual_log_likelihood = 0.0\n\n # For each instance, manually compute the numerator\n # (which is just the score for the logits and actual tags)\n # and the denominator\n # (which is the log-sum-exp of the scores for the logits across all possible tags)\n for logits_i, tags_i, mask_i in zip(self.logits, self.tags, mask):\n # Find the sequence length for this input and only look at that much of each sequence.\n sequence_length = torch.sum(mask_i.detach())\n logits_i = logits_i.data[:sequence_length]\n tags_i = tags_i.data[:sequence_length]\n\n numerator = self.score(logits_i, tags_i)\n all_scores = [\n self.score(logits_i, tags_j)\n for tags_j in itertools.product(range(5), repeat=sequence_length)\n ]\n denominator = math.log(sum(math.exp(score) for score in all_scores))\n # And include them in the manual calculation.\n manual_log_likelihood += numerator - denominator\n\n # The manually computed log likelihood should equal the result of crf.forward.\n assert manual_log_likelihood.item() == approx(log_likelihood)\n\n def test_viterbi_tags(self):\n mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n\n viterbi_path = self.crf.viterbi_tags(self.logits, mask)\n\n # Separate the tags and scores.\n viterbi_tags = [x for x, y in viterbi_path]\n 
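# Note: viterbi_tags returns one (tag_sequence, viterbi_score) pair per\n # instance in the batch, which is why the scores can be unpacked the same way.\n 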
viterbi_scores = [y for x, y in viterbi_path]\n\n # Check that the viterbi tags are what I think they should be.\n assert viterbi_tags == [[2, 4, 3], [4, 2]]\n\n # We can also iterate over all possible tag sequences and use self.score\n # to check the likelihood of each. The most likely sequence should be the\n # same as what we get from viterbi_tags.\n most_likely_tags = []\n best_scores = []\n\n for logit, mas in zip(self.logits, mask):\n sequence_length = torch.sum(mas.detach())\n most_likely, most_likelihood = None, -float(\"inf\")\n for tags in itertools.product(range(5), repeat=sequence_length):\n score = self.score(logit.data, tags)\n if score > most_likelihood:\n most_likely, most_likelihood = tags, score\n # Convert tuple to list; otherwise == complains.\n most_likely_tags.append(list(most_likely))\n best_scores.append(most_likelihood)\n\n assert viterbi_tags == most_likely_tags\n assert viterbi_scores == best_scores\n\n def test_constrained_viterbi_tags(self):\n constraints = {\n (0, 0),\n (0, 1),\n (1, 1),\n (1, 2),\n (2, 2),\n (2, 3),\n (3, 3),\n (3, 4),\n (4, 4),\n (4, 0),\n }\n\n # Add the transitions to the end tag\n # and from the start tag.\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n\n crf = ConditionalRandomField(num_tags=5, constraints=constraints)\n crf.transitions = torch.nn.Parameter(self.transitions)\n crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)\n crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n\n mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n\n viterbi_path = crf.viterbi_tags(self.logits, mask)\n\n # Get just the tags from each tuple of (tags, score).\n viterbi_tags = [x for x, y in viterbi_path]\n\n # Now the tags should respect the constraints\n assert viterbi_tags == [[2, 3, 3], [2, 3]]\n\n def test_allowed_transitions(self):\n\n bio_labels = [\"O\", \"B-X\", \"I-X\", \"B-Y\", \"I-Y\"] # start tag, end tag\n # 0 1 2 3 4 5 6\n allowed = allowed_transitions(\"BIO\", dict(enumerate(bio_labels)))\n\n # The empty spaces in this matrix indicate disallowed transitions.\n assert set(allowed) == { # Extra column for end tag.\n (0, 0),\n (0, 1),\n (0, 3),\n (0, 6),\n (1, 0),\n (1, 1),\n (1, 2),\n (1, 3),\n (1, 6),\n (2, 0),\n (2, 1),\n (2, 2),\n (2, 3),\n (2, 6),\n (3, 0),\n (3, 1),\n (3, 3),\n (3, 4),\n (3, 6),\n (4, 0),\n (4, 1),\n (4, 3),\n (4, 4),\n (4, 6),\n (5, 0),\n (5, 1),\n (5, 3), # Extra row for start tag\n }\n\n bioul_labels = [\n \"O\",\n \"B-X\",\n \"I-X\",\n \"L-X\",\n \"U-X\",\n \"B-Y\",\n \"I-Y\",\n \"L-Y\",\n \"U-Y\",\n ] # start tag, end tag\n # 0 1 2 3 4 5 6 7 8 9 10\n allowed = allowed_transitions(\"BIOUL\", dict(enumerate(bioul_labels)))\n\n # The empty spaces in this matrix indicate disallowed transitions.\n assert set(allowed) == { # Extra column for end tag.\n (0, 0),\n (0, 1),\n (0, 4),\n (0, 5),\n (0, 8),\n (0, 10),\n (1, 2),\n (1, 3), # noqa\n (2, 2),\n (2, 3),\n (3, 0),\n (3, 1),\n (3, 4),\n (3, 5),\n (3, 8),\n (3, 10),\n (4, 0),\n (4, 1),\n (4, 4),\n (4, 5),\n (4, 8),\n (4, 10),\n (5, 6),\n (5, 7),\n (6, 6),\n (6, 7),\n (7, 0),\n (7, 1),\n (7, 4),\n (7, 5),\n (7, 8),\n (7, 10),\n (8, 0),\n (8, 1),\n (8, 4),\n (8, 5),\n (8, 8),\n (8, 10),\n # Extra row for start tag.\n (9, 0),\n (9, 1),\n (9, 4),\n (9, 5),\n (9, 8),\n }\n\n iob1_labels = [\"O\", \"B-X\", \"I-X\", \"B-Y\", \"I-Y\"] # start tag, end tag\n # 0 1 2 3 4 5 6\n allowed = allowed_transitions(\"IOB1\", dict(enumerate(iob1_labels)))\n\n # The empty spaces in this matrix indicate disallowed transitions.\n 
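# (Annotation: in IOB1 a chunk normally starts with I-; B- is only used for a\n # chunk that immediately follows another chunk of the same type, so the only\n # transitions into B-X come from B-X or I-X, and similarly for B-Y.)\n 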
assert set(allowed) == { # Extra column for end tag.\n (0, 0),\n (0, 2),\n (0, 4),\n (0, 6),\n (1, 0),\n (1, 1),\n (1, 2),\n (1, 4),\n (1, 6),\n (2, 0),\n (2, 1),\n (2, 2),\n (2, 4),\n (2, 6),\n (3, 0),\n (3, 2),\n (3, 3),\n (3, 4),\n (3, 6),\n (4, 0),\n (4, 2),\n (4, 3),\n (4, 4),\n (4, 6),\n (5, 0),\n (5, 2),\n (5, 4), # Extra row for start tag\n }\n with raises(ConfigurationError):\n allowed_transitions(\"allennlp\", {})\n\n bmes_labels = [\"B-X\", \"M-X\", \"E-X\", \"S-X\", \"B-Y\", \"M-Y\", \"E-Y\", \"S-Y\"] # start tag, end tag\n # 0 1 2 3 4 5 6 7 8 9\n allowed = allowed_transitions(\"BMES\", dict(enumerate(bmes_labels)))\n assert set(allowed) == {\n (0, 1),\n (0, 2),\n (1, 1),\n (1, 2), # Extra column for end tag.\n (2, 0),\n (2, 3),\n (2, 4),\n (2, 7),\n (2, 9), # noqa\n (3, 0),\n (3, 3),\n (3, 4),\n (3, 7),\n (3, 9),\n (4, 5),\n (4, 6),\n (5, 5),\n (5, 6),\n (6, 0),\n (6, 3),\n (6, 4),\n (6, 7),\n (6, 9),\n (7, 0),\n (7, 3),\n (7, 4),\n (7, 7),\n (7, 9),\n (8, 0),\n (8, 3),\n (8, 4),\n (8, 7), # Extra row for start tag\n }\n", "import torch\nimport numpy\n\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.training.metrics import UnigramRecall\n\n\nclass UnigramRecallTest(AllenNlpTestCase):\n def test_sequence_recall(self):\n recall = UnigramRecall()\n gold = torch.Tensor([[1, 2, 3], [2, 4, 8], [7, 1, 1]])\n predictions = torch.Tensor(\n [[[1, 2, 3], [1, 2, -1]], [[2, 4, 8], [2, 5, 9]], [[-1, -1, -1], [7, 1, -1]]]\n )\n\n recall(predictions, gold)\n actual_recall = recall.get_metric()\n numpy.testing.assert_almost_equal(actual_recall, 1)\n\n def test_sequence_recall_respects_mask(self):\n recall = UnigramRecall()\n gold = torch.Tensor([[2, 4, 8], [1, 2, 3], [7, 1, 1], [11, 14, 17]])\n predictions = torch.Tensor(\n [\n [[2, 4, 8], [2, 5, 9]], # 3/3\n [[-1, 2, 4], [3, 8, -1]], # 2/2\n [[-1, -1, -1], [7, 2, -1]], # 1/2\n [[12, 13, 17], [11, 13, 18]], # 2/2\n ]\n )\n mask = torch.Tensor([[1, 1, 1], [0, 1, 1], [1, 1, 0], [1, 0, 1]])\n\n recall(predictions, gold, mask)\n actual_recall = recall.get_metric()\n numpy.testing.assert_almost_equal(actual_recall, 7 / 8)\n\n def test_sequence_recall_accumulates_and_resets_correctly(self):\n recall = UnigramRecall()\n gold = torch.Tensor([[1, 2, 3]])\n recall(torch.Tensor([[[1, 2, 3]]]), gold)\n recall(torch.Tensor([[[7, 8, 4]]]), gold)\n\n actual_recall = recall.get_metric(reset=True)\n numpy.testing.assert_almost_equal(actual_recall, 1 / 2)\n assert recall.correct_count == 0\n assert recall.total_count == 0\n\n def test_get_metric_on_new_object_works(self):\n recall = UnigramRecall()\n\n actual_recall = recall.get_metric(reset=True)\n numpy.testing.assert_almost_equal(actual_recall, 0)\n", "from typing import Any, Dict, List, Tuple\nfrom collections import Counter\n\nfrom overrides import overrides\nfrom scipy.optimize import linear_sum_assignment\nimport numpy as np\nimport torch\n\nfrom allennlp.training.metrics.metric import Metric\n\n\[email protected](\"conll_coref_scores\")\nclass ConllCorefScores(Metric):\n def __init__(self) -> None:\n self.scorers = [Scorer(m) for m in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]\n\n @overrides\n def __call__(\n self, # type: ignore\n top_spans: torch.Tensor,\n antecedent_indices: torch.Tensor,\n predicted_antecedents: torch.Tensor,\n metadata_list: List[Dict[str, Any]],\n ):\n \"\"\"\n Parameters\n ----------\n top_spans : ``torch.Tensor``\n (start, end) indices for all spans kept after span pruning in the model.\n Expected shape: (batch_size, num_spans, 2)\n antecedent_indices : 
``torch.Tensor``\n For each span, the indices of all allowed antecedents for that span. This is\n independent of the batch dimension, as it's just based on order in the document.\n Expected shape: (num_spans, num_antecedents)\n predicted_antecedents: ``torch.Tensor``\n For each span, this contains the index (into antecedent_indices) of the most likely\n antecedent for that span.\n Expected shape: (batch_size, num_spans)\n metadata_list : ``List[Dict[str, Any]]``\n A metadata dictionary for each instance in the batch. We use the \"clusters\" key from\n this dictionary, which has the annotated gold coreference clusters for that instance.\n \"\"\"\n top_spans, antecedent_indices, predicted_antecedents = self.unwrap_to_tensors(\n top_spans, antecedent_indices, predicted_antecedents\n )\n for i, metadata in enumerate(metadata_list):\n gold_clusters, mention_to_gold = self.get_gold_clusters(metadata[\"clusters\"])\n predicted_clusters, mention_to_predicted = self.get_predicted_clusters(\n top_spans[i], antecedent_indices, predicted_antecedents[i]\n )\n for scorer in self.scorers:\n scorer.update(\n predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold\n )\n\n @overrides\n def get_metric(self, reset: bool = False) -> Tuple[float, float, float]:\n metrics = (lambda e: e.get_precision(), lambda e: e.get_recall(), lambda e: e.get_f1())\n precision, recall, f1_score = tuple(\n sum(metric(e) for e in self.scorers) / len(self.scorers) for metric in metrics\n )\n if reset:\n self.reset()\n return precision, recall, f1_score\n\n @overrides\n def reset(self):\n self.scorers = [Scorer(metric) for metric in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]\n\n @staticmethod\n def get_gold_clusters(gold_clusters):\n gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]\n mention_to_gold = {}\n for gold_cluster in gold_clusters:\n for mention in gold_cluster:\n mention_to_gold[mention] = gold_cluster\n return gold_clusters, mention_to_gold\n\n @staticmethod\n def get_predicted_clusters(\n top_spans: torch.Tensor,\n antecedent_indices: torch.Tensor,\n predicted_antecedents: torch.Tensor,\n ) -> Tuple[\n List[Tuple[Tuple[int, int], ...]], Dict[Tuple[int, int], Tuple[Tuple[int, int], ...]]\n ]:\n # Pytorch 0.4 introduced scalar tensors, so our calls to tuple() and such below don't\n # actually give ints unless we convert to numpy first. 
So we do that here.\n top_spans = top_spans.numpy() # (num_spans, 2)\n antecedent_indices = antecedent_indices.numpy() # (num_spans, num_antecedents)\n predicted_antecedents = predicted_antecedents.numpy() # (num_spans,)\n\n predicted_clusters_to_ids: Dict[Tuple[int, int], int] = {}\n clusters: List[List[Tuple[int, int]]] = []\n for i, predicted_antecedent in enumerate(predicted_antecedents):\n if predicted_antecedent < 0:\n continue\n\n # Find predicted index in the antecedent spans.\n predicted_index = antecedent_indices[i, predicted_antecedent]\n # Must be a previous span.\n assert i > predicted_index\n antecedent_span: Tuple[int, int] = tuple(top_spans[predicted_index]) # type: ignore\n\n # Check if we've seen the span before.\n if antecedent_span in predicted_clusters_to_ids.keys():\n predicted_cluster_id: int = predicted_clusters_to_ids[antecedent_span]\n else:\n # We start a new cluster.\n predicted_cluster_id = len(clusters)\n clusters.append([antecedent_span])\n predicted_clusters_to_ids[antecedent_span] = predicted_cluster_id\n\n mention: Tuple[int, int] = tuple(top_spans[i]) # type: ignore\n clusters[predicted_cluster_id].append(mention)\n predicted_clusters_to_ids[mention] = predicted_cluster_id\n\n # finalise the spans and clusters.\n final_clusters = [tuple(cluster) for cluster in clusters]\n # Return a mapping of each mention to the cluster containing it.\n mention_to_cluster: Dict[Tuple[int, int], Tuple[Tuple[int, int], ...]] = {\n mention: final_clusters[cluster_id]\n for mention, cluster_id in predicted_clusters_to_ids.items()\n }\n\n return final_clusters, mention_to_cluster\n\n\nclass Scorer:\n \"\"\"\n Mostly borrowed from <https://github.com/clarkkev/deep-coref/blob/master/evaluation.py>\n \"\"\"\n\n def __init__(self, metric):\n self.precision_numerator = 0\n self.precision_denominator = 0\n self.recall_numerator = 0\n self.recall_denominator = 0\n self.metric = metric\n\n def update(self, predicted, gold, mention_to_predicted, mention_to_gold):\n if self.metric == self.ceafe:\n p_num, p_den, r_num, r_den = self.metric(predicted, gold)\n else:\n p_num, p_den = self.metric(predicted, mention_to_gold)\n r_num, r_den = self.metric(gold, mention_to_predicted)\n self.precision_numerator += p_num\n self.precision_denominator += p_den\n self.recall_numerator += r_num\n self.recall_denominator += r_den\n\n def get_f1(self):\n precision = (\n 0\n if self.precision_denominator == 0\n else self.precision_numerator / float(self.precision_denominator)\n )\n recall = (\n 0\n if self.recall_denominator == 0\n else self.recall_numerator / float(self.recall_denominator)\n )\n return 0 if precision + recall == 0 else 2 * precision * recall / (precision + recall)\n\n def get_recall(self):\n if self.recall_numerator == 0:\n return 0\n else:\n return self.recall_numerator / float(self.recall_denominator)\n\n def get_precision(self):\n if self.precision_numerator == 0:\n return 0\n else:\n return self.precision_numerator / float(self.precision_denominator)\n\n def get_prf(self):\n return self.get_precision(), self.get_recall(), self.get_f1()\n\n @staticmethod\n def b_cubed(clusters, mention_to_gold):\n \"\"\"\n Averaged per-mention precision and recall.\n <https://pdfs.semanticscholar.org/cfe3/c24695f1c14b78a5b8e95bcbd1c666140fd1.pdf>\n \"\"\"\n numerator, denominator = 0, 0\n for cluster in clusters:\n if len(cluster) == 1:\n continue\n gold_counts = Counter()\n correct = 0\n for mention in cluster:\n if mention in mention_to_gold:\n gold_counts[tuple(mention_to_gold[mention])] += 1\n 
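# Each of the `count` mentions tallied above contributes count / len(cluster),\n # hence the count * count below; singleton clusters are excluded on both\n # sides (the `continue` above and the len(cluster2) != 1 check).\n 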
for cluster2, count in gold_counts.items():\n if len(cluster2) != 1:\n correct += count * count\n numerator += correct / float(len(cluster))\n denominator += len(cluster)\n return numerator, denominator\n\n @staticmethod\n def muc(clusters, mention_to_gold):\n \"\"\"\n Counts the mentions in each predicted cluster which need to be re-allocated in\n order for each predicted cluster to be contained by the respective gold cluster.\n <https://aclweb.org/anthology/M/M95/M95-1005.pdf>\n \"\"\"\n true_p, all_p = 0, 0\n for cluster in clusters:\n all_p += len(cluster) - 1\n true_p += len(cluster)\n linked = set()\n for mention in cluster:\n if mention in mention_to_gold:\n linked.add(mention_to_gold[mention])\n else:\n true_p -= 1\n true_p -= len(linked)\n return true_p, all_p\n\n @staticmethod\n def phi4(gold_clustering, predicted_clustering):\n \"\"\"\n Subroutine for ceafe. Computes the mention F measure between gold and\n predicted mentions in a cluster.\n \"\"\"\n return (\n 2\n * len([mention for mention in gold_clustering if mention in predicted_clustering])\n / float(len(gold_clustering) + len(predicted_clustering))\n )\n\n @staticmethod\n def ceafe(clusters, gold_clusters):\n \"\"\"\n Computes the Constrained EntityAlignment F-Measure (CEAF) for evaluating coreference.\n Gold and predicted mentions are aligned into clusterings which maximise a metric - in\n this case, the F measure between gold and predicted clusters.\n\n <https://www.semanticscholar.org/paper/On-Coreference-Resolution-Performance-Metrics-Luo/de133c1f22d0dfe12539e25dda70f28672459b99>\n \"\"\"\n clusters = [cluster for cluster in clusters if len(cluster) != 1]\n scores = np.zeros((len(gold_clusters), len(clusters)))\n for i, gold_cluster in enumerate(gold_clusters):\n for j, cluster in enumerate(clusters):\n scores[i, j] = Scorer.phi4(gold_cluster, cluster)\n row, col = linear_sum_assignment(-scores)\n similarity = sum(scores[row, col])\n return similarity, len(clusters), similarity, len(gold_clusters)\n" ]
[ [ "torch.exp", "torch.nn.functional.log_softmax" ], [ "torch.matmul", "torch.Tensor" ], [ "numpy.random.shuffle", "numpy.random.seed" ], [ "numpy.zeros" ], [ "torch.nn.init.calculate_gain", "torch.load" ], [ "torch.ones_like", "torch.cat" ], [ "torch.LongTensor", "torch.nn.Parameter", "torch.Tensor" ], [ "numpy.testing.assert_almost_equal", "torch.Tensor" ], [ "scipy.optimize.linear_sum_assignment" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
y-veys/iGibson
[ "1442c50187f8fcef118b097c195fef707eef04cb", "1442c50187f8fcef118b097c195fef707eef04cb" ]
[ "gibson2/core/physics/drivers/minitaur.py", "gibson2/envs/locomotor_env.py" ]
[ "\"\"\"This file implements the functionalities of a minitaur using pybullet.\n\n\"\"\"\nimport copy\nimport math\nimport numpy as np\nfrom gibson2.core.physics.drivers import motor\nfrom gibson2.core.physics.robot_locomotors import LocomotorRobot\nfrom gibson2.core.physics.robot_bases import Joint, BodyPart\nimport os, sys\nimport pybullet as p\nimport gym\nfrom transforms3d.euler import euler2quat\n\ntracking_camera = {'yaw': 20, 'z_offset': 0.3, 'distance': 2, 'pitch': -20}\n\n\nclass MinitaurBase(LocomotorRobot):\n model_type = \"URDF\"\n default_scale = 1\n\n KNEE_CONSTRAINT_POINT_RIGHT = [0, 0.005, 0.2]\n KNEE_CONSTRAINT_POINT_LEFT = [0, 0.01, 0.2]\n OVERHEAT_SHUTDOWN_TORQUE = 2.45\n OVERHEAT_SHUTDOWN_TIME = 1.0\n LEG_POSITION = [\"front_left\", \"back_left\", \"front_right\", \"back_right\"]\n MOTOR_NAMES = [\n \"motor_front_leftL_joint\", \"motor_front_leftR_joint\", \"motor_back_leftL_joint\",\n \"motor_back_leftR_joint\", \"motor_front_rightL_joint\", \"motor_front_rightR_joint\",\n \"motor_back_rightL_joint\", \"motor_back_rightR_joint\"\n ]\n LEG_LINK_ID = [2, 3, 5, 6, 8, 9, 11, 12, 15, 16, 18, 19, 21, 22, 24, 25]\n MOTOR_LINK_ID = [1, 4, 7, 10, 14, 17, 20, 23]\n FOOT_LINK_ID = [3, 6, 9, 12, 16, 19, 22, 25]\n BASE_LINK_ID = -1\n OBSERVATION_DIM = 3 * len(MOTOR_NAMES) + 4 # VELOCITY, ANGLE, TORQUES\n\n self_collision_enabled = True\n motor_velocity_limit = np.inf\n\n #accurate_motor_model_enabled=False ## (hzyjerry): affect speed?\n motor_kp = 1.00\n motor_kd = 0.2\n torque_control_enabled = False\n motor_overheat_protection = True\n on_rack = False\n kd_for_pd_controllers = 0.3\n mjcf_scaling = 1\n num_motors = 8\n num_legs = int(num_motors / 2)\n motor_direction = [-1, -1, -1, -1, 1, 1, 1, 1]\n observed_motor_torques = np.zeros(num_motors)\n applied_motor_torques = np.zeros(num_motors)\n max_force = 5.5\n joint_name_to_id = None\n \"\"\"The minitaur class that simulates a quadruped robot from Ghost Robotics.\n \"\"\"\n\n def __init__(self, config, env=None, pd_control_enabled=True,\n accurate_motor_model_enabled=True):\n \"\"\"Constructs a minitaur and reset it to the initial states.\n\n Properties:\n self_collision_enabled: Whether to enable self collision.\n motor_velocity_limit: The upper limit of the motor velocity.\n pd_control_enabled: Whether to use PD control for the motors. If true, need smaller time step to stablize (1/500.0 timestep)\n accurate_motor_model_enabled: Whether to use the accurate DC motor model.\n motor_kp: proportional gain for the accurate motor model\n motor_kd: derivative gain for the acurate motor model\n torque_control_enabled: Whether to use the torque control, if set to\n False, pose control will be used.\n motor_overheat_protection: Whether to shutdown the motor that has exerted\n large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time\n (OVERHEAT_SHUTDOWN_TIME). See apply_action() in minitaur.py for more\n details.\n on_rack: Whether to place the minitaur on rack. This is only used to debug\n the walking gait. 
In this mode, the minitaur's base is hung in midair so\n that its walking gait is clearer to visualize.\n kd_for_pd_controllers: kd value for the pd controllers of the motors.\n \"\"\"\n self.config = config\n self.model_type = \"URDF\"\n #self.robot_name = \"quadruped\"\n self.robot_name = \"base_chassis_link\"\n scale = config[\"robot_scale\"] if \"robot_scale\" in config.keys() else self.default_scale\n\n LocomotorRobot.__init__(self,\n \"quadruped/minitaur.urdf\",\n self.robot_name,\n action_dim=8,\n sensor_dim=self.OBSERVATION_DIM,\n power=5,\n scale=scale,\n initial_pos=config['initial_pos'],\n target_pos=config[\"target_pos\"],\n resolution=config[\"resolution\"],\n env=env)\n\n self.r_f = 0.1\n self.time_step = config[\"speed\"][\"timestep\"]\n self.pd_control_enabled = pd_control_enabled\n self.minitaur = None ## TODO: fix this\n self.accurate_motor_model_enabled = accurate_motor_model_enabled\n if self.accurate_motor_model_enabled:\n self._kp = self.motor_kp\n self._kd = self.motor_kd\n self._motor_model = motor.MotorModel(torque_control_enabled=self.torque_control_enabled,\n kp=self._kp,\n kd=self._kd)\n elif self.pd_control_enabled:\n self._kp = 8\n self._kd = self.kd_for_pd_controllers\n else:\n self._kp = 1\n self._kd = 1\n\n if config[\"is_discrete\"]:\n self.action_space = gym.spaces.Discrete(17)\n self.torque = 10\n ## Hip_1, Ankle_1, Hip_2, Ankle_2, Hip_3, Ankle_3, Hip_4, Ankle_4\n self.action_list = [[self.r_f * self.torque, 0, 0, 0, 0, 0, 0, 0],\n [0, self.r_f * self.torque, 0, 0, 0, 0, 0, 0],\n [0, 0, self.r_f * self.torque, 0, 0, 0, 0, 0],\n [0, 0, 0, self.r_f * self.torque, 0, 0, 0, 0],\n [0, 0, 0, 0, self.r_f * self.torque, 0, 0, 0],\n [0, 0, 0, 0, 0, self.r_f * self.torque, 0, 0],\n [0, 0, 0, 0, 0, 0, self.r_f * self.torque, 0],\n [0, 0, 0, 0, 0, 0, 0, self.r_f * self.torque],\n [-self.r_f * self.torque, 0, 0, 0, 0, 0, 0, 0],\n [0, -self.r_f * self.torque, 0, 0, 0, 0, 0, 0],\n [0, 0, -self.r_f * self.torque, 0, 0, 0, 0, 0],\n [0, 0, 0, -self.r_f * self.torque, 0, 0, 0, 0],\n [0, 0, 0, 0, -self.r_f * self.torque, 0, 0, 0],\n [0, 0, 0, 0, 0, -self.r_f * self.torque, 0, 0],\n [0, 0, 0, 0, 0, 0, -self.r_f * self.torque, 0],\n [0, 0, 0, 0, 0, 0, 0, -self.r_f * self.torque],\n [0, 0, 0, 0, 0, 0, 0, 0]]\n self.setup_keys_to_action()\n self.debug_count = 0\n self.qmax = [0] * 8\n self.fmax = [0] * 8\n\n def _RecordMassInfoFromURDF(self):\n self._base_mass_urdf = p.getDynamicsInfo(self.minitaur, self.BASE_LINK_ID)[0]\n self._leg_masses_urdf = []\n self._leg_masses_urdf.append(p.getDynamicsInfo(self.minitaur, self.LEG_LINK_ID[0])[0])\n self._leg_masses_urdf.append(p.getDynamicsInfo(self.minitaur, self.MOTOR_LINK_ID[0])[0])\n\n def _BuildJointNameToIdDict(self):\n num_joints = p.getNumJoints(self.minitaur)\n self.joint_name_to_id = {}\n for i in range(num_joints):\n joint_info = p.getJointInfo(self.minitaur, i)\n self.joint_name_to_id[joint_info[1].decode(\"UTF-8\")] = joint_info[0]\n\n def _BuildMotorIdList(self):\n self._motor_id_list = [self.joint_name_to_id[motor_name] for motor_name in self.MOTOR_NAMES]\n\n def robot_specific_reset(self, reload_urdf=True):\n \"\"\"Reset the minitaur to its initial states.\n\n Args:\n reload_urdf: Whether to reload the urdf file. 
If not, Reset() just places\n the minitaur back to its starting position.\n \"\"\"\n if self.minitaur is None:\n self.minitaur = self.robot_ids[0]\n\n if self.joint_name_to_id is None:\n self._BuildJointNameToIdDict()\n self._BuildMotorIdList()\n self._RecordMassInfoFromURDF()\n self.ResetPose(add_constraint=True)\n self._overheat_counter = np.zeros(self.num_motors)\n self._motor_enabled_list = [True] * self.num_motors\n if self.on_rack:\n p.createConstraint(self.minitaur, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0],\n [0, 0, 1])\n self.ResetPose(add_constraint=True)\n\n def _SetMotorTorqueById(self, motor_id, torque):\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=motor_id,\n controlMode=p.TORQUE_CONTROL,\n force=torque)\n\n def _SetDesiredMotorAngleById(self, motor_id, desired_angle):\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=motor_id,\n controlMode=p.POSITION_CONTROL,\n targetPosition=desired_angle,\n positionGain=self._kp,\n velocityGain=self._kd,\n force=self.max_force)\n\n def _SetDesiredMotorAngleByName(self, motor_name, desired_angle):\n self._SetDesiredMotorAngleById(self.joint_name_to_id[motor_name], desired_angle)\n\n def calc_potential(self):\n return 0\n\n def setup_keys_to_action(self):\n self.keys_to_action = {\n (ord('s'), ): 0, ## backward\n (ord('w'), ): 1, ## forward\n (ord('d'), ): 2, ## turn right\n (ord('a'), ): 3, ## turn left\n (): 4\n }\n\n def ResetPose(self, add_constraint):\n \"\"\"Reset the pose of the minitaur.\n\n Args:\n add_constraint: Whether to add a constraint at the joints of two feet.\n \"\"\"\n for i in range(self.num_legs):\n self._ResetPoseForLeg(i, add_constraint)\n\n def _ResetPoseForLeg(self, leg_id, add_constraint):\n \"\"\"Reset the initial pose for the leg.\n\n Args:\n leg_id: It should be 0, 1, 2, or 3, which represents the leg at\n front_left, back_left, front_right and back_right.\n add_constraint: Whether to add a constraint at the joints of two feet.\n \"\"\"\n knee_friction_force = 0\n half_pi = math.pi / 2.0\n knee_angle = -2.1834\n\n leg_position = self.LEG_POSITION[leg_id]\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"motor_\" + leg_position + \"L_joint\"],\n self.motor_direction[2 * leg_id] * half_pi,\n targetVelocity=0)\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"L_link\"],\n self.motor_direction[2 * leg_id] * knee_angle,\n targetVelocity=0)\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"motor_\" + leg_position + \"R_joint\"],\n self.motor_direction[2 * leg_id + 1] * half_pi,\n targetVelocity=0)\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"R_link\"],\n self.motor_direction[2 * leg_id + 1] * knee_angle,\n targetVelocity=0)\n if add_constraint:\n p.createConstraint(self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"R_link\"],\n self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"L_link\"],\n p.JOINT_POINT2POINT, [0, 0, 0], self.KNEE_CONSTRAINT_POINT_RIGHT,\n self.KNEE_CONSTRAINT_POINT_LEFT)\n\n if self.accurate_motor_model_enabled or self.pd_control_enabled:\n # Disable the default motor in pybullet.\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"motor_\" + leg_position +\n \"L_joint\"]),\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"motor_\" + leg_position +\n \"R_joint\"]),\n 
controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n\n else:\n self._SetDesiredMotorAngleByName(\"motor_\" + leg_position + \"L_joint\",\n self.motor_direction[2 * leg_id] * half_pi)\n self._SetDesiredMotorAngleByName(\"motor_\" + leg_position + \"R_joint\",\n self.motor_direction[2 * leg_id + 1] * half_pi)\n\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"knee_\" + leg_position +\n \"L_link\"]),\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"knee_\" + leg_position +\n \"R_link\"]),\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n\n def GetBasePosition(self):\n \"\"\"Get the position of minitaur's base.\n\n Returns:\n The position of minitaur's base.\n \"\"\"\n position, _ = (p.getBasePositionAndOrientation(self.minitaur))\n return position\n\n def GetBaseOrientation(self):\n \"\"\"Get the orientation of minitaur's base, represented as quaternion.\n\n Returns:\n The orientation of minitaur's base.\n \"\"\"\n _, orientation = (p.getBasePositionAndOrientation(self.minitaur))\n return orientation\n\n def GetActionDimension(self):\n \"\"\"Get the length of the action list.\n\n Returns:\n The length of the action list.\n \"\"\"\n return self.num_motors\n\n def GetObservationUpperBound(self):\n \"\"\"Get the upper bound of the observation.\n\n Returns:\n The upper bound of an observation. See GetObservation() for the details\n of each element of an observation.\n \"\"\"\n upper_bound = np.array([0.0] * self.GetObservationDimension())\n upper_bound[0:self.num_motors] = math.pi # Joint angle.\n upper_bound[self.num_motors:2 * self.num_motors] = (motor.MOTOR_SPEED_LIMIT\n ) # Joint velocity.\n upper_bound[2 * self.num_motors:3 * self.num_motors] = (motor.OBSERVED_TORQUE_LIMIT\n ) # Joint torque.\n upper_bound[3 * self.num_motors:] = 1.0 # Quaternion of base orientation.\n return upper_bound\n\n def GetObservationLowerBound(self):\n \"\"\"Get the lower bound of the observation.\"\"\"\n return -self.GetObservationUpperBound()\n\n def GetObservationDimension(self):\n \"\"\"Get the length of the observation list.\n\n Returns:\n The length of the observation list.\n \"\"\"\n return len(self.GetObservation())\n\n def calc_state(self):\n return self.GetObservation()\n\n def GetObservation(self):\n \"\"\"Get the observations of minitaur.\n\n It includes the angles, velocities, torques and the orientation of the base.\n\n Returns:\n The observation list. observation[0:8] are motor angles. observation[8:16]\n are motor velocities, observation[16:24] are motor torques.\n observation[24:28] is the orientation of the base, in quaternion form.\n \"\"\"\n observation = []\n observation.extend(self.GetMotorAngles().tolist())\n observation.extend(self.GetMotorVelocities().tolist())\n observation.extend(self.GetMotorTorques().tolist())\n observation.extend(list(self.GetBaseOrientation()))\n return observation\n\n def apply_action(self, motor_commands):\n \"\"\"Set the desired motor angles to the motors of the minitaur.\n\n Note (hzyjerry): motor commands are set based on desired angles, not torques\n\n The desired motor angles are clipped based on the maximum allowed velocity.\n If the pd_control_enabled is True, a torque is calculated according to\n the difference between current and desired joint angle, as well as the joint\n velocity. This torque is exerted to the motor. 
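Concretely, the plain PD branch below computes\n torque = -kp * (q - motor_commands) - kd * qdot,\n i.e. a spring toward the commanded angle plus velocity damping.\n 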
For more information about\n PD control, please refer to: https://en.wikipedia.org/wiki/PID_controller.\n\n Args:\n motor_commands: The eight desired motor angles.\n \"\"\"\n #print(\"motor commands 1\", motor_commands)\n if self.motor_velocity_limit < np.inf:\n current_motor_angle = self.GetMotorAngles()\n motor_commands_max = (current_motor_angle + self.time_step * self.motor_velocity_limit)\n motor_commands_min = (current_motor_angle - self.time_step * self.motor_velocity_limit)\n #motor_commands = np.clip(motor_commands, motor_commands_min, motor_commands_max)\n #print(\"motor commands 2\", motor_commands)\n if self.accurate_motor_model_enabled or self.pd_control_enabled:\n q = self.GetMotorAngles()\n qdot = self.GetMotorVelocities()\n if self.accurate_motor_model_enabled:\n actual_torque, observed_torque = self._motor_model.convert_to_torque(\n motor_commands, q, qdot)\n #print(\"q\", q, \"qdot\", qdot)\n #print(\"motor commands\", motor_commands)\n #print(\"actual torque\", actual_torque, \"observed torque\", observed_torque)\n self.debug_count += 1\n #if self.debug_count == 30:\n # sys.exit()\n for i in range(len(self.qmax)):\n if q[i] > self.qmax[i]:\n self.qmax[i] = q[i]\n #print(\"Q max\", self.qmax)\n\n if self.motor_overheat_protection:\n for i in range(self.num_motors):\n if abs(actual_torque[i]) > self.OVERHEAT_SHUTDOWN_TORQUE:\n self._overheat_counter[i] += 1\n else:\n self._overheat_counter[i] = 0\n if (self._overheat_counter[i] >\n self.OVERHEAT_SHUTDOWN_TIME / self.time_step):\n self._motor_enabled_list[i] = False\n # The torque is already in the observation space because we use\n # GetMotorAngles and GetMotorVelocities.\n self.observed_motor_torques = observed_torque\n #actual_torque.fill(0.0)\n\n # Transform into the motor space when applying the torque.\n self.applied_motor_torques = np.multiply(actual_torque, self.motor_direction)\n for motor_id, motor_torque, motor_enabled in zip(self._motor_id_list,\n self.applied_motor_torques,\n self._motor_enabled_list):\n if motor_enabled:\n self._SetMotorTorqueById(motor_id, motor_torque)\n else:\n self._SetMotorTorqueById(motor_id, 0)\n #print(\"Apply motor\", self.applied_motor_torques)\n for i in range(len(self.fmax)):\n if motor_commands[i] > self.fmax[i]:\n self.fmax[i] = motor_commands[i]\n #print(\"F max\", self.fmax)\n\n else:\n torque_commands = -self._kp * (q - motor_commands) - self._kd * qdot\n\n # The torque is already in the observation space because we use\n # GetMotorAngles and GetMotorVelocities.\n self.observed_motor_torques = torque_commands\n\n # Transform into the motor space when applying the torque.\n self.applied_motor_torques = np.multiply(self.observed_motor_torques,\n self.motor_direction)\n\n for motor_id, motor_torque in zip(self._motor_id_list, self.applied_motor_torques):\n self._SetMotorTorqueById(motor_id, motor_torque)\n print(\"Apply motor\", self.applied_motor_torques)\n else:\n motor_commands_with_direction = np.multiply(motor_commands, self.motor_direction)\n for motor_id, motor_command_with_direction in zip(self._motor_id_list,\n motor_commands_with_direction):\n print(\"command\", motor_command_with_direction)\n self._SetDesiredMotorAngleById(motor_id, motor_command_with_direction)\n\n def GetMotorAngles(self):\n \"\"\"Get the eight motor angles at the current moment.\n\n Returns:\n Motor angles.\n \"\"\"\n motor_angles = [\n p.getJointState(self.minitaur, motor_id)[0] for motor_id in self._motor_id_list\n ]\n motor_angles = np.multiply(motor_angles, self.motor_direction)\n return 
motor_angles\n\n def GetMotorVelocities(self):\n \"\"\"Get the velocity of all eight motors.\n\n Returns:\n Velocities of all eight motors.\n \"\"\"\n motor_velocities = [\n p.getJointState(self.minitaur, motor_id)[1] for motor_id in self._motor_id_list\n ]\n motor_velocities = np.multiply(motor_velocities, self.motor_direction)\n return motor_velocities\n\n def GetMotorTorques(self):\n \"\"\"Get the amount of torques the motors are exerting.\n\n Returns:\n Motor torques of all eight motors.\n \"\"\"\n if self.accurate_motor_model_enabled or self.pd_control_enabled:\n return self.observed_motor_torques\n else:\n motor_torques = [\n p.getJointState(self.minitaur, motor_id)[3] for motor_id in self._motor_id_list\n ]\n motor_torques = np.multiply(motor_torques, self.motor_direction)\n return motor_torques\n\n def ConvertFromLegModel(self, actions):\n \"\"\"Convert the actions that use leg model to the real motor actions.\n\n Args:\n actions: The theta, phi of the leg model.\n Returns:\n The eight desired motor angles that can be used in ApplyAction().\n \"\"\"\n motor_angle = copy.deepcopy(actions)\n scale_for_singularity = 1\n offset_for_singularity = 1.5\n half_num_motors = int(self.num_motors / 2)\n quater_pi = math.pi / 4\n for i in range(self.num_motors):\n action_idx = i // 2\n forward_backward_component = (\n -scale_for_singularity * quater_pi *\n (actions[action_idx + half_num_motors] + offset_for_singularity))\n extension_component = (-1)**i * quater_pi * actions[action_idx]\n if i >= half_num_motors:\n extension_component = -extension_component\n motor_angle[i] = (math.pi + forward_backward_component + extension_component)\n return motor_angle\n\n def GetBaseMassFromURDF(self):\n \"\"\"Get the mass of the base from the URDF file.\"\"\"\n return self._base_mass_urdf\n\n def GetLegMassesFromURDF(self):\n \"\"\"Get the mass of the legs from the URDF file.\"\"\"\n return self._leg_masses_urdf\n\n def SetBaseMass(self, base_mass):\n p.changeDynamics(self.minitaur, self.BASE_LINK_ID, mass=base_mass)\n\n def SetLegMasses(self, leg_masses):\n \"\"\"Set the mass of the legs.\n\n A leg includes leg_link and motor. All four leg_links have the same mass,\n which is leg_masses[0]. All four motors have the same mass, which is\n leg_mass[1].\n\n Args:\n leg_masses: The leg masses. leg_masses[0] is the mass of the leg link.\n leg_masses[1] is the mass of the motor.\n \"\"\"\n for link_id in self.LEG_LINK_ID:\n p.changeDynamics(self.minitaur, link_id, mass=leg_masses[0])\n for link_id in self.MOTOR_LINK_ID:\n p.changeDynamics(self.minitaur, link_id, mass=leg_masses[1])\n\n def SetFootFriction(self, foot_friction):\n \"\"\"Set the lateral friction of the feet.\n\n Args:\n foot_friction: The lateral friction coefficient of the foot. 
This value is\n shared by all four feet.\n \"\"\"\n for link_id in self.FOOT_LINK_ID:\n p.changeDynamics(self.minitaur, link_id, lateralFriction=foot_friction)\n\n def SetBatteryVoltage(self, voltage):\n if self.accurate_motor_model_enabled:\n self._motor_model.set_voltage(voltage)\n\n def SetMotorViscousDamping(self, viscous_damping):\n if self.accurate_motor_model_enabled:\n self._motor_model.set_viscous_damping(viscous_damping)\n\n\nclass Minitaur(MinitaurBase):\n '''Wrapper class for gibson interface\n \n Attributes:\n self.eyes\n self.resolution\n self.walk_target_x, self.walk_target_y\n self.mjcf_scaling\n self.observation_space\n self.action_space\n self.sensor_space\n\n Interface:\n self.apply_action()\n self.calc_state()\n self.addToScene()\n '''\n\n def __init__(self, config, env, pd_control_enabled=True, accurate_motor_model_enabled=True):\n MinitaurBase.__init__(self, config, env, pd_control_enabled, accurate_motor_model_enabled)\n\n def calc_state(self):\n return MinitaurBase.GetObservation(self)\n",
 "import gibson2\nfrom gibson2.core.physics.interactive_objects import VisualMarker, InteractiveObj, BoxShape, YCBObject, VisualShape\nfrom gibson2.core.physics.robot_locomotors import Turtlebot\nfrom gibson2.utils.utils import parse_config, rotate_vector_3d, l2_distance, quatToXYZW, cartesian_to_polar\nfrom gibson2.envs.base_env import BaseEnv\nfrom transforms3d.euler import euler2quat\nfrom collections import OrderedDict\nimport argparse\nfrom transforms3d.quaternions import quat2mat, qmult\nimport gym\nimport numpy as np\nimport os\nimport pybullet as p\nfrom IPython import embed\nimport cv2\nimport time\nimport collections\nimport logging\n\n\nclass NavigateEnv(BaseEnv):\n \"\"\"\n We define navigation environments following Anderson, Peter, et al. 'On evaluation of embodied navigation agents.'\n arXiv preprint arXiv:1807.06757 (2018). 
(https://arxiv.org/pdf/1807.06757.pdf)\n \"\"\"\n def __init__(\n self,\n config_file,\n model_id=None,\n mode='headless',\n action_timestep=1 / 10.0,\n physics_timestep=1 / 240.0,\n automatic_reset=False,\n device_idx=0,\n render_to_tensor=False\n ):\n \"\"\"\n :param config_file: config_file path\n :param model_id: override model_id in config file\n :param mode: headless or gui mode\n :param action_timestep: environment executes action per action_timestep second\n :param physics_timestep: physics timestep for pybullet\n :param automatic_reset: whether to automatically reset after an episode finishes\n :param device_idx: which GPU to run the simulation and rendering on\n \"\"\"\n super(NavigateEnv, self).__init__(config_file=config_file,\n model_id=model_id,\n mode=mode,\n action_timestep=action_timestep,\n physics_timestep=physics_timestep,\n device_idx=device_idx,\n render_to_tensor=render_to_tensor)\n self.automatic_reset = automatic_reset\n\n def load_task_setup(self):\n \"\"\"\n Load task setup, including initialization, termination condition, reward, collision checking, discount factor\n \"\"\"\n # initial and target pose\n self.initial_pos = np.array(self.config.get('initial_pos', [0, 0, 0]))\n self.initial_orn = np.array(self.config.get('initial_orn', [0, 0, 0]))\n self.target_pos = np.array(self.config.get('target_pos', [5, 5, 0]))\n self.target_orn = np.array(self.config.get('target_orn', [0, 0, 0]))\n\n self.initial_pos_z_offset = self.config.get('initial_pos_z_offset', 0.1)\n check_collision_distance = self.initial_pos_z_offset * 0.5\n # s = 0.5 * G * (t ** 2)\n check_collision_distance_time = np.sqrt(check_collision_distance / (0.5 * 9.8))\n self.check_collision_loop = int(check_collision_distance_time / self.physics_timestep)\n\n self.additional_states_dim = self.config.get('additional_states_dim', 0)\n self.goal_dim = self.config.get('goal_dim', 0)\n self.base_proprioceptive_states_dim = self.config.get('base_proprioceptive_states_dim', 0)\n self.arm_proprioceptive_states_dim = self.config.get('arm_proprioceptive_states_dim', 0)\n\n self.goal_format = self.config.get('goal_format', 'polar')\n\n # termination condition\n self.dist_tol = self.config.get('dist_tol', 0.5)\n self.max_step = self.config.get('max_step', 500)\n self.max_collisions_allowed = self.config.get('max_collisions_allowed', 500)\n\n # reward\n self.reward_type = self.config.get('reward_type', 'l2')\n assert self.reward_type in ['geodesic', 'l2', 'sparse']\n\n self.success_reward = self.config.get('success_reward', 10.0)\n self.slack_reward = self.config.get('slack_reward', -0.01)\n\n # reward weight\n self.potential_reward_weight = self.config.get('potential_reward_weight', 1.0)\n self.collision_reward_weight = self.config.get('collision_reward_weight', -0.1)\n\n # ignore the agent's collision with these body ids\n self.collision_ignore_body_b_ids = set(self.config.get('collision_ignore_body_b_ids', []))\n # ignore the agent's collision with these link ids of itself\n self.collision_ignore_link_a_ids = set(self.config.get('collision_ignore_link_a_ids', []))\n\n # discount factor\n self.discount_factor = self.config.get('discount_factor', 0.99)\n\n self.num_obstacles = self.config.get('num_obstacles', 0)\n self.obstacle_type = self.config.get('obstacle_type', 'block')\n\n print(\"NUM OBSTACLES: {}\".format(self.num_obstacles))\n print(\"TYPE OBSTACLE: {}\".format(self.obstacle_type))\n print(\"TASK TYPE: {}\".format(self.config[\"task\"]))\n print(\"TOLERANCE: {}\".format(self.dist_tol))\n \n 
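# Book-keeping for the obstacle course: obstacle objects are created in\n # load_obstacles() below, and the bounding walls in load_walls().\n 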
self.num_walls = 5\n self.obstacles = []\n self.obs_dir = []\n self.obs_positions = []\n \n self.reset_step = 25\n self.walls = []\n\n self._num_envs = 1\n\n def load_observation_space(self):\n \"\"\"\n Load observation space\n \"\"\"\n self.output = self.config['output']\n self.image_width = self.config.get('image_width', 128)\n self.image_height = self.config.get('image_height', 128)\n observation_space = OrderedDict()\n if 'close_to_goal' in self.output: \n self.close_to_goal_dim = 1\n self.close_to_goal_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(self.close_to_goal_dim,),\n dtype=np.float32)\n observation_space['close_to_goal'] = self.close_to_goal_space\n if 'sensor' in self.output:\n self.sensor_dim = self.additional_states_dim\n self.sensor_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(self.sensor_dim,),\n dtype=np.float32)\n observation_space['sensor'] = self.sensor_space\n if 'base_proprioceptive' in self.output:\n self.base_proprioceptive_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(self.base_proprioceptive_states_dim,),\n dtype=np.float32)\n observation_space['base_proprioceptive'] = self.base_proprioceptive_space\n if 'arm_proprioceptive' in self.output:\n self.arm_proprioceptive_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(self.arm_proprioceptive_states_dim,),\n dtype=np.float32)\n observation_space['arm_proprioceptive'] = self.arm_proprioceptive_space\n if 'goal' in self.output:\n self.goal_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(self.goal_dim,),\n dtype=np.float32)\n observation_space['goal'] = self.goal_space\n if 'last_camera_mask_indices' in self.output:\n self.last_camera_mask_indices_space = gym.spaces.Box(\n low=-np.inf,\n high=np.inf,\n shape=(1,),\n dtype=np.int64)\n observation_space['last_camera_mask_indices'] = self.last_camera_mask_indices_space\n if 'rgb' in self.output:\n self.rgb_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.image_height, self.image_width, 3),\n dtype=np.float32)\n observation_space['rgb'] = self.rgb_space\n if 'wrist_rgb' in self.output: \n self.wrist_rgb_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.image_height, self.image_width, 3),\n dtype=np.float32)\n observation_space['wrist_rgb'] = self.wrist_rgb_space\n if 'depth' in self.output:\n self.depth_noise_rate = self.config.get('depth_noise_rate', 0.0)\n self.depth_low = self.config.get('depth_low', 0.0)\n self.depth_high = self.config.get('depth_high', 10.0)\n self.depth_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.image_height, self.image_width, 1),\n dtype=np.float32)\n observation_space['depth'] = self.depth_space\n if 'wrist_depth' in self.output:\n self.depth_noise_rate = self.config.get('depth_noise_rate', 0.0)\n self.depth_low = self.config.get('depth_low', 0.0)\n self.depth_high = self.config.get('depth_high', 10.0)\n self.wrist_depth_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.image_height, self.image_width, 1),\n dtype=np.float32)\n observation_space['wrist_depth'] = self.wrist_depth_space\n if 'rgbd' in self.output:\n self.rgbd_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.image_height, self.image_width, 4),\n dtype=np.float32)\n observation_space['rgbd'] = self.rgbd_space\n if 'seg' in self.output:\n self.seg_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.image_height, self.image_width, 1),\n dtype=np.float32)\n observation_space['seg'] = self.seg_space\n if 'wrist_seg' in self.output:\n self.wrist_seg_space = 
gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.image_height, self.image_width, 1),\n dtype=np.float32)\n observation_space['wrist_seg'] = self.wrist_seg_space\n if 'scan' in self.output:\n self.scan_noise_rate = self.config.get('scan_noise_rate', 0.0)\n self.n_horizontal_rays = self.config.get('n_horizontal_rays', 128)\n self.n_vertical_beams = self.config.get('n_vertical_beams', 1)\n assert self.n_vertical_beams == 1, 'scan can only handle one vertical beam for now'\n self.laser_linear_range = self.config.get('laser_linear_range', 10.0)\n self.laser_angular_range = self.config.get('laser_angular_range', 180.0)\n self.min_laser_dist = self.config.get('min_laser_dist', 0.05)\n self.laser_link_name = self.config.get('laser_link_name', 'scan_link')\n self.scan_space = gym.spaces.Box(low=0.0,\n high=1.0,\n shape=(self.n_horizontal_rays * self.n_vertical_beams, 1),\n dtype=np.float32)\n observation_space['scan'] = self.scan_space\n if 'rgb_filled' in self.output: # use filler\n try:\n import torch.nn as nn\n import torch\n from torchvision import datasets, transforms\n from gibson2.learn.completion import CompletionNet\n except:\n raise Exception('Trying to use rgb_filled (\"the goggle\"), but torch is not installed. Try \"pip install torch torchvision\".')\n\n self.comp = CompletionNet(norm=nn.BatchNorm2d, nf=64)\n self.comp = torch.nn.DataParallel(self.comp).cuda()\n self.comp.load_state_dict(\n torch.load(os.path.join(gibson2.assets_path, 'networks', 'model.pth')))\n self.comp.eval()\n\n self.observation_space = gym.spaces.Dict(observation_space)\n\n def load_action_space(self):\n \"\"\"\n Load action space\n \"\"\"\n self.action_space = self.robots[0].action_space\n\n def load_visualization(self):\n \"\"\"\n Load visualization, such as initial and target position, shortest path, etc\n \"\"\"\n if (self.mode != 'gui' and self.mode != 'iggui' and self.mode != 'pbgui' and self.mode !='headless'):\n return\n \n '''\n cyl_length = 0.2\n self.initial_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,\n rgba_color=[1, 0, 0, 1],\n radius=0.5,\n length=cyl_length,\n initial_offset=[0, 0, cyl_length / 2.0])\n self.target_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,\n rgba_color=[0, 0, 1, 1],\n radius=0.5,\n length=cyl_length,\n initial_offset=[0, 0, cyl_length / 2.0])\n self.initial_pos_vis_obj.load()\n self.target_pos_vis_obj.load()\n\n if self.scene.build_graph:\n self.num_waypoints_vis = 250\n self.waypoints_vis = [VisualMarker(visual_shape=p.GEOM_CYLINDER,\n rgba_color=[0, 1, 0, 0.3],\n radius=0.1,\n length=cyl_length,\n initial_offset=[0, 0, cyl_length / 2.0])\n for _ in range(self.num_waypoints_vis)]\n for waypoint in self.waypoints_vis:\n waypoint.load()\n '''\n\n # add visual objects\n self.visual_object_at_initial_target_pos = self.config.get(\n 'visual_object_at_initial_target_pos', False)\n\n if self.visual_object_at_initial_target_pos:\n #self.initial_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,\n # rgba_color=[1, 0, 0, 0.95],\n # radius=0.02,\n # length=5)\n #self.target_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,\n # rgba_color=[1, 1, 0, 0.7],\n # radius=0.02,\n # length=5)\n \n if self.obstacle_type == 'realistic':\n original_size = np.array([0.07733, 0.169027, 0.218797])\n scale = self.dist_tol * 2 / original_size\n if self.config['task'] == 'pointgoal':\n scale = 0.2 / original_size\n self.target_pos_vis_obj = YCBObject('003_cracker_box', scale=scale, collision=False)\n \n elif self.obstacle_type == 'block':\n self.target_pos_vis_obj = 
VisualMarker(visual_shape=p.GEOM_SPHERE,\n rgba_color=[1, 0, 0, 1],\n radius=0.09)\n #self.initial_pos_vis_obj.load()\n \n\n if self.config.get('target_visual_object_visible_to_agent', False):\n self.simulator.import_object(self.target_pos_vis_obj, class_id=2)\n #self.simulator.import_object(self.target_pos_vis_obj_exact)\n else:\n self.target_pos_vis_obj.load()\n #self.target_pos_vis_obj_exact.load()\n # set mass to 0.0 to avoid gravity\n \n if self.obstacle_type == 'realistic':\n p.changeDynamics(self.target_pos_vis_obj.body_id, -1, mass=0.0)\n\n def load_obstacles(self):\n for i in range(self.num_obstacles):\n if self.obstacle_type == 'realistic':\n obstacle = VisualShape(\n os.path.join(gibson2.assets_path, 'models/quadrotor/quadrotor_base.obj'),\n scale=[0.025, 0.2, 0.2])\n self.simulator.import_object(obstacle)\n # set mass to 0.0 to avoid gravity\n for joint_id in range(-1, p.getNumJoints(obstacle.body_id)):\n p.changeDynamics(obstacle.body_id, joint_id, mass=0.0)\n \n elif self.obstacle_type == 'block':\n obstacle = BoxShape(dim=[0.075, 0.6, 0.075], \n visual_only=False, \n mass=0, \n color=[1, 1, 0, 0.95])\n self.simulator.import_object(obstacle, class_id=1)\n obstacle.load()\n self.obstacles.append(obstacle)\n\n def load_walls(self):\n back_wall = BoxShape(pos=[-2.0, 0, 1.0], \n dim=[0.1, 1.5, 1.0], \n visual_only=False, \n mass=1000, \n color=[1, 1, 1, 1])\n\n front_wall = BoxShape(pos=[8.0, 0, 1.0], \n dim=[0.1, 1.5, 1.0], \n visual_only=False, \n mass=1000, \n color=[1, 1, 1, 1])\n\n left_wall = BoxShape(pos=[3.0, 1.6, 1.0], \n dim=[5.1, 0.1, 1.0], \n visual_only=False, \n mass=1000, \n color=[1, 1, 1, 1])\n\n right_wall = BoxShape(pos=[3.0, -1.6, 1.0], \n dim=[5.1, 0.1, 1.0], \n visual_only=False, \n mass=1000, \n color=[1, 1, 1, 1])\n ceiling = BoxShape(pos=[3, 0, 2.05],\n dim=[5.2, 1.8, 0.05],\n visual_only=True,\n mass=0,\n color=[1, 1, 1, 1])\n\n self.simulator.import_object(back_wall, class_id=0)\n self.simulator.import_object(front_wall, class_id=0)\n self.simulator.import_object(left_wall, class_id=0)\n self.simulator.import_object(right_wall, class_id=0)\n self.simulator.import_object(ceiling, class_id=0)\n\n # back_wall.load()\n # front_wall.load()\n # left_wall.load()\n # right_wall.load()\n\n self.walls.append(back_wall)\n self.walls.append(front_wall)\n self.walls.append(left_wall)\n self.walls.append(right_wall)\n self.walls.append(ceiling)\n\n self.wall_constraints = []\n for wall in self.walls:\n constraint = p.createConstraint(\n 0, -1, wall.body_id, -1, p.JOINT_FIXED,\n [0, 0, 1],\n wall.get_position(),\n [0, 0, 0],\n wall.get_orientation(),\n [0, 0, 0, 1])\n self.wall_constraints.append(constraint)\n\n def load_miscellaneous_variables(self):\n \"\"\"\n Load miscellaneous variables for book keeping\n \"\"\"\n self.current_step = 0\n self.collision_step = 0\n self.current_episode = 0\n self.floor_num = 0\n\n def load(self):\n \"\"\"\n Load navigation environment\n \"\"\"\n super(NavigateEnv, self).load()\n self.load_task_setup()\n self.load_observation_space()\n self.load_action_space()\n self.load_walls()\n self.load_obstacles()\n self.load_visualization()\n self.load_miscellaneous_variables()\n\n def global_to_local(self, pos):\n \"\"\"\n Convert a 3D point in global frame to agent's local frame\n :param pos: a 3D point in global frame\n :return: the same 3D point in agent's local frame\n \"\"\"\n return rotate_vector_3d(pos - self.robots[0].get_position(), *self.robots[0].get_rpy())\n\n def get_additional_states(self):\n \"\"\"\n :return: non-perception 
observation, such as goal location\n \"\"\"\n additional_states = []\n #additional_states = self.global_to_local(self.target_pos)[:2]\n #if self.goal_format == 'polar':\n # additional_states = np.array(cartesian_to_polar(additional_states[0], additional_states[1]))\n\n #if self.config['task'] == 'reaching': \n # additional_states = np.append(additional_states, self.target_pos[2:])\n\n #additional_states = []\n # linear velocity along the x-axis\n linear_velocity = rotate_vector_3d(self.robots[0].get_linear_velocity(),\n *self.robots[0].get_rpy())[0]\n # angular velocity along the z-axis\n angular_velocity = rotate_vector_3d(self.robots[0].get_angular_velocity(),\n *self.robots[0].get_rpy())[2]\n\n additional_states = np.append(additional_states, [linear_velocity, angular_velocity])\n\n self.robots[0].calc_state()\n additional_states = np.append(additional_states, np.sin(self.robots[0].joint_position[7]))\n additional_states = np.append(additional_states, np.cos(self.robots[0].joint_position[7]))\n\n if self.config['task'] == 'reaching':\n # End-effector\n end_effector_pos_local = self.global_to_local(self.robots[0].get_end_effector_position())\n additional_states = np.append(additional_states, end_effector_pos_local)\n\n # Height\n #additional_states = np.append(additional_states, self.target_pos[2:])\n\n # L2 distance between end-effector and goal\n #additional_states = np.append(additional_states, self.get_l2_potential())\n\n # Joint positions and velocities \n self.robots[0].calc_state()\n additional_states = np.append(additional_states, np.sin(self.robots[0].joint_position[2:7]))\n additional_states = np.append(additional_states, np.cos(self.robots[0].joint_position[2:7]))\n additional_states = np.append(additional_states, self.robots[0].joint_velocity[2:7])\n #additional_states = np.append(additional_states, self.robots[0].joint_torque\n\n assert additional_states.shape[0] == self.additional_states_dim, \\\n 'additional states dimension mismatch {} v.s. {}'.format(additional_states.shape[0], self.additional_states_dim)\n\n return additional_states\n\n def get_base_proprioceptive_states(self):\n\n base_proprioceptive_states = []\n\n # linear velocity along the x-axis\n linear_velocity = rotate_vector_3d(self.robots[0].get_linear_velocity(),\n *self.robots[0].get_rpy())[0]\n # angular velocity along the z-axis\n angular_velocity = rotate_vector_3d(self.robots[0].get_angular_velocity(),\n *self.robots[0].get_rpy())[2]\n\n base_proprioceptive_states = np.append(base_proprioceptive_states, [linear_velocity, angular_velocity])\n\n # Camera location\n self.robots[0].calc_state()\n base_proprioceptive_states = np.append(base_proprioceptive_states, np.sin(self.robots[0].joint_position[7]))\n base_proprioceptive_states = np.append(base_proprioceptive_states, np.cos(self.robots[0].joint_position[7]))\n\n assert base_proprioceptive_states.shape[0] == self.base_proprioceptive_states_dim, \\\n 'base proprioceptive states dimension mismatch {} v.s. 
{}'.format(base_proprioceptive_states.shape[0], self.base_proprioceptive_states_dim)\n\n return base_proprioceptive_states\n\n def get_arm_proprioceptive_states(self):\n\n arm_proprioceptive_states = []\n\n # End-effector Position\n end_effector_pos_local = self.global_to_local(self.robots[0].get_end_effector_position())\n arm_proprioceptive_states = np.append(arm_proprioceptive_states, end_effector_pos_local)\n\n arm_proprioceptive_states = np.append(arm_proprioceptive_states, np.sin(self.robots[0].joint_position[2:7]))\n arm_proprioceptive_states = np.append(arm_proprioceptive_states, np.cos(self.robots[0].joint_position[2:7]))\n arm_proprioceptive_states = np.append(arm_proprioceptive_states, self.robots[0].joint_velocity[2:7])\n\n assert arm_proprioceptive_states.shape[0] == self.arm_proprioceptive_states_dim, \\\n 'arm proprioceptive states dimension mismatch {} v.s. {}'.format(arm_proprioceptive_states.shape[0], self.arm_proprioceptive_states_dim)\n\n return arm_proprioceptive_states\n\n def get_goal(self):\n \"\"\"\n :return: goal location\n \"\"\"\n goal = []\n goal = self.global_to_local(self.target_pos)[:2]\n if self.goal_format == 'polar':\n goal = np.array(cartesian_to_polar(goal[0], goal[1]))\n goal = np.array([goal[0], np.sin(goal[1]), np.cos(goal[1])])\n goal = np.append(goal, self.target_pos[2:])\n\n assert goal.shape[0] == self.goal_dim, \\\n 'goal state dimension mismatch {} v.s. {}'.format(goal.shape[0], self.goal_dim)\n\n return goal\n\n def get_close_to_goal(self):\n if self.robots[0].get_position()[0] >= 1.5*self.num_obstacles: \n \treturn 1.0\n else: \n \treturn 0.0\n\n\n def add_naive_noise_to_sensor(self, sensor_reading, noise_rate, noise_value=1.0):\n \"\"\"\n Add naive sensor dropout to perceptual sensor, such as RGBD and LiDAR scan\n :param sensor_reading: raw sensor reading, range must be between [0.0, 1.0]\n :param noise_rate: how much noise to inject, 0.05 means 5% of the data will be replaced with noise_value\n :param noise_value: noise_value to overwrite raw sensor reading\n :return: sensor reading corrupted with noise\n \"\"\"\n if noise_rate <= 0.0:\n return sensor_reading\n\n assert len(sensor_reading[(sensor_reading < 0.0) | (sensor_reading > 1.0)]) == 0,\\\n 'sensor reading has to be between [0.0, 1.0]'\n\n valid_mask = np.random.choice(2, sensor_reading.shape, p=[noise_rate, 1.0 - noise_rate])\n sensor_reading[valid_mask == 0] = noise_value\n return sensor_reading\n\n def get_depth(self):\n \"\"\"\n :return: depth sensor reading, normalized to [0.0, 1.0]\n \"\"\"\n depth = -self.simulator.renderer.render_robot_cameras(modes=('3d'))[0][:, :, 2:3]\n # 0.0 is a special value for invalid entries\n depth[depth < self.depth_low] = 0.0\n depth[depth > self.depth_high] = self.depth_high\n\n # re-scale depth to [0.0, 1.0]\n depth /= self.depth_high\n depth = self.add_naive_noise_to_sensor(depth, self.depth_noise_rate, noise_value=0.0)\n\n #if np.isnan(depth).any():\n # print(\"Has Nan\")\n # depth = np.nan_to_num(depth)\n\n #elif not np.isnan(depth).any():\n # print(\"Doesn't have Nan's\")\n\n return depth\n\n def get_wrist_depth(self):\n \"\"\"\n :return: wrist cam depth sensor reading, normalized to [0.0, 1.0]\n \"\"\"\n depth = -self.simulator.renderer.render_robot_cameras(modes=('3d'))[1][:, :, 2:3]\n # 0.0 is a special value for invalid entries\n depth[depth < self.depth_low] = 0.0\n depth[depth > self.depth_high] = self.depth_high\n\n # re-scale depth to [0.0, 1.0]\n depth /= self.depth_high\n depth = self.add_naive_noise_to_sensor(depth, 
self.depth_noise_rate, noise_value=0.0)\n\n #if np.isnan(depth).any():\n # print(\"Has Nan\")\n # depth = np.nan_to_num(depth)\n\n #elif not np.isnan(depth).any():\n # print(\"Doesn't have Nan's\")\n\n return depth\n\n def get_rgb(self):\n \"\"\"\n :return: RGB sensor reading, normalized to [0.0, 1.0]\n \"\"\"\n return self.simulator.renderer.render_robot_cameras(modes=('rgb'))[0][:, :, :3]\n\n def get_wrist_rgb(self):\n \"\"\"\n :return: wrist cam RGB sensor reading, normalized to [0.0, 1.0]\n \"\"\"\n return self.simulator.renderer.render_robot_cameras(modes=('rgb'))[1][:, :, :3]\n\n def get_pc(self):\n \"\"\"\n :return: pointcloud sensor reading\n \"\"\"\n return self.simulator.renderer.render_robot_cameras(modes=('3d'))[0]\n\n def get_normal(self):\n \"\"\"\n :return: surface normal reading\n \"\"\"\n return self.simulator.renderer.render_robot_cameras(modes='normal')\n\n def get_seg(self):\n \"\"\"\n :return: semantic segmentation mask, normalized to [0.0, 1.0]\n \"\"\"\n seg = self.simulator.renderer.render_robot_cameras(modes='seg')[0][:, :, 0:1]\n #if self.num_object_classes is not None:\n # seg = np.clip(seg * 255.0 / self.num_object_classes, 0.0, 1.0)\n\n # seg = np.clip(seg * 255.0 / (self.num_walls + self.num_obstacles + 2) , 0.0, 1.0)\n\n # 0: building, 1: obstacles, 2: goal\n seg = seg * 255.0 / 2.0\n\n # all_but_goal = seg < 1\n # seg[all_but_goal] = 0\n\n return seg\n\n def get_wrist_seg(self):\n \"\"\"\n :return: wrist cam semantic segmentation mask, normalized to [0.0, 1.0]\n \"\"\"\n seg = self.simulator.renderer.render_robot_cameras(modes='seg')[1][:, :, 0:1]\n #if self.num_object_classes is not None:\n # seg = np.clip(seg * 255.0 / self.num_object_classes, 0.0, 1.0)\n\n # seg = np.clip(seg * 255.0 / (self.num_walls + self.num_obstacles + 2) , 0.0, 1.0)\n\n # 0: building, 1: obstacles, 2: goal\n seg = seg * 255.0 / 2.0\n\n # all_but_goal = seg < 1\n # seg[all_but_goal] = 0\n\n return seg\n\n def get_scan(self):\n \"\"\"\n :return: LiDAR sensor reading, normalized to [0.0, 1.0]\n \"\"\"\n laser_angular_half_range = self.laser_angular_range / 2.0\n if self.laser_link_name not in self.robots[0].parts:\n raise Exception('Trying to simulate LiDAR sensor, but laser_link_name cannot be found in the robot URDF file. Please add a link named laser_link_name at the intended laser pose. 
Feel free to check out assets/models/turtlebot/turtlebot.urdf and examples/configs/turtlebot_p2p_nav.yaml for examples.')\n laser_pose = self.robots[0].parts[self.laser_link_name].get_pose()\n angle = np.arange(-laser_angular_half_range / 180 * np.pi,\n laser_angular_half_range / 180 * np.pi,\n self.laser_angular_range / 180.0 * np.pi / self.n_horizontal_rays)\n unit_vector_local = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angle])\n transform_matrix = quat2mat([laser_pose[6], laser_pose[3], laser_pose[4], laser_pose[5]]) # [x, y, z, w]\n unit_vector_world = transform_matrix.dot(unit_vector_local.T).T\n\n start_pose = np.tile(laser_pose[:3], (self.n_horizontal_rays, 1))\n start_pose += unit_vector_world * self.min_laser_dist\n end_pose = laser_pose[:3] + unit_vector_world * self.laser_linear_range\n results = p.rayTestBatch(start_pose, end_pose, 6) # numThreads = 6\n\n hit_fraction = np.array([item[2] for item in results]) # hit fraction = [0.0, 1.0] of self.laser_linear_range\n hit_fraction = self.add_naive_noise_to_sensor(hit_fraction, self.scan_noise_rate)\n scan = np.expand_dims(hit_fraction, 1)\n return scan\n\n def get_state(self, collision_links=[]):\n \"\"\"\n :param collision_links: collisions from last time step\n :return: observation as a dictionary\n \"\"\"\n state = OrderedDict()\n if 'close_to_goal' in self.output: \n \t state['close_to_goal'] = self.get_close_to_goal()\n if 'sensor' in self.output:\n state['sensor'] = self.get_additional_states()\n if 'base_proprioceptive' in self.output:\n state['base_proprioceptive'] = self.get_base_proprioceptive_states()\n if 'arm_proprioceptive' in self.output:\n state['arm_proprioceptive'] = self.get_arm_proprioceptive_states() \n if 'goal' in self.output:\n state['goal'] = self.get_goal()\n if 'last_camera_mask_indices' in self.output:\n state['last_camera_mask_indices'] = np.array([self.last_camera_mask_indices], dtype=np.int64)\n if 'rgb' in self.output:\n state['rgb'] = self.get_rgb()\n if 'wrist_rgb' in self.output:\n state['wrist_rgb'] = self.get_wrist_rgb()\n if 'depth' in self.output:\n state['depth'] = self.get_depth()\n if 'wrist_depth' in self.output:\n state['wrist_depth'] = self.get_wrist_depth()\n if 'pc' in self.output:\n state['pc'] = self.get_pc()\n if 'rgbd' in self.output:\n rgb = self.get_rgb()\n depth = self.get_depth()\n state['rgbd'] = np.concatenate((rgb, depth), axis=2)\n if 'normal' in self.output:\n state['normal'] = self.get_normal()\n if 'seg' in self.output:\n state['seg'] = self.get_seg()\n if 'wrist_seg' in self.output:\n state['wrist_seg'] = self.get_wrist_seg()\n if 'rgb_filled' in self.output:\n with torch.no_grad():\n tensor = transforms.ToTensor()((state['rgb'] * 255).astype(np.uint8)).cuda()\n rgb_filled = self.comp(tensor[None, :, :, :])[0].permute(1, 2, 0).cpu().numpy()\n state['rgb_filled'] = rgb_filled\n if 'scan' in self.output:\n state['scan'] = self.get_scan()\n return state\n\n def run_simulation(self):\n \"\"\"\n Run simulation for one action timestep (simulator_loop physics timestep)\n :return: collisions from this simulation\n \"\"\"\n collision_links = []\n for _ in range(self.simulator_loop):\n self.simulator_step()\n collision_links.append(list(p.getContactPoints(bodyA=self.robots[0].robot_ids[0])))\n self.simulator.sync()\n\n return self.filter_collision_links(collision_links)\n\n def filter_collision_links(self, collision_links):\n \"\"\"\n Filter out collisions that should be ignored\n :param collision_links: original collisions, a list of lists of collisions\n :return: 
filtered collisions\n \"\"\"\n new_collision_links = []\n for collision_per_sim_step in collision_links:\n new_collision_per_sim_step = []\n for item in collision_per_sim_step:\n # ignore collision with body b\n if item[2] in self.collision_ignore_body_b_ids:\n continue\n\n # ignore collision with robot link a\n if item[3] in self.collision_ignore_link_a_ids:\n continue\n\n # ignore self collision with robot link a (body b is also robot itself)\n if item[2] == self.robots[0].robot_ids[0] and item[4] in self.collision_ignore_link_a_ids:\n continue\n\n new_collision_per_sim_step.append(item)\n new_collision_links.append(new_collision_per_sim_step)\n return new_collision_links\n\n def get_position_of_interest(self):\n \"\"\"\n Get position of interest.\n :return: If pointgoal task, return base position. If reaching task, return end effector position.\n \"\"\"\n if self.config['task'] == 'pointgoal':\n return self.robots[0].get_position()\n elif self.config['task'] == 'reaching':\n return self.robots[0].get_end_effector_position()\n\n def get_shortest_path(self, from_initial_pos=False, entire_path=False):\n \"\"\"\n :param from_initial_pos: whether source is initial position rather than current position\n :param entire_path: whether to return the entire shortest path\n :return: shortest path and geodesic distance to the target position\n \"\"\"\n if from_initial_pos:\n source = self.initial_pos[:2]\n else:\n source = self.robots[0].get_position()[:2]\n target = self.target_pos[:2]\n return self.scene.get_shortest_path(self.floor_num, source, target, entire_path=entire_path)\n\n def get_geodesic_potential(self):\n \"\"\"\n :return: geodesic distance to the target position\n \"\"\"\n _, geodesic_dist = self.get_shortest_path()\n return geodesic_dist\n\n def get_l2_potential(self):\n \"\"\"\n :return: L2 distance to the target position\n \"\"\"\n return l2_distance(self.target_pos, self.get_position_of_interest())\n\n def is_goal_reached(self):\n return l2_distance(self.get_position_of_interest(), self.target_pos) < self.dist_tol\n\n def get_reward(self, collision_links=[], action=None, info={}):\n \"\"\"\n :param collision_links: collisions from last time step\n :param action: last action\n :param info: a dictionary to store additional info\n :return: reward, info\n \"\"\"\n collision_links_flatten = [item for sublist in collision_links for item in sublist]\n reward = self.slack_reward # |slack_reward| = 0.01 per step\n\n if self.reward_type == 'l2':\n new_potential = self.get_l2_potential()\n elif self.reward_type == 'geodesic':\n new_potential = self.get_geodesic_potential()\n potential_reward = self.potential - new_potential\n reward += potential_reward * self.potential_reward_weight # |potential_reward| ~= 0.1 per step\n self.potential = new_potential\n\n collision_reward = float(len(collision_links_flatten) > 0)\n self.collision_step += int(collision_reward)\n reward += collision_reward * self.collision_reward_weight # |collision_reward| ~= 1.0 per step if collision\n\n if self.is_goal_reached():\n reward += self.success_reward # |success_reward| = 10.0 per step\n return reward, info\n\n def get_termination(self, collision_links=[], action=None, info={}):\n \"\"\"\n :param collision_links: collisions from last time step\n :param info: a dictionary to store additional info\n :return: done, info\n \"\"\"\n done = False\n\n # goal reached\n if self.is_goal_reached():\n done = True\n info['success'] = True\n print('SUCCESS_GOAL_REACHED')\n\n # max collisions reached\n if self.collision_step > 
self.max_collisions_allowed:\n done = True\n info['success'] = False\n print('COLLISION')\n\n # time out\n elif self.current_step >= self.max_step:\n done = True\n info['success'] = False\n print('TIMEOUT')\n\n if done:\n print('RETURN:', self.episode_return)\n info['episode_length'] = self.current_step\n info['collision_step'] = self.collision_step\n info['path_length'] = self.path_length\n info['spl'] = float(info['success']) * min(1.0, self.geodesic_dist / self.path_length)\n\n return done, info\n\n def before_simulation(self):\n \"\"\"\n Cache bookkeeping data before simulation\n :return: cache\n \"\"\"\n return {'robot_position': self.robots[0].get_position()}\n\n def after_simulation(self, cache, collision_links):\n \"\"\"\n Accumulate evaluation stats\n :param cache: cache returned from before_simulation\n :param collision_links: collisions from last time step\n \"\"\"\n old_robot_position = cache['robot_position'][:2]\n new_robot_position = self.robots[0].get_position()[:2]\n self.path_length += l2_distance(old_robot_position, new_robot_position)\n\n def step_visualization(self):\n \n if (self.mode != 'gui' and self.mode != 'iggui' and self.mode != 'pbgui' and self.mode != 'headless'):\n return\n\n #self.initial_pos_vis_obj.set_position(self.initial_pos)\n\n if self.config['task'] == 'pointgoal':\n self.target_pos_vis_obj.set_position([self.target_pos[0], self.target_pos[1], 1.0])\n elif self.config['task'] == 'reaching':\n self.target_pos_vis_obj.set_position(self.target_pos)\n #self.target_pos_vis_obj_exact.set_position(self.target_pos)\n\n '''\n if self.scene.build_graph:\n shortest_path, _ = self.get_shortest_path(entire_path=True)\n floor_height = 0.0 if self.floor_num is None else self.scene.get_floor_height(self.floor_num)\n num_nodes = min(self.num_waypoints_vis, shortest_path.shape[0])\n for i in range(num_nodes):\n self.waypoints_vis[i].set_position(pos=np.array([shortest_path[i][0],\n shortest_path[i][1],\n floor_height]))\n for i in range(num_nodes, self.num_waypoints_vis):\n self.waypoints_vis[i].set_position(pos=np.array([0.0, 0.0, 100.0]))\n '''\n \n\n for i in range(self.num_obstacles):\n\n obs = list(self.obstacles[i].get_position())\n\n if obs[1] > 0.7 and not self.obs_dir[i]: \n self.obs_dir[i] = True\n elif obs[1] < -0.7 and self.obs_dir[i]: \n self.obs_dir[i] = False \n\n if self.obs_dir[i]:\n obs[1] += -0.02\n elif not self.obs_dir[i]: \n obs[1] += 0.02\n\n obs[0] = self.obs_positions[i][0]\n obs[2] = self.obs_positions[i][2]\n\n self.obstacles[i].set_position_orientation(obs, [0, 0, 0, 1])\n \n '''\n curr_obj_pos = list(self.obstacles[0].get_position())\n\n if (self.reset_step == 0):\n self.go_left = np.random.choice([0,1])\n self.reset_step = 25\n\n if curr_obj_pos[1] > 0.7 and not self.go_left: \n self.go_left = True\n elif curr_obj_pos[1] < -0.7 and self.go_left: \n self.go_left = False \n\n if self.go_left:\n curr_obj_pos[1] += -0.02\n self.reset_step -= 1 \n\n elif not self.go_left: \n curr_obj_pos[1] += 0.02\n self.reset_step -= 1 \n\n curr_obj_pos[0] = 1.0\n curr_obj_pos[2] = 0.075\n\n self.obstacles[0].set_position_orientation(curr_obj_pos, [0, 0, 0, 1])\n '''\n\n def step(self, action):\n \"\"\"\n apply robot's action and get state, reward, done and info, following OpenAI gym's convention\n :param action: a list of control signals\n :return: state, reward, done, info\n \"\"\"\n self.current_step += 1\n if action is not None:\n self.robots[0].apply_action(action)\n cache = self.before_simulation()\n collision_links = self.run_simulation()\n 
self.after_simulation(cache, collision_links)\n\n state = self.get_state(collision_links)\n info = {}\n reward, info = self.get_reward(collision_links, action, info)\n self.episode_return += reward\n done, info = self.get_termination(collision_links, action, info)\n self.step_visualization()\n\n if done and self.automatic_reset:\n info['last_observation'] = state\n state = self.reset()\n return state, reward, done, info\n\n\n def set_camera(self, camera_mask_indices):\n \"\"\"\n Set the camera based on the index in the input. If the index is 0, set it to most upwards position. \n If the index is 2, set it to most downwards position. Otherwise, it should be in the center. \n \"\"\"\n if camera_mask_indices[0] == 0: \n self.robots[0].ordered_joints[7].reset_joint_state(0.0, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(-0.52, 0.0)\n\n elif camera_mask_indices[0] == 2: \n self.robots[0].ordered_joints[7].reset_joint_state(0.0, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(0.52, 0.0)\n\n elif camera_mask_indices[0] == 3: \n self.robots[0].ordered_joints[7].reset_joint_state(0.52, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(-0.52, 0.0)\n\n elif camera_mask_indices[0] == 4: \n self.robots[0].ordered_joints[7].reset_joint_state(0.52, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(0.0, 0.0)\n\n elif camera_mask_indices[0] == 5: \n self.robots[0].ordered_joints[7].reset_joint_state(0.52, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(0.52, 0.0)\n\n elif camera_mask_indices[0] == 6: \n self.robots[0].ordered_joints[7].reset_joint_state(-0.52, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(-0.52, 0.0)\n\n elif camera_mask_indices[0] == 7: \n self.robots[0].ordered_joints[7].reset_joint_state(-0.52, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(0.0, 0.0)\n\n elif camera_mask_indices[0] == 8: \n self.robots[0].ordered_joints[7].reset_joint_state(-0.52, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(0.52, 0.0)\n\n else: \n self.robots[0].ordered_joints[7].reset_joint_state(0.0, 0.0)\n self.robots[0].ordered_joints[8].reset_joint_state(0.0, 0.0)\n\n self.last_camera_mask_indices = camera_mask_indices[0]\n\n def reset_agent(self):\n \"\"\"\n Reset the robot's joint configuration and base pose until no collision\n \"\"\"\n reset_success = False\n max_trials = 100\n for _ in range(max_trials):\n self.reset_initial_and_target_pos()\n if self.test_valid_position('robot', self.robots[0], self.initial_pos, self.initial_orn) and \\\n self.test_valid_position('robot', self.robots[0], self.target_pos):\n reset_success = True\n break\n\n if not reset_success:\n logging.warning(\"WARNING: Failed to reset robot without collision\")\n\n self.land('robot', self.robots[0], self.initial_pos, self.initial_orn)\n\n def reset_initial_and_target_pos(self):\n \"\"\"\n Reset initial_pos, initial_orn and target_pos\n \"\"\"\n return\n\n def check_collision(self, body_id):\n \"\"\"\n :param body_id: pybullet body id\n :return: whether the given body_id has no collision\n \"\"\"\n for _ in range(self.check_collision_loop):\n self.simulator_step()\n collisions = list(p.getContactPoints(bodyA=body_id))\n\n if logging.root.level <= logging.DEBUG: #Only going into this if it is for logging --> efficiency\n for item in collisions:\n logging.debug('bodyA:{}, bodyB:{}, linkA:{}, linkB:{}'.format(item[1], item[2], item[3], item[4]))\n\n if len(collisions) > 0:\n return False\n return True\n\n def set_pos_orn_with_z_offset(self, obj, pos, 
orn=None, offset=None):\n \"\"\"\n Reset position and orientation for the robot or the object\n :param obj: an instance of robot or object\n :param pos: position\n :param orn: orientation\n :param offset: z offset\n \"\"\"\n if orn is None:\n orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])\n\n if offset is None:\n offset = self.initial_pos_z_offset\n\n obj.set_position_orientation([pos[0], pos[1], pos[2] + offset],\n quatToXYZW(euler2quat(*orn), 'wxyz'))\n\n def test_valid_position(self, obj_type, obj, pos, orn=None):\n \"\"\"\n Test if the robot or the object can be placed with no collision\n :param obj_type: string \"robot\" or \"obj\"\n :param obj: an instance of robot or object\n :param pos: position\n :param orn: orientation\n :return: validity\n \"\"\"\n assert obj_type in ['robot', 'obj']\n\n self.set_pos_orn_with_z_offset(obj, pos, orn)\n\n if obj_type == 'robot':\n obj.robot_specific_reset()\n obj.keep_still()\n\n body_id = obj.robot_ids[0] if obj_type == 'robot' else obj.body_id\n return self.check_collision(body_id)\n\n def land(self, obj_type, obj, pos, orn):\n \"\"\"\n Land the robot or the object onto the floor, given a valid position and orientation\n :param obj_type: string \"robot\" or \"obj\"\n :param obj: an instance of robot or object\n :param pos: position\n :param orn: orientation\n \"\"\"\n assert obj_type in ['robot', 'obj']\n\n self.set_pos_orn_with_z_offset(obj, pos, orn)\n\n if obj_type == 'robot':\n obj.robot_specific_reset()\n obj.keep_still()\n\n body_id = obj.robot_ids[0] if obj_type == 'robot' else obj.body_id\n\n land_success = False\n # land for maximum 1 second, should fall down ~5 meters\n max_simulator_step = int(1.0 / self.physics_timestep)\n for _ in range(max_simulator_step):\n self.simulator_step()\n if len(p.getContactPoints(bodyA=body_id)) > 0:\n land_success = True\n break\n\n if not land_success:\n print(\"WARNING: Failed to land\")\n\n if obj_type == 'robot':\n obj.robot_specific_reset()\n\n def reset_variables(self):\n \"\"\"\n Reset bookkeeping variables for the next new episode\n \"\"\"\n self.current_episode += 1\n self.current_step = 0\n self.collision_step = 0\n self.path_length = 0.0\n self.episode_return = 0.0\n self.geodesic_dist = self.get_geodesic_potential()\n\n def reset(self):\n \"\"\"\n Reset episode\n \"\"\"\n self.reset_agent()\n self.obs_dir = [np.random.choice([0,1]) for i in range(self.num_obstacles)]\n self.obs_positions = [[1.5*(i+1), np.random.uniform(-0.7,0.7), np.random.choice([1.2,0.075])] for i in range(self.num_obstacles)]\n #self.obs_positions = [[1.5*(i+1), np.random.choice([-0.7,0.7]), np.random.choice([1.2,0.075])] for i in range(self.num_obstacles)]\n\n for i in range(self.num_obstacles):\n self.obstacles[i].set_position_orientation(self.obs_positions[i], [0,0,0,1])\n\n #self.walls[0].set_position_orientation([-2.0, 0, 1.0], [0, 0, 0, 1])\n #self.walls[1].set_position_orientation([8.0, 0, 1.0], [0, 0, 0, 1])\n #self.walls[2].set_position_orientation([3.0, 1.6, 1.0], [0, 0, 0, 1])\n #self.walls[3].set_position_orientation([3.0, -1.6, 1.0], [0, 0, 0, 1])\n\n self.step_visualization()\n\n self.simulator.sync()\n # default camera mask indices is 1, 0 degree\n self.last_camera_mask_indices = 1\n\n state = self.get_state()\n if self.reward_type == 'l2':\n self.potential = self.get_l2_potential()\n elif self.reward_type == 'geodesic':\n self.potential = self.get_geodesic_potential()\n self.reset_variables()\n\n return state\n\n\nclass NavigateRandomEnv(NavigateEnv):\n def __init__(\n self,\n config_file,\n 
model_id=None,\n mode='headless',\n action_timestep=1 / 10.0,\n physics_timestep=1 / 240.0,\n automatic_reset=False,\n random_height=True,\n device_idx=0,\n render_to_tensor=False\n ):\n \"\"\"\n :param config_file: config_file path\n :param model_id: override model_id in config file\n :param mode: headless or gui mode\n :param action_timestep: environment executes action per action_timestep second\n :param physics_timestep: physics timestep for pybullet\n :param automatic_reset: whether to automatic reset after an episode finishes\n :param random_height: whether to randomize height for target position (for reaching task)\n :param device_idx: device_idx: which GPU to run the simulation and rendering on\n \"\"\"\n super(NavigateRandomEnv, self).__init__(config_file,\n model_id=model_id,\n mode=mode,\n action_timestep=action_timestep,\n physics_timestep=physics_timestep,\n automatic_reset=automatic_reset,\n device_idx=device_idx,\n render_to_tensor=render_to_tensor)\n self.random_height = random_height\n\n self.target_dist_min = self.config.get('target_dist_min', 1.0)\n self.target_dist_max = self.config.get('target_dist_max', 10.0)\n\n def reset_initial_and_target_pos(self):\n \"\"\"\n Reset initial_pos, initial_orn and target_pos through randomization\n The geodesic distance (or L2 distance if traversable map graph is not built)\n between initial_pos and target_pos has to be between [self.target_dist_min, self.target_dist_max]\n \"\"\"\n '''\n _, self.initial_pos = self.scene.get_random_point_floor(self.floor_num, self.random_height)\n max_trials = 100\n dist = 0.0\n for _ in range(max_trials):\n _, self.target_pos = self.scene.get_random_point_floor(self.floor_num, self.random_height)\n if self.scene.build_graph:\n _, dist = self.get_shortest_path(from_initial_pos=True)\n else:\n dist = l2_distance(self.initial_pos, self.target_pos)\n if self.target_dist_min < dist < self.target_dist_max:\n break\n if not (self.target_dist_min < dist < self.target_dist_max):\n print(\"WARNING: Failed to sample initial and target positions\")\n self.initial_orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])\n '''\n self.initial_pos = np.array(self.config.get('initial_pos', [0, 0, 0]))\n self.initial_orn = np.array(self.config.get('initial_orn', [0, 0, 0]))\n self.target_pos = np.array(self.config.get('target_pos', [5, 5, 0]))\n self.target_orn = np.array(self.config.get('target_orn', [0, 0, 0]))\n\n self.target_pos[0] = np.random.uniform(5.0, 6.0)\n self.target_pos[1] = np.random.uniform(-0.5, 0.5)\n\n if self.config['task'] == 'pointgoal':\n self.target_pos[2] = 0.0\n elif self.config['task'] == 'reaching':\n self.target_pos[2] = np.random.uniform(0.5, 1.0)\n\n def reset(self):\n \"\"\"\n Reset episode\n \"\"\"\n self.floor_num = self.scene.get_random_floor()\n\n if self.scene.is_interactive:\n # reset scene objects\n self.scene.reset_scene_objects()\n else:\n # reset \"virtual floor\" to the correct height\n self.scene.reset_floor(floor=self.floor_num, additional_elevation=0.02)\n\n state = super(NavigateRandomEnv, self).reset()\n return state\n\nclass NavigateRandomHeightEnv(NavigateEnv):\n def __init__(\n self,\n config_file,\n model_id=None,\n mode='headless',\n action_timestep=1 / 10.0,\n physics_timestep=1 / 240.0,\n automatic_reset=False,\n random_height=True,\n device_idx=0,\n render_to_tensor=False\n ):\n \"\"\"\n :param config_file: config_file path\n :param model_id: override model_id in config file\n :param mode: headless or gui mode\n :param action_timestep: environment executes action 
per action_timestep second\n :param physics_timestep: physics timestep for pybullet\n :param automatic_reset: whether to automatic reset after an episode finishes\n :param random_height: whether to randomize height for target position (for reaching task)\n :param device_idx: device_idx: which GPU to run the simulation and rendering on\n \"\"\"\n super(NavigateRandomHeightEnv, self).__init__(config_file,\n model_id=model_id,\n mode=mode,\n action_timestep=action_timestep,\n physics_timestep=physics_timestep,\n automatic_reset=automatic_reset,\n device_idx=device_idx,\n render_to_tensor=render_to_tensor)\n\n def reset_initial_and_target_pos(self):\n \"\"\"\n Reset initial_pos, initial_orn and target_pos through randomization\n The geodesic distance (or L2 distance if traversable map graph is not built)\n between initial_pos and target_pos has to be between [self.target_dist_min, self.target_dist_max]\n \"\"\"\n self.initial_pos = np.array(self.config.get('initial_pos', [0, 0, 0]))\n self.initial_orn = np.array(self.config.get('initial_orn', [0, 0, 0]))\n self.target_pos = np.array(self.config.get('target_pos', [5, 5, 0]))\n self.target_orn = np.array(self.config.get('target_orn', [0, 0, 0]))\n\n self.target_pos[2] = np.random.uniform(0.5, 1.0)\n\n def reset(self):\n \"\"\"\n Reset episode\n \"\"\"\n self.floor_num = self.scene.get_random_floor()\n\n if self.scene.is_interactive:\n # reset scene objects\n self.scene.reset_scene_objects()\n else:\n # reset \"virtual floor\" to the correct height\n self.scene.reset_floor(floor=self.floor_num, additional_elevation=0.02)\n\n state = super(NavigateRandomHeightEnv, self).reset()\n return state\n\n\nclass NavigateRandomEnvSim2Real(NavigateRandomEnv):\n def __init__(self,\n config_file,\n model_id=None,\n mode='headless',\n action_timestep=1 / 10.0,\n physics_timestep=1 / 240.0,\n device_idx=0,\n render_to_tensor=False,\n automatic_reset=False,\n collision_reward_weight=0.0,\n track='static'\n ):\n super(NavigateRandomEnvSim2Real, self).__init__(config_file,\n model_id=model_id,\n mode=mode,\n action_timestep=action_timestep,\n physics_timestep=physics_timestep,\n automatic_reset=automatic_reset,\n random_height=False,\n device_idx=device_idx,\n render_to_tensor=render_to_tensor)\n self.collision_reward_weight = collision_reward_weight\n\n assert track in ['static', 'interactive', 'dynamic'], 'unknown track'\n self.track = track\n\n if self.track == 'interactive':\n self.interactive_objects_num_dups = 2\n self.interactive_objects = self.load_interactive_objects()\n # does not penalize collision with these interactive objects\n self.collision_ignore_body_b_ids |= set([obj.body_id for obj in self.interactive_objects])\n elif self.track == 'dynamic':\n self.num_dynamic_objects = 1\n self.dynamic_objects = []\n self.dynamic_objects_last_actions = []\n for _ in range(self.num_dynamic_objects):\n robot = Turtlebot(self.config)\n self.simulator.import_robot(robot)\n self.dynamic_objects.append(robot)\n self.dynamic_objects_last_actions.append(robot.action_space.sample())\n\n # dynamic objects will repeat their actions for 10 action timesteps\n self.dynamic_objects_action_repeat = 10\n\n def load_interactive_objects(self):\n \"\"\"\n Load interactive objects\n :return: a list of interactive objects\n \"\"\"\n interactive_objects = []\n interactive_objects_path = [\n 'object_2eZY2JqYPQE.urdf',\n 'object_lGzQi2Pk5uC.urdf',\n 'object_ZU6u5fvE8Z1.urdf',\n 'object_H3ygj6efM8V.urdf',\n 'object_RcqC01G24pR.urdf'\n ]\n\n for _ in 
range(self.interactive_objects_num_dups):\n for urdf_model in interactive_objects_path:\n obj = InteractiveObj(os.path.join(gibson2.assets_path, 'models/sample_urdfs', urdf_model))\n self.simulator.import_object(obj)\n interactive_objects.append(obj)\n return interactive_objects\n\n def reset_interactive_objects(self):\n \"\"\"\n Reset the poses of interactive objects to have no collisions with the scene mesh\n \"\"\"\n max_trials = 100\n for obj in self.interactive_objects:\n reset_success = False\n for _ in range(max_trials):\n _, pos = self.scene.get_random_point_floor(self.floor_num, self.random_height)\n orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])\n if self.test_valid_position('obj', obj, pos, orn):\n reset_success = True\n break\n\n if not reset_success:\n print(\"WARNING: Failed to reset interactive obj without collision\")\n\n self.land('obj', obj, pos, orn)\n\n def reset_dynamic_objects(self):\n \"\"\"\n Reset the poses of dynamic objects to have no collisions with the scene mesh\n \"\"\"\n max_trials = 100\n shortest_path, _ = self.get_shortest_path(entire_path=True)\n floor_height = 0.0 if self.floor_num is None else self.scene.get_floor_height(self.floor_num)\n for robot in self.dynamic_objects:\n reset_success = False\n for _ in range(max_trials):\n pos = shortest_path[np.random.choice(shortest_path.shape[0])]\n pos = np.array([pos[0], pos[1], floor_height])\n orn = np.array([0, 0, np.random.uniform(0, np.pi * 2)])\n if self.test_valid_position('robot', robot, pos, orn):\n reset_success = True\n break\n\n if not reset_success:\n print(\"WARNING: Failed to reset dynamic obj without collision\")\n\n self.land('robot', robot, pos, orn)\n\n def step_dynamic_objects(self):\n \"\"\"\n Apply actions to dynamic objects (default: temporally extended random walk)\n \"\"\"\n if self.current_step % self.dynamic_objects_action_repeat == 0:\n self.dynamic_objects_last_actions = [robot.action_space.sample() for robot in self.dynamic_objects]\n for robot, action in zip(self.dynamic_objects, self.dynamic_objects_last_actions):\n robot.apply_action(action)\n\n def step(self, action):\n \"\"\"\n Step dynamic objects as well\n \"\"\"\n if self.track == 'dynamic':\n self.step_dynamic_objects()\n\n return super(NavigateRandomEnvSim2Real, self).step(action)\n\n def reset(self):\n \"\"\"\n Reset episode\n \"\"\"\n self.floor_num = self.scene.get_random_floor()\n\n if self.scene.is_interactive:\n # reset scene objects\n self.scene.reset_scene_objects()\n else:\n # reset \"virtual floor\" to the correct height\n self.scene.reset_floor(floor=self.floor_num, additional_elevation=0.02)\n\n if self.track == 'interactive':\n self.reset_interactive_objects()\n\n state = NavigateEnv.reset(self)\n\n if self.track == 'dynamic':\n self.reset_dynamic_objects()\n state = self.get_state()\n\n return state\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--config',\n '-c',\n help='which config file to use [default: use yaml files in examples/configs]')\n parser.add_argument('--mode',\n '-m',\n choices=['headless', 'gui'],\n default='headless',\n help='which mode for simulation (default: headless)')\n parser.add_argument('--env_type',\n choices=['deterministic', 'random', 'sim2real'],\n default='deterministic',\n help='which environment type (deterministic | random | sim2real)')\n parser.add_argument('--sim2real_track',\n choices=['static', 'interactive', 'dynamic'],\n default='static',\n help='which sim2real track (static | interactive | dynamic)')\n args = 
parser.parse_args()\n\n if args.env_type == 'deterministic':\n nav_env = NavigateEnv(config_file=args.config,\n mode=args.mode,\n action_timestep=1.0 / 10.0,\n physics_timestep=1.0 / 40.0)\n elif args.env_type == 'random':\n nav_env = NavigateRandomEnv(config_file=args.config,\n mode=args.mode,\n action_timestep=1.0 / 10.0,\n physics_timestep=1.0 / 40.0)\n elif args.env_type == 'sim2real':\n nav_env = NavigateRandomEnvSim2Real(config_file=args.config,\n mode=args.mode,\n action_timestep=1.0 / 10.0,\n physics_timestep=1.0 / 40.0,\n track=args.sim2real_track)\n\n step_time_list = []\n for episode in range(100):\n print('Episode: {}'.format(episode))\n start = time.time()\n nav_env.reset()\n for _ in range(100): # 10 seconds\n action = nav_env.action_space.sample()\n state, reward, done, _ = nav_env.step(action)\n print('reward', reward)\n if done:\n break\n print('Episode finished after {} timesteps, took {} seconds.'.format(nav_env.current_step, time.time() - start))\n nav_env.clean()" ]
[ [ "numpy.zeros", "numpy.multiply" ], [ "numpy.expand_dims", "numpy.sqrt", "numpy.random.choice", "numpy.arange", "numpy.tile", "numpy.cos", "numpy.sin", "numpy.concatenate", "numpy.append", "torch.no_grad", "numpy.random.uniform", "torch.nn.DataParallel", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YuJungHeo/kbvqa-public
[ "c04bed5c60085ac3a551a8c196e6269befce1e5b" ]
[ "main.py" ]
[ "import os\nimport time\nimport torch\nimport argparse\nfrom tqdm import tqdm\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils import (\n load_files,\n save_pickle,\n fix_seed,\n print_model,\n CosineAnnealingWarmUpRestarts,\n)\nfrom model import (\n BAN,\n HAN,\n GGNN,\n GCN,\n MemNet,\n HypergraphTransformer,\n HypergraphTransformer_wohe,\n HypergraphTransformer_qsetkhe,\n HypergraphTransformer_qhekset,\n)\nfrom modules.logger import setup_logger, get_rank\nfrom dataloader import KVQA, PQnPQL, load_PQnPQL_data, FVQA, load_FVQA_data\n\n\ndef eval_epoch(model, loader, args):\n model.eval()\n total_right = 0\n total_right_aset = 0\n total_num = 0\n\n for b_idx, batch in enumerate(tqdm(loader)):\n batch = [b.cuda() for b in batch]\n labels = batch[-1]\n\n pred = model(batch)\n pred_score, pred_ans = pred.max(1)\n\n nz_idxs = labels.nonzero()\n right = labels[nz_idxs] == pred_ans[nz_idxs]\n total_right += right.sum().item()\n total_num += len(labels)\n\n if \"fvqa\" in args.data_name:\n _, top3_indices = torch.topk(pred, 3)\n for idx, indices in enumerate(top3_indices):\n if labels[idx] in indices:\n total_right_aset += 1\n\n if \"pq\" in args.data_name:\n aset = batch[-2]\n for idx, pred in enumerate(pred_ans):\n if pred in aset[idx]:\n total_right_aset += 1\n\n return total_right, total_right_aset, total_num\n\n\ndef inference(model, test_loader, ckpt_path, args, task_idx=-1, res=False):\n last_ckpt = os.path.join(ckpt_path, \"ckpt_best.pth.tar\")\n checkpoint = torch.load(last_ckpt)\n\n if list(checkpoint[\"state_dict\"].keys())[0].startswith(\"module.\"):\n checkpoint[\"state_dict\"] = {\n k[7:]: v for k, v in checkpoint[\"state_dict\"].items()\n }\n\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\"load: %s\" % (last_ckpt))\n\n total_right, total_right_aset, total_num = eval_epoch(model, test_loader, args)\n accuracy = total_right / total_num\n\n if \"pq\" in args.data_name:\n accuracy = total_right_aset / total_num\n\n return accuracy\n\n\ndef main():\n \"\"\"parse config file\"\"\"\n parser = argparse.ArgumentParser(description=\"experiments\")\n parser.add_argument(\"--model_name\", default=\"ht\")\n parser.add_argument(\"--data_name\", default=\"kvqa\")\n parser.add_argument(\"--cfg\", default=\"ht\")\n parser.add_argument(\"--exp_name\", default=\"dev\")\n parser.add_argument(\"--inference\", action=\"store_true\")\n parser.add_argument(\"--per_cate\", action=\"store_true\")\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--schedule\", action=\"store_true\")\n parser.add_argument(\"--selected\", action=\"store_true\")\n parser.add_argument(\"--abl_only_ga\", action=\"store_true\")\n parser.add_argument(\"--abl_only_sa\", action=\"store_true\")\n parser.add_argument(\"--abl_ans_fc\", action=\"store_true\")\n parser.add_argument(\"--split_seed\", type=int, default=1234)\n parser.add_argument(\"--wd\", type=float, default=0.0)\n parser.add_argument(\"--num_workers\", type=int, default=4)\n parser.add_argument(\"--max_epoch\", type=int, default=1000)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n parser.add_argument(\"--q_opt\", type=str, default=\"org\")\n parser.add_argument(\"--n_hop\", type=int, default=1)\n args = parser.parse_args()\n\n config_file = \"configs/%s.yaml\" % (args.cfg)\n model_cfg = load_files(config_file)\n\n fix_seed(model_cfg[\"MODEL\"][\"SEED\"])\n\n if args.debug == False:\n 
summary_path = model_cfg[\"RES\"][\"TB\"] + args.exp_name\n summary = SummaryWriter(summary_path)\n\n log_path = model_cfg[\"RES\"][\"LOG\"] + args.exp_name\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n\n ckpt_path = model_cfg[\"RES\"][\"CKPT\"] + args.exp_name\n if not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n\n logger = setup_logger(args.exp_name, log_path, get_rank())\n logger.info(model_cfg[\"MODEL\"])\n logger.info(args)\n\n # ------------ Construct Dataset Class ------------------------------------\n datasets = {}\n if args.data_name == \"kvqa\":\n modes = [\"train\", \"val\", \"test\"]\n n_node_lists = []\n for mode in modes:\n fname = ckpt_path + \"/%s_cache.pkl\" % (mode)\n if os.path.isfile(fname):\n datasets[mode] = load_files(fname)\n else:\n data = KVQA(model_cfg, args, mode)\n datasets[mode] = data\n save_pickle(data, fname)\n n_node_lists.append(max(datasets[mode].n_node))\n max_n_node = max(n_node_lists)\n\n for mode in modes:\n datasets[mode].max_n_node = max_n_node\n\n elif \"fvqa\" in args.data_name:\n train, test = load_FVQA_data(model_cfg, args)\n datasets[\"train\"] = FVQA(model_cfg, args, train)\n datasets[\"test\"] = FVQA(model_cfg, args, test)\n\n elif \"pq\" in args.data_name:\n train, val, test = load_PQnPQL_data(model_cfg, args)\n datasets[\"train\"] = PQnPQL(model_cfg, args, train)\n datasets[\"val\"] = PQnPQL(model_cfg, args, val)\n datasets[\"test\"] = PQnPQL(model_cfg, args, test)\n\n train_loader = DataLoader(\n datasets[\"train\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=True,\n )\n if \"fvqa\" in args.data_name:\n val_loader = DataLoader(\n datasets[\"test\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=True,\n )\n else:\n val_loader = DataLoader(\n datasets[\"val\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=True,\n )\n test_loader = DataLoader(\n datasets[\"test\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=False,\n )\n\n # ------------ Model -----------------------\n if args.model_name == \"ht\":\n model = HypergraphTransformer(model_cfg, args).cuda()\n elif args.model_name == \"ht_abl_wohe\":\n model = HypergraphTransformer_wohe(model_cfg, args).cuda()\n elif args.model_name == \"ht_abl_qset_khe\":\n model = HypergraphTransformer_qsetkhe(model_cfg, args).cuda()\n elif args.model_name == \"ht_abl_qhe_kset\":\n model = HypergraphTransformer_qhekset(model_cfg, args).cuda()\n elif args.model_name == \"ggnn\":\n model = GGNN(model_cfg, args, max_n_node).cuda()\n elif args.model_name == \"han\":\n model = HAN(model_cfg, args).cuda()\n elif args.model_name == \"ban\":\n model = BAN(model_cfg, args).cuda()\n elif args.model_name == \"memnet\":\n model = MemNet(model_cfg, args).cuda()\n elif args.model_name == \"gcn\":\n model = GCN(model_cfg, args).cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n lr_scheduler = CosineAnnealingWarmUpRestarts(\n optimizer, T_0=150, T_mult=1, eta_max=0.001, T_up=10, gamma=0.5\n )\n model.cuda()\n\n # ------------ Evaluate -----------------------\n if args.inference == True:\n if args.per_cate == False:\n test_acc_final = inference(model, test_loader, ckpt_path, args, res=False)\n logger.info(\"test accuracy (final) : %f\" % (test_acc_final))\n\n else: # analysis on question types (KVQA only)\n if args.data_name == \"kvqa\":\n cate_accu_test = []\n qtypes = 
load_files(model_cfg[\"DATASET\"][\"IDX2QTYPE\"])\n for task_idx in range(10):\n test = KVQA(model_cfg, args, \"test\", task_idx)\n test.max_n_node = max_n_node\n test_loader = DataLoader(\n test,\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=False,\n )\n accu = inference(\n model, test_loader, ckpt_path, args, task_idx=task_idx, res=True\n )\n cate_accu_test.append(accu)\n print(qtypes[:10])\n print(cate_accu_test)\n else:\n raise NotImplementedError(\n \"Datasets except KVQA do not have categories for questions. Set per_cate as False.\"\n )\n return 0\n\n # ------------ Training -----------------------\n train_loss = []\n best_acc = 0.0\n\n for e_idx in range(0, args.max_epoch):\n model.train()\n total_right = 0\n total_num = 0\n total_right_aset = 0\n for b_idx, batch in enumerate(tqdm(train_loader)):\n batch = [b.cuda() for b in batch]\n labels = batch[-1]\n pred = model(batch)\n pred_score, pred_ans = pred.max(1)\n loss = F.nll_loss(pred, labels)\n train_loss.append(loss.item())\n\n nz_idxs = labels.nonzero()\n right = labels[nz_idxs] == pred_ans[nz_idxs]\n total_right += right.sum().item()\n total_num += len(labels)\n\n if \"fvqa\" in args.data_name:\n _, top3_indices = torch.topk(pred, 3)\n for idx, indices in enumerate(top3_indices):\n if labels[idx] in indices:\n if labels[idx] != 0:\n total_right_aset += 1 # top-3 accuracy\n\n if \"pq\" in args.data_name:\n aset = batch[-2]\n for idx, pred in enumerate(pred_ans):\n if pred in aset[idx]:\n total_right_aset += 1\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if args.debug == False:\n summary.add_scalar(\n \"loss/train\", loss.item(), e_idx * len(train_loader) + b_idx\n )\n\n if args.schedule:\n lr_scheduler.step()\n\n if args.debug == False:\n tr_accu = total_right / total_num\n tr_accu_aset = total_right_aset / total_num\n summary.add_scalar(\"accu/train\", tr_accu, e_idx)\n\n if \"pq\" in args.data_name:\n summary.add_scalar(\"accu_aset/train\", tr_accu_aset, e_idx)\n logger.info(\n \"epoch %i train accuracy : %f, %i/%i / %f, %i/%i \"\n % (\n e_idx,\n tr_accu,\n total_right,\n total_num,\n tr_accu_aset,\n total_right_aset,\n total_num,\n )\n )\n else:\n logger.info(\n \"epoch %i train accuracy : %f, %i/%i\"\n % (e_idx, tr_accu, total_right, total_num)\n )\n\n with torch.no_grad():\n total_right_val, total_right_aset_val, total_num_val = eval_epoch(\n model, val_loader, args\n )\n\n if args.debug == False:\n val_acc = total_right_val / total_num_val\n val_acc_aset = total_right_aset_val / total_num_val\n summary.add_scalar(\"accu/val\", val_acc, e_idx)\n\n if \"pq\" in args.data_name:\n summary.add_scalar(\"accu_aset/val\", val_acc_aset, e_idx)\n logger.info(\n \"epoch %i val accuracy : %f, %i/%i / %f, %i/%i\"\n % (\n e_idx,\n val_acc,\n total_right_val,\n total_num_val,\n val_acc_aset,\n total_right_aset_val,\n total_num_val,\n )\n )\n val_acc = val_acc_aset\n else:\n logger.info(\n \"epoch %i val accuracy : %f, %i/%i\"\n % (e_idx, val_acc, total_right_val, total_num_val)\n )\n\n if val_acc >= best_acc:\n best_acc = val_acc\n torch.save(\n {\n \"epoch_idx\": e_idx,\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n },\n os.path.join(ckpt_path, \"ckpt_best.pth.tar\"),\n )\n logger.info(\"## Current VAL Best : %f\" % (best_acc))\n\n test_acc_final = inference(model, test_loader, ckpt_path, args)\n logger.info(\"## Test accuracy : %f\" % (test_acc_final))\n if \"pq\" in args.data_name:\n summary.add_scalar(\"accu_aset/test\", 
test_acc_final, 0)\n else:\n summary.add_scalar(\"accu/test\", test_acc_final, 0)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.functional.nll_loss", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.topk" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zfar-/BaselineWithNoise
[ "ca18d1f2aed36d571c50ed8e630eb38a87c79265" ]
[ "baselines/run.py" ]
[ "import sys\nimport multiprocessing\nimport os.path as osp\nimport gym\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\n\nfrom baselines.common.vec_env.vec_video_recorder import VecVideoRecorder\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nfrom baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\nfrom baselines.common.tf_util import get_session\nfrom baselines import logger\nfrom importlib import import_module\n\nfrom baselines.common.vec_env.vec_normalize import VecNormalize\n\ntry:\n from mpi4py import MPI\nexcept ImportError:\n print(\"can't import MPI \")\n MPI = None\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\n\ntry:\n import roboschool\nexcept ImportError:\n roboschool = None\n\n_game_envs = defaultdict(set)\nfor env in gym.envs.registry.all():\n # TODO: solve this with regexes\n env_type = env._entry_point.split(':')[0].split('.')[-1]\n _game_envs[env_type].add(env.id)\n\n# reading benchmark names directly from retro requires\n# importing retro here, and for some reason that crashes tensorflow\n# in ubuntu\n_game_envs['retro'] = {\n 'BubbleBobble-Nes',\n 'SuperMarioBros-Nes',\n 'TwinBee3PokoPokoDaimaou-Nes',\n 'SpaceHarrier-Nes',\n 'SonicTheHedgehog-Genesis',\n 'Vectorman-Genesis',\n 'FinalFight-Snes',\n 'SpaceInvaders-Snes',\n}\n\n\ndef train(args, extra_args):\n\n env_type, env_id = get_env_type(args.env)\n print(\"In the train function with env_type {} env_id {}\".format(env_type , env_id))\n # print('env_type: {}'.format(env_type))\n\n total_timesteps = int(args.num_timesteps)\n seed = args.seed\n\n learn = get_learn_function(args.alg)\n alg_kwargs = get_learn_function_defaults(args.alg, env_type)\n alg_kwargs.update(extra_args)\n\n\n\n print(\"Now called build_env env function with arg :: \",args)\n env = build_env(args)\n if args.save_video_interval != 0:\n env = VecVideoRecorder(env, osp.join(logger.Logger.CURRENT.dir, \"videos\"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)\n\n if args.network:\n alg_kwargs['network'] = args.network\n else:\n if alg_kwargs.get('network') is None:\n alg_kwargs['network'] = get_default_network(env_type)\n\n print('Training {} on {}:{} with arguments \\n{}'.format(args.alg, env_type, env_id, alg_kwargs))\n\n model = learn(\n env=env,\n seed=seed,\n total_timesteps=total_timesteps,\n **alg_kwargs\n )\n\n return model, env\n\n\ndef random_agent_ob_mean_std(env, nsteps=10000):\n ob = np.asarray(env.reset())\n if MPI.COMM_WORLD.Get_rank() == 0:\n obs = [ob]\n print(\"::: Entered the random action for 1000 steps ::: \")\n for _ in range(nsteps):\n ac = env.action_space.sample() # random action \n ob, _, done, _ = env.step(ac)\n if done:\n ob = env.reset()\n obs.append(np.asarray(ob))\n mean = np.mean(obs, 0).astype(np.float32)\n std = np.std(obs, 0).mean().astype(np.float32)\n else:\n mean = np.empty(shape=ob.shape, dtype=np.float32)\n std = np.empty(shape=(), dtype=np.float32)\n MPI.COMM_WORLD.Bcast(mean, root=0)\n MPI.COMM_WORLD.Bcast(std, root=0)\n return mean, std\n\ndef build_env(args):\n\n ncpu = multiprocessing.cpu_count()\n if sys.platform == 'darwin': ncpu //= 2\n nenv = args.num_env or ncpu\n alg = args.alg\n seed = args.seed\n\n env_type, env_id = get_env_type(args.env)\n print(\"In the build_env function with alg :: \",alg)\n if env_type in {'atari', 'retro'}:\n if alg == 'deepq':\n env = make_env(env_id, env_type, seed=seed, 
wrapper_kwargs={'frame_stack': True})\n elif alg == 'trpo_mpi':\n env = make_env(env_id, env_type, seed=seed)\n else:\n frame_stack_size = 4\n print(\"make_vec_env arguments env_id {} , env_type {} , nenv {} ,seed {} , gamestate {} reward_scale {}\".format(\n env_id , env_type , nenv , seed , args.gamestate , args.reward_scale))\n \n #>\n # print(\"Called environment for mean and std\")\n # env = make_vec_env(env_id, env_type, 1, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)\n # # env = VecFrameStack(env, frame_stack_size) ## No need for frame stacking while calculation of mean and std\n # ob_mean, ob_std = random_agent_ob_mean_std(env)\n # print(\" environment complete with mean {} and std {}\".format(ob_mean , ob_std))\n # del env \n #>\n\n env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)\n \n # print(\"Received env from make_vec_env type env {} and env \".format(\n # type(env) , env))\n print(\"ob_space {} and ac_space {} \".format(env.observation_space, env.action_space))\n env = VecFrameStack(env, frame_stack_size)\n\n\n print(\"After Frame stacking env would become \" )\n\n else:\n config = tf.ConfigProto(allow_soft_placement=True,\n intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n config.gpu_options.allow_growth = True\n get_session(config=config)\n\n env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale)\n\n if env_type == 'mujoco':\n env = VecNormalize(env)\n\n return env #, ob_mean, ob_std\n\n\ndef get_env_type(env_id):\n if env_id in _game_envs.keys():\n env_type = env_id\n env_id = [g for g in _game_envs[env_type]][0]\n else:\n env_type = None\n for g, e in _game_envs.items():\n if env_id in e:\n env_type = g\n break\n assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys())\n\n return env_type, env_id\n\n\ndef get_default_network(env_type):\n if env_type in {'atari', 'retro'}:\n return 'cnn'\n else:\n return 'mlp'\n\ndef get_alg_module(alg, submodule=None):\n submodule = submodule or alg\n try:\n # first try to import the alg module from baselines\n alg_module = import_module('.'.join(['baselines', alg, submodule]))\n except ImportError:\n # then from rl_algs\n alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))\n\n return alg_module\n\n\ndef get_learn_function(alg):\n return get_alg_module(alg).learn\n\n\ndef get_learn_function_defaults(alg, env_type):\n try:\n alg_defaults = get_alg_module(alg, 'defaults')\n kwargs = getattr(alg_defaults, env_type)()\n except (ImportError, AttributeError):\n kwargs = {}\n return kwargs\n\n\ndef parse_cmdline_kwargs(args):\n '''\n convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible\n '''\n def parse(v):\n\n assert isinstance(v, str)\n try:\n return eval(v)\n except (NameError, SyntaxError):\n return v\n\n return {k: parse(v) for k,v in parse_unknown_args(args).items()}\n\n\n\ndef main(args):\n # configure logger, disable logging in child MPI processes (with rank > 0)\n\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args(args)\n extra_args = parse_cmdline_kwargs(unknown_args)\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n rank = 0\n logger.configure()\n else:\n logger.configure(format_strs=[])\n rank = MPI.COMM_WORLD.Get_rank()\n\n print(\"Called the trained function\")\n model, env = train(args, extra_args)\n\n env.close()\n\n if args.save_path 
is not None and rank == 0:\n        save_path = osp.expanduser(args.save_path)\n        model.save(save_path)\n\n    if args.play:\n        logger.log(\"Running trained model\")\n        env = build_env(args)  # build_env returns only the env; the (mean, std) return is commented out above\n        obs = env.reset()\n        def initialize_placeholders(nlstm=128,**kwargs):\n            return np.zeros((args.num_env or 1, 2*nlstm)), np.zeros((1))\n        state, dones = initialize_placeholders(**extra_args)\n        while True:\n            actions, _, state, _ = model.step(obs, S=state, M=dones)\n            obs, _, done, _ = env.step(actions)\n            env.render()\n            done = done.any() if isinstance(done, np.ndarray) else done\n\n            if done:\n                obs = env.reset()\n\n        env.close()\n\n    return model\n\nif __name__ == '__main__':\n    main(sys.argv)\n" ]
[ [ "numpy.asarray", "tensorflow.ConfigProto", "numpy.std", "numpy.mean", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
99starman/fairseq
[ "a098a52f5c961dffd06fd9a14c4cf6b657f2f52d" ]
[ "fairseq/tasks/translation.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom dataclasses import dataclass, field\nimport itertools\nimport json\nimport logging\nimport os\nfrom typing import Optional\nfrom argparse import Namespace\nfrom omegaconf import II\n\nimport numpy as np\nfrom fairseq import metrics, utils\nfrom fairseq.data import (\n AppendTokenDataset,\n ConcatDataset,\n LanguagePairDataset,\n PrependTokenDataset,\n StripTokenDataset,\n TruncateDataset,\n data_utils,\n encoders,\n indexed_dataset,\n)\nfrom fairseq.data.indexed_dataset import get_available_dataset_impl\nfrom fairseq.dataclass import ChoiceEnum, FairseqDataclass\nfrom fairseq.tasks import FairseqTask, register_task\n\n\nEVAL_BLEU_ORDER = 4\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_langpair_dataset(\n data_path,\n split,\n src,\n src_dict,\n tgt,\n tgt_dict,\n combine,\n dataset_impl,\n upsample_primary,\n left_pad_source,\n left_pad_target,\n max_source_positions,\n max_target_positions,\n prepend_bos=False,\n load_alignments=False,\n truncate_source=False,\n append_source_id=False,\n num_buckets=0,\n shuffle=True,\n pad_to_multiple=1,\n prepend_bos_src=None,\n):\n def split_exists(split, src, tgt, lang, data_path):\n filename = os.path.join(data_path, \"{}.{}-{}.{}\".format(split, src, tgt, lang))\n return indexed_dataset.dataset_exists(filename, impl=dataset_impl)\n\n src_datasets = []\n tgt_datasets = []\n\n for k in itertools.count():\n split_k = split + (str(k) if k > 0 else \"\")\n\n # infer langcode\n if split_exists(split_k, src, tgt, src, data_path):\n prefix = os.path.join(data_path, \"{}.{}-{}.\".format(split_k, src, tgt))\n elif split_exists(split_k, tgt, src, src, data_path):\n prefix = os.path.join(data_path, \"{}.{}-{}.\".format(split_k, tgt, src))\n else:\n if k > 0:\n break\n else:\n raise FileNotFoundError(\n \"Dataset not found: {} ({})\".format(split, data_path)\n )\n\n src_dataset = data_utils.load_indexed_dataset(\n prefix + src, src_dict, dataset_impl\n )\n if truncate_source:\n src_dataset = AppendTokenDataset(\n TruncateDataset(\n StripTokenDataset(src_dataset, src_dict.eos()),\n max_source_positions - 1,\n ),\n src_dict.eos(),\n )\n src_datasets.append(src_dataset)\n\n tgt_dataset = data_utils.load_indexed_dataset(\n prefix + tgt, tgt_dict, dataset_impl\n )\n if tgt_dataset is not None:\n tgt_datasets.append(tgt_dataset)\n\n logger.info(\n \"{} {} {}-{} {} examples\".format(\n data_path, split_k, src, tgt, len(src_datasets[-1])\n )\n )\n\n if not combine:\n break\n\n assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0\n\n if len(src_datasets) == 1:\n src_dataset = src_datasets[0]\n tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None\n else:\n sample_ratios = [1] * len(src_datasets)\n sample_ratios[0] = upsample_primary\n src_dataset = ConcatDataset(src_datasets, sample_ratios)\n if len(tgt_datasets) > 0:\n tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)\n else:\n tgt_dataset = None\n\n if prepend_bos:\n assert hasattr(src_dict, \"bos_index\") and hasattr(tgt_dict, \"bos_index\")\n src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())\n if tgt_dataset is not None:\n tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())\n elif prepend_bos_src is not None:\n logger.info(f\"prepending src bos: {prepend_bos_src}\")\n src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)\n\n eos = None\n if 
append_source_id:\n src_dataset = AppendTokenDataset(\n src_dataset, src_dict.index(\"[{}]\".format(src))\n )\n if tgt_dataset is not None:\n tgt_dataset = AppendTokenDataset(\n tgt_dataset, tgt_dict.index(\"[{}]\".format(tgt))\n )\n eos = tgt_dict.index(\"[{}]\".format(tgt))\n\n align_dataset = None\n if load_alignments:\n align_path = os.path.join(data_path, \"{}.align.{}-{}\".format(split, src, tgt))\n if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):\n align_dataset = data_utils.load_indexed_dataset(\n align_path, None, dataset_impl\n )\n\n tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None\n return LanguagePairDataset(\n src_dataset,\n src_dataset.sizes,\n src_dict,\n tgt_dataset,\n tgt_dataset_sizes,\n tgt_dict,\n left_pad_source=left_pad_source,\n left_pad_target=left_pad_target,\n align_dataset=align_dataset,\n eos=eos,\n num_buckets=num_buckets,\n shuffle=shuffle,\n pad_to_multiple=pad_to_multiple,\n )\n\n\n@dataclass\nclass TranslationConfig(FairseqDataclass):\n data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"colon separated path to data directories list, will be iterated upon during epochs \"\n \"in round-robin manner; however, valid and test data are always in the first directory \"\n \"to avoid the need for repeating them in all directories\"\n },\n )\n source_lang: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"source language\",\n \"argparse_alias\": \"-s\",\n },\n )\n target_lang: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"target language\",\n \"argparse_alias\": \"-t\",\n },\n )\n lang: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"customized argument: language\",\n \"argparse_alias\": \"-lan\",\n },\n )\n load_alignments: bool = field(\n default=False, metadata={\"help\": \"load the binarized alignments\"}\n )\n left_pad_source: bool = field(\n default=True, metadata={\"help\": \"pad the source on the left\"}\n )\n left_pad_target: bool = field(\n default=False, metadata={\"help\": \"pad the target on the left\"}\n )\n max_source_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the source sequence\"}\n )\n max_target_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the target sequence\"}\n )\n upsample_primary: int = field(\n default=-1, metadata={\"help\": \"the amount of upsample primary dataset\"}\n )\n truncate_source: bool = field(\n default=False, metadata={\"help\": \"truncate source to max-source-positions\"}\n )\n num_batch_buckets: int = field(\n default=0,\n metadata={\n \"help\": \"if >0, then bucket source and target lengths into \"\n \"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations\"\n },\n )\n train_subset: str = II(\"dataset.train_subset\")\n dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(\n \"dataset.dataset_impl\"\n )\n required_seq_len_multiple: int = II(\"dataset.required_seq_len_multiple\")\n\n # options for reporting BLEU during validation\n eval_bleu: bool = field(\n default=False, metadata={\"help\": \"evaluation with BLEU scores\"}\n )\n eval_bleu_args: Optional[str] = field(\n default=\"{}\",\n metadata={\n \"help\": 'generation args for BLUE scoring, e.g., \\'{\"beam\": 4, \"lenpen\": 0.6}\\', as JSON string'\n },\n )\n eval_bleu_detok: str = field(\n default=\"space\",\n metadata={\n \"help\": \"detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; \"\n 
\"use 'space' to disable detokenization; see fairseq.data.encoders for other options\"\n },\n )\n eval_bleu_detok_args: Optional[str] = field(\n default=\"{}\",\n metadata={\"help\": \"args for building the tokenizer, if needed, as JSON string\"},\n )\n eval_tokenized_bleu: bool = field(\n default=False, metadata={\"help\": \"compute tokenized BLEU instead of sacrebleu\"}\n )\n eval_bleu_remove_bpe: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"remove BPE before computing BLEU\",\n \"argparse_const\": \"@@ \",\n },\n )\n eval_bleu_print_samples: bool = field(\n default=False, metadata={\"help\": \"print sample generations during validation\"}\n )\n\n\n@register_task(\"translation\", dataclass=TranslationConfig)\nclass TranslationTask(FairseqTask):\n \"\"\"\n Translate from one (source) language to another (target) language.\n\n Args:\n src_dict (~fairseq.data.Dictionary): dictionary for the source language\n tgt_dict (~fairseq.data.Dictionary): dictionary for the target language\n\n .. note::\n\n The translation task is compatible with :mod:`fairseq-train`,\n :mod:`fairseq-generate` and :mod:`fairseq-interactive`.\n \"\"\"\n\n cfg: TranslationConfig\n\n def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):\n super().__init__(cfg)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, cfg: TranslationConfig, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n paths = utils.split_paths(cfg.data)\n assert len(paths) > 0\n # find language pair automatically\n if cfg.source_lang is None or cfg.target_lang is None:\n cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])\n if cfg.source_lang is None or cfg.target_lang is None:\n raise Exception(\n \"Could not infer language pair, please provide it explicitly\"\n )\n\n # load dictionaries\n src_dict = cls.load_dictionary(\n os.path.join(paths[0], \"dict.{}.txt\".format(cfg.source_lang))\n )\n tgt_dict = cls.load_dictionary(\n os.path.join(paths[0], \"dict.{}.txt\".format(cfg.target_lang))\n )\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n logger.info(\"[{}] dictionary: {} types\".format(cfg.source_lang, len(src_dict)))\n logger.info(\"[{}] dictionary: {} types\".format(cfg.target_lang, len(tgt_dict)))\n\n return cls(cfg, src_dict, tgt_dict)\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n paths = utils.split_paths(self.cfg.data)\n assert len(paths) > 0\n if split != self.cfg.train_subset:\n # if not training data set, use the first shard for valid and test\n paths = paths[:1]\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n src, tgt = self.cfg.source_lang, self.cfg.target_lang\n\n self.datasets[split] = load_langpair_dataset(\n data_path,\n split,\n src,\n self.src_dict,\n tgt,\n self.tgt_dict,\n combine=combine,\n dataset_impl=self.cfg.dataset_impl,\n upsample_primary=self.cfg.upsample_primary,\n left_pad_source=self.cfg.left_pad_source,\n left_pad_target=self.cfg.left_pad_target,\n max_source_positions=self.cfg.max_source_positions,\n max_target_positions=self.cfg.max_target_positions,\n load_alignments=self.cfg.load_alignments,\n truncate_source=self.cfg.truncate_source,\n num_buckets=self.cfg.num_batch_buckets,\n shuffle=(split != 
\"test\"),\n pad_to_multiple=self.cfg.required_seq_len_multiple,\n )\n\n def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):\n return LanguagePairDataset(\n src_tokens,\n src_lengths,\n self.source_dictionary,\n tgt_dict=self.target_dictionary,\n constraints=constraints,\n )\n\n def build_model(self, cfg, from_checkpoint=False):\n model = super().build_model(cfg, from_checkpoint)\n if self.cfg.eval_bleu:\n detok_args = json.loads(self.cfg.eval_bleu_detok_args)\n self.tokenizer = encoders.build_tokenizer(\n Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)\n )\n\n gen_args = json.loads(self.cfg.eval_bleu_args)\n self.sequence_generator = self.build_generator(\n [model], Namespace(**gen_args)\n )\n return model\n\n def valid_step(self, sample, model, criterion):\n loss, sample_size, logging_output = super().valid_step(sample, model, criterion)\n if self.cfg.eval_bleu:\n bleu = self._inference_with_bleu(self.sequence_generator, sample, model)\n logging_output[\"_bleu_sys_len\"] = bleu.sys_len\n logging_output[\"_bleu_ref_len\"] = bleu.ref_len\n # we split counts into separate entries so that they can be\n # summed efficiently across workers using fast-stat-sync\n assert len(bleu.counts) == EVAL_BLEU_ORDER\n for i in range(EVAL_BLEU_ORDER):\n logging_output[\"_bleu_counts_\" + str(i)] = bleu.counts[i]\n logging_output[\"_bleu_totals_\" + str(i)] = bleu.totals[i]\n return loss, sample_size, logging_output\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n if self.cfg.eval_bleu:\n\n def sum_logs(key):\n import torch\n\n result = sum(log.get(key, 0) for log in logging_outputs)\n if torch.is_tensor(result):\n result = result.cpu()\n return result\n\n counts, totals = [], []\n for i in range(EVAL_BLEU_ORDER):\n counts.append(sum_logs(\"_bleu_counts_\" + str(i)))\n totals.append(sum_logs(\"_bleu_totals_\" + str(i)))\n\n if max(totals) > 0:\n # log counts as numpy arrays -- log_scalar will sum them correctly\n metrics.log_scalar(\"_bleu_counts\", np.array(counts))\n metrics.log_scalar(\"_bleu_totals\", np.array(totals))\n metrics.log_scalar(\"_bleu_sys_len\", sum_logs(\"_bleu_sys_len\"))\n metrics.log_scalar(\"_bleu_ref_len\", sum_logs(\"_bleu_ref_len\"))\n\n def compute_bleu(meters):\n import inspect\n\n try:\n from sacrebleu.metrics import BLEU\n\n comp_bleu = BLEU.compute_bleu\n except ImportError:\n # compatibility API for sacrebleu 1.x\n import sacrebleu\n\n comp_bleu = sacrebleu.compute_bleu\n\n fn_sig = inspect.getfullargspec(comp_bleu)[0]\n if \"smooth_method\" in fn_sig:\n smooth = {\"smooth_method\": \"exp\"}\n else:\n smooth = {\"smooth\": \"exp\"}\n bleu = comp_bleu(\n correct=meters[\"_bleu_counts\"].sum,\n total=meters[\"_bleu_totals\"].sum,\n sys_len=meters[\"_bleu_sys_len\"].sum,\n ref_len=meters[\"_bleu_ref_len\"].sum,\n **smooth,\n )\n return round(bleu.score, 2)\n\n metrics.log_derived(\"bleu\", compute_bleu)\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.cfg.max_source_positions, self.cfg.max_target_positions)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict\n\n def _inference_with_bleu(self, generator, sample, model):\n import sacrebleu\n\n def decode(toks, escape_unk=False):\n s = 
self.tgt_dict.string(\n toks.int().cpu(),\n self.cfg.eval_bleu_remove_bpe,\n # The default unknown string in fairseq is `<unk>`, but\n # this is tokenized by sacrebleu as `< unk >`, inflating\n # BLEU scores. Instead, we use a somewhat more verbose\n # alternative that is unlikely to appear in the real\n # reference, but doesn't get split into multiple tokens.\n unk_string=(\"UNKNOWNTOKENINREF\" if escape_unk else \"UNKNOWNTOKENINHYP\"),\n )\n if self.tokenizer:\n s = self.tokenizer.decode(s)\n return s\n\n gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)\n hyps, refs = [], []\n for i in range(len(gen_out)):\n hyps.append(decode(gen_out[i][0][\"tokens\"]))\n refs.append(\n decode(\n utils.strip_pad(sample[\"target\"][i], self.tgt_dict.pad()),\n escape_unk=True, # don't count <unk> as matches to the hypo\n )\n )\n if self.cfg.eval_bleu_print_samples:\n logger.info(\"example hypothesis: \" + hyps[0])\n logger.info(\"example reference: \" + refs[0])\n if self.cfg.eval_tokenized_bleu:\n return sacrebleu.corpus_bleu(hyps, [refs], tokenize=\"none\")\n else:\n return sacrebleu.corpus_bleu(hyps, [refs])\n" ]
[ [ "numpy.array", "torch.is_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fietensen/FlappyAI
[ "f8bff24e2ee62edf97a9b061183e28bf4924db09" ]
[ "game/game.py" ]
[ "from game.pole import PolesObject\nfrom game.agent import Agent\nfrom pygame import Rect\nimport pygame, struct\nimport numpy as np\n\nclass Game:\n def __init__(self, resolution):\n self.resolution = resolution\n self.screen = pygame.display.set_mode(resolution) # init window\n self.playerpos = (0, resolution[1]) # initial player position\n self.poles = []\n self.poles.append(PolesObject(resolution))\n self.agents = []\n self.birdimg = pygame.image.load(\"graphics/flappybird.png\")\n self.birdimg = pygame.transform.scale(self.birdimg, (resolution[0]//20, resolution[0]//25))\n\n def step(self):\n self.screen.fill((51,255,255))\n remove_poles = []\n for index, pole in enumerate(self.poles):\n if pole.x+pole.width < 0:\n remove_poles.append(index)\n else:\n pole.move()\n pole.display(self.screen)\n\n for remove_pole in remove_poles:\n self.poles.pop(remove_pole)\n\n if self.poles[-1].x+self.poles[-1].width < self.resolution[0]-np.random.uniform(\n self.resolution[0]//3,\n self.resolution[0]//2):\n self.poles.append(PolesObject(self.resolution))\n\n #view = pygame.surfarray.array2d(self.screen)&0xFF\n for agent in self.agents:\n agent.move()\n for pole in self.poles:\n pole_upper = Rect((pole.x, 0), (pole.width, pole.height))\n pole_lower = Rect((pole.x, pole.height+pole.gapsize),\n (pole.width, pole.resolution[1] - pole.height+pole.gapsize))\n\n if Rect(agent.rect).colliderect(pole_upper) or Rect(agent.rect).colliderect(pole_lower):\n agent.dead = True\n elif agent.y < 0 or agent.y > self.resolution[1]:\n agent.dead = True\n elif not agent.dead:\n agent.fitness += .001\n self.screen.blit(self.birdimg, agent.rect)\n\n pygame.display.flip()\n" ]
[ [ "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
soraros/nutils
[ "91119b12bdebf12a85eecb6a2247be2415f60e6f", "cb47070fc8aaf1caeb38c1d90d19ef3c107f114a" ]
[ "nutils/evaluable.py", "nutils/util.py" ]
[ "# Copyright (c) 2014 Evalf\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nThe function module defines the :class:`Evaluable` class and derived objects,\ncommonly referred to as nutils functions. They represent mappings from a\n:mod:`nutils.topology` onto Python space. The notabe class of :class:`Array`\nobjects map onto the space of Numpy arrays of predefined dimension and shape.\nMost functions used in nutils applicatons are of this latter type, including the\ngeometry and function bases for analysis.\n\nNutils functions are essentially postponed python functions, stored in a tree\nstructure of input/output dependencies. Many :class:`Array` objects have\ndirectly recognizable numpy equivalents, such as :class:`Sin` or\n:class:`Inverse`. By not evaluating directly but merely stacking operations,\ncomplex operations can be defined prior to entering a quadrature loop, allowing\nfor a higher level style programming. It also allows for automatic\ndifferentiation and code optimization.\n\nIt is important to realize that nutils functions do not map for a physical\nxy-domain but from a topology, where a point is characterized by the combination\nof an element and its local coordinate. This is a natural fit for typical finite\nelement operations such as quadrature. Evaluation from physical coordinates is\npossible only via inverting of the geometry function, which is a fundamentally\nexpensive and currently unsupported operation.\n\"\"\"\n\nimport typing\nif typing.TYPE_CHECKING:\n from typing_extensions import Protocol\nelse:\n Protocol = object\n\nfrom . 
import debug_flags, util, types, numeric, cache, warnings, parallel, sparse\nfrom ._graph import Node, RegularNode, DuplicatedLeafNode, InvisibleNode, Subgraph\nimport numpy, sys, itertools, functools, operator, inspect, numbers, builtins, re, types as builtin_types, abc, collections.abc, math, treelog as log, weakref, time, contextlib, subprocess, os\n\ngraphviz = os.environ.get('NUTILS_GRAPHVIZ')\n\nisevaluable = lambda arg: isinstance(arg, Evaluable)\n\ndef strictevaluable(value):\n if not isinstance(value, Evaluable):\n raise ValueError('expected an object of type {!r} but got {!r} with type {!r}'.format(Evaluable.__qualname__, value, type(value).__qualname__))\n return value\n\ndef simplified(value):\n return strictevaluable(value).simplified\n\nasdtype = lambda arg: arg if any(arg is dtype for dtype in (bool, int, float, complex)) else {'f': float, 'i': int, 'b': bool, 'c': complex}[numpy.dtype(arg).kind]\n\ndef asarray(arg):\n if hasattr(type(arg), 'as_evaluable_array'):\n return arg.as_evaluable_array\n if _containsarray(arg):\n return stack(arg, axis=0)\n else:\n return Constant(arg)\n\nasarrays = types.tuple[asarray]\n\ndef asindex(arg):\n arg = asarray(arg)\n if arg.ndim or arg.dtype != int:\n raise ValueError('argument is not an index: {}'.format(arg))\n if arg._intbounds[0] < 0:\n raise ValueError('index must be non-negative')\n return arg\n\[email protected]_annotations\ndef equalindex(n:asindex, m:asindex):\n '''Compare two array indices.\n\n Returns `True` if the two indices are certainly equal, `False` if they are\n certainly not equal, or `None` if equality cannot be determined at compile\n time.\n '''\n\n if n is m:\n return True\n n = n.simplified\n m = m.simplified\n if n is m:\n return True\n if n.arguments != m.arguments:\n return False\n if n.isconstant: # implies m.isconstant\n return int(n) == int(m)\n\nasshape = types.tuple[asindex]\n\[email protected]_annotations\ndef equalshape(N:asshape, M:asshape):\n '''Compare two array shapes.\n\n Returns `True` if all indices are certainly equal, `False` if any indices are\n certainly not equal, or `None` if equality cannot be determined at compile\n time.\n '''\n\n if N == M:\n return True\n if len(N) != len(M):\n return False\n retval = True\n for eq in map(equalindex, N, M):\n if eq == False:\n return False\n if eq == None:\n retval = None\n return retval\n\nclass ExpensiveEvaluationWarning(warnings.NutilsInefficiencyWarning): pass\n\ndef replace(func=None, depthfirst=False, recursive=False, lru=4):\n '''decorator for deep object replacement\n\n Generates a deep replacement method for general objects based on a callable\n that is applied (recursively) on individual constructor arguments.\n\n Args\n ----\n func\n Callable which maps an object onto a new object, or `None` if no\n replacement is made. It must have one positional argument for the object,\n and may have any number of additional positional and/or keyword\n arguments.\n depthfirst : :class:`bool`\n If `True`, decompose each object as far a possible, then apply `func` to\n all arguments as the objects are reconstructed. Otherwise apply `func`\n directly on each new object that is encountered in the decomposition,\n proceding only if the return value is `None`.\n recursive : :class:`bool`\n If `True`, repeat replacement for any object returned by `func` until it\n returns `None`. Otherwise perform a single, non-recursive sweep.\n lru : :class:`int`\n Maximum size of the least-recently-used cache. 
A persistent weak-key\n dictionary is maintained for every unique set of function arguments. When\n the size of `lru` is reached, the least recently used cache is dropped.\n\n Returns\n -------\n :any:`callable`\n The method that searches the object to perform the replacements.\n '''\n\n if func is None:\n return functools.partial(replace, depthfirst=depthfirst, recursive=recursive, lru=lru)\n\n signature = inspect.signature(func)\n arguments = [] # list of past function arguments, least recently used last\n caches = [] # list of weak-key dictionaries matching arguments (above)\n\n remember = object() # token to signal that rstack[-1] can be cached as the replacement of fstack[-1]\n recreate = object() # token to signal that all arguments for object recreation are ready on rstack\n pending = object() # token to hold the place of a cachable object pending creation\n identity = object() # token to hold the place of the cache value in case it matches key, to avoid circular references\n\n @functools.wraps(func)\n def wrapped(target, *funcargs, **funckwargs):\n\n # retrieve or create a weak-key dictionary\n bound = signature.bind(None, *funcargs, **funckwargs)\n bound.apply_defaults()\n try:\n index = arguments.index(bound.arguments) # by using index, arguments need not be hashable\n except ValueError:\n index = -1\n cache = weakref.WeakKeyDictionary()\n else:\n cache = caches[index]\n if index != 0: # function arguments are not the most recent (possibly new)\n if index > 0 or len(arguments) >= lru:\n caches.pop(index) # pop matching (or oldest) item\n arguments.pop(index)\n caches.insert(0, cache) # insert popped (or new) item to front\n arguments.insert(0, bound.arguments)\n\n fstack = [target] # stack of unprocessed objects and command tokens\n rstack = [] # stack of processed objects\n _stack = fstack if recursive else rstack\n\n try:\n while fstack:\n obj = fstack.pop()\n\n if obj is recreate:\n args = [rstack.pop() for obj in range(fstack.pop())]\n f = fstack.pop()\n r = f(*args)\n if depthfirst:\n newr = func(r, *funcargs, **funckwargs)\n if newr is not None:\n _stack.append(newr)\n continue\n rstack.append(r)\n continue\n\n if obj is remember:\n obj = fstack.pop()\n cache[obj] = rstack[-1] if rstack[-1] is not obj else identity\n continue\n\n if isinstance(obj, (tuple, list, dict, set, frozenset)):\n if not obj:\n rstack.append(obj) # shortcut to avoid recreation of empty container\n else:\n fstack.append(lambda *x, T=type(obj): T(x))\n fstack.append(len(obj))\n fstack.append(recreate)\n fstack.extend(obj if not isinstance(obj, dict) else obj.items())\n continue\n\n try:\n r = cache[obj]\n except KeyError: # object can be weakly cached, but isn't\n cache[obj] = pending\n fstack.append(obj)\n fstack.append(remember)\n except TypeError: # object cannot be referenced or is not hashable\n pass\n else: # object is in cache\n if r is pending:\n pending_objs = [k for k, v in cache.items() if v is pending]\n index = pending_objs.index(obj)\n raise Exception('{}@replace caught in a circular dependence\\n'.format(func.__name__) + Tuple(pending_objs[index:]).asciitree().split('\\n', 1)[1])\n rstack.append(r if r is not identity else obj)\n continue\n\n if not depthfirst:\n newr = func(obj, *funcargs, **funckwargs)\n if newr is not None:\n _stack.append(newr)\n continue\n\n try:\n f, args = obj.__reduce__()\n except: # obj cannot be reduced into a constructor and its arguments\n rstack.append(obj)\n else:\n fstack.append(f)\n fstack.append(len(args))\n fstack.append(recreate)\n 
fstack.extend(args)\n\n assert len(rstack) == 1\n\n finally:\n while fstack:\n if fstack.pop() is remember:\n assert cache.pop(fstack.pop()) is pending\n\n return rstack[0]\n\n return wrapped\n\nclass Evaluable(types.Singleton):\n 'Base class'\n\n __slots__ = '__args'\n __cache__ = 'dependencies', 'arguments', 'ordereddeps', 'dependencytree', 'optimized_for_numpy', '_loop_concatenate_deps'\n\n @types.apply_annotations\n def __init__(self, args:types.tuple[strictevaluable]):\n super().__init__()\n self.__args = args\n\n def evalf(self, *args):\n raise NotImplementedError('Evaluable derivatives should implement the evalf method')\n\n def evalf_withtimes(self, times, *args):\n with times[self]:\n return self.evalf(*args)\n\n @property\n def dependencies(self):\n '''collection of all function arguments'''\n deps = {}\n for func in self.__args:\n funcdeps = func.dependencies\n deps.update(funcdeps)\n deps[func] = len(funcdeps)\n return types.frozendict(deps)\n\n @property\n def arguments(self):\n 'a frozenset of all arguments of this evaluable'\n return frozenset().union(*(child.arguments for child in self.__args))\n\n @property\n def isconstant(self):\n return EVALARGS not in self.dependencies\n\n @property\n def ordereddeps(self):\n '''collection of all function arguments such that the arguments to\n dependencies[i] can be found in dependencies[:i]'''\n deps = self.dependencies.copy()\n deps.pop(EVALARGS, None)\n return tuple([EVALARGS] + sorted(deps, key=deps.__getitem__))\n\n @property\n def dependencytree(self):\n '''lookup table of function arguments into ordereddeps, such that\n ordereddeps[i].__args[j] == ordereddeps[dependencytree[i][j]], and\n self.__args[j] == ordereddeps[dependencytree[-1][j]]'''\n args = self.ordereddeps\n return tuple(tuple(map(args.index, func.__args)) for func in args+(self,))\n\n @property\n def serialized(self):\n return zip(self.ordereddeps[1:]+(self,), self.dependencytree[1:])\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n args = tuple(arg._node(cache, subgraph, times) for arg in self.__args)\n label = '\\n'.join(filter(None, (type(self).__name__, self._node_details)))\n cache[self] = node = RegularNode(label, args, {}, (type(self).__name__, times[self]), subgraph)\n return node\n\n @property\n def _node_details(self):\n return ''\n\n def asciitree(self, richoutput=False):\n 'string representation'\n\n return self._node({}, None, collections.defaultdict(_Stats)).generate_asciitree(richoutput)\n\n def __str__(self):\n return self.__class__.__name__\n\n def eval(self, **evalargs):\n '''Evaluate function on a specified element, point set.'''\n\n values = [evalargs]\n try:\n values.extend(op.evalf(*[values[i] for i in indices]) for op, indices in self.serialized)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n raise EvaluationError(self, values) from e\n else:\n return values[-1]\n\n def eval_withtimes(self, times, **evalargs):\n '''Evaluate function on a specified element, point set while measure time of each step.'''\n\n values = [evalargs]\n try:\n values.extend(op.evalf_withtimes(times, *[values[i] for i in indices]) for op, indices in self.serialized)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n raise EvaluationError(self, values) from e\n else:\n return values[-1]\n\n @contextlib.contextmanager\n def session(self, graphviz):\n if graphviz is None:\n yield self.eval\n return\n stats = collections.defaultdict(_Stats)\n def eval(**args):\n return self.eval_withtimes(stats, **args)\n 
with log.context('eval'):\n yield eval\n node = self._node({}, None, stats)\n maxtime = builtins.max(n.metadata[1].time for n in node.walk(set()))\n tottime = builtins.sum(n.metadata[1].time for n in node.walk(set()))\n aggstats = tuple((key, builtins.sum(v.time for v in values), builtins.sum(v.ncalls for v in values)) for key, values in util.gather(n.metadata for n in node.walk(set())))\n fill_color = (lambda node: '0,{:.2f},1'.format(node.metadata[1].time/maxtime)) if maxtime else None\n node.export_graphviz(fill_color=fill_color, dot_path=graphviz)\n log.info('total time: {:.0f}ms\\n'.format(tottime/1e6) + '\\n'.join('{:4.0f} {} ({} calls, avg {:.3f} per call)'.format(t / 1e6, k, n, t / (1e6*n))\n for k, t, n in sorted(aggstats, reverse=True, key=lambda item: item[1]) if n))\n\n def _stack(self, values):\n lines = [' %0 = EVALARGS']\n for (op, indices), v in zip(self.serialized, values):\n lines[-1] += ' --> ' + type(v).__name__\n if numeric.isarray(v):\n lines[-1] += '({})'.format(','.join(map(str, v.shape)))\n try:\n code = op.evalf.__code__\n offset = 1 if getattr(op.evalf, '__self__', None) is not None else 0\n names = code.co_varnames[offset:code.co_argcount]\n names += tuple('{}[{}]'.format(code.co_varnames[code.co_argcount], n) for n in range(len(indices) - len(names)))\n args = map(' {}=%{}'.format, names, indices)\n except:\n args = map(' %{}'.format, indices)\n lines.append(' %{} = {}:{}'.format(len(lines), op, ','.join(args)))\n return lines\n\n @property\n @replace(depthfirst=True, recursive=True)\n def simplified(obj):\n if isinstance(obj, Evaluable):\n retval = obj._simplified()\n if retval is not None and isinstance(obj, Array):\n assert isinstance(retval, Array) and equalshape(retval.shape, obj.shape) and retval.dtype == obj.dtype, '{} --simplify--> {}'.format(obj, retval)\n return retval\n\n def _simplified(self):\n return\n\n @property\n def optimized_for_numpy(self):\n retval = self._optimized_for_numpy1() or self\n return retval._combine_loop_concatenates(frozenset())\n\n @types.apply_annotations\n @replace(depthfirst=True, recursive=True)\n def _optimized_for_numpy1(obj: simplified.fget):\n if isinstance(obj, Evaluable):\n retval = obj._simplified() or obj._optimized_for_numpy()\n if retval is not None and isinstance(obj, Array):\n assert isinstance(retval, Array) and equalshape(retval.shape, obj.shape), '{0}._optimized_for_numpy or {0}._simplified resulted in shape change'.format(type(obj).__name__)\n return retval\n\n def _optimized_for_numpy(self):\n return\n\n @property\n def _loop_concatenate_deps(self):\n deps = []\n for arg in self.__args:\n deps += [dep for dep in arg._loop_concatenate_deps if dep not in deps]\n return tuple(deps)\n\n def _combine_loop_concatenates(self, outer_exclude):\n while True:\n exclude = set(outer_exclude)\n combine = {}\n # Collect all top-level `LoopConcatenate` instances in `combine` and all\n # their dependent `LoopConcatenate` instances in `exclude`.\n for lc in self._loop_concatenate_deps:\n lcs = combine.setdefault(lc.index, [])\n if lc not in lcs:\n lcs.append(lc)\n exclude.update(set(lc._loop_concatenate_deps) - {lc})\n # Combine top-level `LoopConcatenate` instances excluding those in\n # `exclude`.\n replacements = {}\n for index, lcs in combine.items():\n lcs = [lc for lc in lcs if lc not in exclude]\n if not lcs:\n continue\n # We're extracting data from `LoopConcatenate` in favor of using\n # `loop_concatenate_combined(lcs, ...)` because the later requires\n # reapplying simplifications that are already applied in the 
former.\n # For example, in `loop_concatenate_combined` the offsets (used by\n # start, stop and the concatenation length) are formed by\n # `loop_concatenate`-ing `func.shape[-1]`. If the shape is constant,\n # this can be simplified to a `Range`.\n data = Tuple((Tuple(lc.funcdata) for lc in lcs))\n # Combine `LoopConcatenate` instances in `data` excluding\n # `outer_exclude` and those that will be processed in a subsequent loop\n # (the remainder of `exclude`). The latter consists of loops that are\n # invariant w.r.t. the current loop `index`.\n data = data._combine_loop_concatenates(exclude)\n combined = LoopConcatenateCombined(data, index._name, index.length)\n for i, lc in enumerate(lcs):\n intbounds = dict(zip(('_lower', '_upper'), lc._intbounds)) if lc.dtype == int else {}\n replacements[lc] = ArrayFromTuple(combined, i, lc.shape, lc.dtype, **intbounds)\n if replacements:\n self = replace(lambda key: replacements.get(key) if isinstance(key, LoopConcatenate) else None, recursive=False, depthfirst=False)(self)\n else:\n return self\n\nclass EvaluationError(Exception):\n def __init__(self, f, values):\n super().__init__('evaluation failed in step {}/{}\\n'.format(len(values), len(f.dependencies)) + '\\n'.join(f._stack(values)))\n\nclass EVALARGS(Evaluable):\n def __init__(self):\n super().__init__(args=())\n def _node(self, cache, subgraph, times):\n return InvisibleNode((type(self).__name__, _Stats()))\n\nEVALARGS = EVALARGS()\n\nclass EvaluableConstant(Evaluable):\n '''Evaluate to the given constant value.\n\n Parameters\n ----------\n value\n The return value of ``eval``.\n '''\n\n __slots__ = 'value'\n\n def __init__(self, value):\n self.value = value\n super().__init__(())\n\n def evalf(self):\n return self.value\n\n @property\n def _node_details(self):\n s = repr(self.value)\n if '\\n' in s:\n s = s.split('\\n', 1)[0] + '...'\n if len(s) > 20:\n s = s[:17] + '...'\n return s\n\nclass Tuple(Evaluable):\n\n __slots__ = 'items'\n\n @types.apply_annotations\n def __init__(self, items: types.tuple[strictevaluable]):\n self.items = items\n super().__init__(items)\n\n def evalf(self, *items):\n return items\n\n def __iter__(self):\n 'iterate'\n\n return iter(self.items)\n\n def __len__(self):\n 'length'\n\n return len(self.items)\n\n def __getitem__(self, item):\n 'get item'\n\n return self.items[item]\n\n def __add__(self, other):\n 'add'\n\n return Tuple(self.items + tuple(other))\n\n def __radd__(self, other):\n 'add'\n\n return Tuple(tuple(other) + self.items)\n\nclass SparseArray(Evaluable):\n 'sparse array'\n\n @types.apply_annotations\n def __init__(self, chunks:types.tuple[asarrays], shape:asarrays, dtype:asdtype):\n self._shape = shape\n self._dtype = dtype\n super().__init__(args=[Tuple(shape), *map(Tuple, chunks)])\n\n def evalf(self, shape, *chunks):\n length = builtins.sum(values.size for *indices, values in chunks)\n data = numpy.empty((length,), dtype=sparse.dtype(tuple(map(int, shape)), self._dtype))\n start = 0\n for *indices, values in chunks:\n stop = start + values.size\n d = data[start:stop].reshape(values.shape)\n d['value'] = values\n for idim, ii in enumerate(indices):\n d['index']['i'+str(idim)] = ii\n start = stop\n return data\n\n# ARRAYFUNC\n#\n# The main evaluable. 
Closely mimics a numpy array.\n\ndef add(a, b):\n a, b = _numpy_align(a, b)\n return Add([a, b])\n\ndef multiply(a, b):\n a, b = _numpy_align(a, b)\n return Multiply([a, b])\n\ndef sum(arg, axis=None):\n '''Sum array elements over a given axis.'''\n\n if axis is None:\n return Sum(arg)\n axes = (axis,) if numeric.isint(axis) else axis\n summed = Transpose.to_end(arg, *axes)\n for i in range(len(axes)):\n summed = Sum(summed)\n return summed\n\ndef product(arg, axis):\n return Product(Transpose.to_end(arg, axis))\n\ndef power(arg, n):\n arg, n = _numpy_align(arg, n)\n return Power(arg, n)\n\ndef dot(a, b, axes):\n '''\n Contract ``a`` and ``b`` along ``axes``.\n '''\n\n return multiply(a, b).sum(axes)\n\ndef transpose(arg, trans=None):\n arg = asarray(arg)\n if trans is None:\n normtrans = range(arg.ndim-1, -1, -1)\n else:\n normtrans = _normdims(arg.ndim, trans)\n assert sorted(normtrans) == list(range(arg.ndim))\n return Transpose(arg, normtrans)\n\ndef swapaxes(arg, axis1, axis2):\n arg = asarray(arg)\n trans = numpy.arange(arg.ndim)\n trans[axis1], trans[axis2] = trans[axis2], trans[axis1]\n return transpose(arg, trans)\n\ndef align(arg, where, shape):\n '''Align array to target shape.\n\n The align operation can be considered the opposite of transpose: instead of\n specifying for each axis of the return value the original position in the\n argument, align specifies for each axis of the argument the new position in\n the return value. In addition, the return value may be of higher dimension,\n with new axes being inserted according to the ``shape`` argument.\n\n Args\n ----\n arg : :class:`Array`\n Original array.\n where : :class:`tuple` of integers\n New axis positions.\n shape : :class:`tuple`\n Shape of the aligned array.\n\n Returns\n -------\n :class:`Array`\n The aligned array.\n '''\n\n where = list(where)\n for i, length in enumerate(shape):\n if i not in where:\n arg = InsertAxis(arg, length)\n where.append(i)\n if where != list(range(len(shape))):\n arg = Transpose(arg, numpy.argsort(where))\n assert equalshape(arg.shape, shape)\n return arg\n\ndef unalign(*args):\n '''Remove (joint) inserted axes.\n\n Given one or more equally shaped array arguments, return the shortest common\n axis vector along with function arguments such that the original arrays can\n be recovered by :func:`align`.\n '''\n\n assert args\n if len(args) == 1:\n return args[0]._unaligned\n if any(arg.ndim != args[0].ndim for arg in args[1:]):\n raise ValueError('varying dimensions in unalign')\n nonins = functools.reduce(operator.or_, [set(arg._unaligned[1]) for arg in args])\n if len(nonins) == args[0].ndim:\n return (*args, tuple(range(args[0].ndim)))\n ret = []\n for arg in args:\n unaligned, where = arg._unaligned\n for i in sorted(nonins - set(where)):\n unaligned = InsertAxis(unaligned, args[0].shape[i])\n where += i,\n if not ret: # first argument\n commonwhere = where\n elif where != commonwhere:\n unaligned = Transpose(unaligned, map(where.index, commonwhere))\n ret.append(unaligned)\n return (*ret, commonwhere)\n\n# ARRAYS\n\n_ArrayMeta = type(Evaluable)\n\nif debug_flags.sparse:\n def _chunked_assparse_checker(orig):\n assert isinstance(orig, property)\n @property\n def _assparse(self):\n chunks = orig.fget(self)\n assert isinstance(chunks, tuple)\n assert all(isinstance(chunk, tuple) for chunk in chunks)\n assert all(all(isinstance(item, Array) for item in chunk) for chunk in chunks)\n if self.ndim:\n for *indices, values in chunks:\n assert len(indices) == self.ndim\n assert all(idx.dtype == 
int for idx in indices)\n assert all(equalshape(idx.shape, values.shape) for idx in indices)\n elif chunks:\n assert len(chunks) == 1\n chunk, = chunks\n assert len(chunk) == 1\n values, = chunk\n assert values.shape == ()\n return chunks\n return _assparse\n\n class _ArrayMeta(_ArrayMeta):\n def __new__(mcls, name, bases, namespace):\n if '_assparse' in namespace:\n namespace['_assparse'] = _chunked_assparse_checker(namespace['_assparse'])\n return super().__new__(mcls, name, bases, namespace)\n\nif debug_flags.evalf:\n class _evalf_checker:\n def __init__(self, orig):\n self.evalf_obj = getattr(orig, '__get__', lambda *args: orig)\n def __get__(self, instance, owner):\n evalf = self.evalf_obj(instance, owner)\n @functools.wraps(evalf)\n def evalf_with_check(*args, **kwargs):\n res = evalf(*args, **kwargs)\n assert not hasattr(instance, 'dtype') or asdtype(res.dtype) == instance.dtype, ((instance.dtype, res.dtype), instance, res)\n assert not hasattr(instance, 'ndim') or res.ndim == instance.ndim\n assert not hasattr(instance, 'shape') or all(m == n for m, n in zip(res.shape, instance.shape) if isinstance(n, int)), 'shape mismatch'\n return res\n return evalf_with_check\n\n class _ArrayMeta(_ArrayMeta):\n def __new__(mcls, name, bases, namespace):\n if 'evalf' in namespace:\n namespace['evalf'] = _evalf_checker(namespace['evalf'])\n return super().__new__(mcls, name, bases, namespace)\n\nclass AsEvaluableArray(Protocol):\n 'Protocol for conversion into an :class:`Array`.'\n\n @property\n def as_evaluable_array(self) -> 'Array':\n 'Lower this object to a :class:`nutils.evaluable.Array`.'\n\nclass Array(Evaluable, metaclass=_ArrayMeta):\n '''\n Base class for array valued functions.\n\n Attributes\n ----------\n shape : :class:`tuple` of :class:`int`\\\\s\n The shape of this array function.\n ndim : :class:`int`\n The number of dimensions of this array array function. Equal to\n ``len(shape)``.\n dtype : :class:`int`, :class:`float`\n The dtype of the array elements.\n '''\n\n __slots__ = 'shape', 'dtype', '__index'\n __cache__ = 'assparse', '_assparse', '_intbounds'\n\n __array_priority__ = 1. # http://stackoverflow.com/questions/7042496/numpy-coercion-problem-for-left-sided-binary-operator/7057530#7057530\n\n @types.apply_annotations\n def __init__(self, args:types.tuple[strictevaluable], shape:asshape, dtype:asdtype):\n self.shape = shape\n self.dtype = dtype\n super().__init__(args=args)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n def __getitem__(self, item):\n if not isinstance(item, tuple):\n item = item,\n if ... in item:\n iell = item.index(...)\n if ... 
in item[iell+1:]:\n raise IndexError('an index can have only a single ellipsis')\n # replace ellipsis by the appropriate number of slice(None)\n item = item[:iell] + (slice(None),)*(self.ndim-len(item)+1) + item[iell+1:]\n if len(item) > self.ndim:\n raise IndexError('too many indices for array')\n array = self\n for axis, it in reversed(tuple(enumerate(item))):\n array = get(array, axis, item=it) if numeric.isint(it) \\\n else _takeslice(array, it, axis) if isinstance(it, slice) \\\n else take(array, it, axis)\n return array\n\n def __bool__(self):\n return True\n\n def __len__(self):\n if self.ndim == 0:\n raise TypeError('len() of unsized object')\n return self.shape[0]\n\n def __index__(self):\n try:\n index = self.__index\n except AttributeError:\n if self.ndim or self.dtype not in (int, bool) or not self.isconstant:\n raise TypeError('cannot convert {!r} to int'.format(self))\n index = self.__index = int(self.simplified.eval())\n return index\n\n size = property(lambda self: util.product(self.shape) if self.ndim else 1)\n T = property(lambda self: transpose(self))\n\n __add__ = __radd__ = add\n __sub__ = lambda self, other: subtract(self, other)\n __rsub__ = lambda self, other: subtract(other, self)\n __mul__ = __rmul__ = multiply\n __truediv__ = lambda self, other: divide(self, other)\n __rtruediv__ = lambda self, other: divide(other, self)\n __pos__ = lambda self: self\n __neg__ = lambda self: negative(self)\n __pow__ = power\n __abs__ = lambda self: abs(self)\n __mod__ = lambda self, other: mod(self, other)\n __int__ = __index__\n __str__ = __repr__ = lambda self: '{}.{}<{}>'.format(type(self).__module__, type(self).__name__, self._shape_str(form=str))\n _shape_str = lambda self, form: '{}:{}'.format(self.dtype.__name__[0] if hasattr(self, 'dtype') else '?', ','.join(str(int(length)) if length.isconstant else '?' for length in self.shape) if hasattr(self, 'shape') else '?')\n\n sum = sum\n prod = product\n dot = dot\n swapaxes = swapaxes\n transpose = transpose\n choose = lambda self, choices: Choose(self, choices)\n\n @property\n def assparse(self):\n 'Convert to a :class:`SparseArray`.'\n\n return SparseArray(self.simplified._assparse, self.shape, self.dtype)\n\n @property\n def _assparse(self):\n # Convert to a sequence of sparse COO arrays. The returned data is a tuple\n # of `(*indices, values)` tuples, where `values` is an `Array` with the\n # same dtype as `self`, but this is not enforced yet, and each index in\n # `indices` is an `Array` with dtype `int` and the exact same shape as\n # `values`. The length of `indices` equals `self.ndim`. 
In addition, if\n # `self` is 0d the length of `self._assparse` is at most one and the\n # `values` array must be 0d as well.\n #\n # The sparse data can be reassembled after evaluation by\n #\n # dense = numpy.zeros(self.shape)\n # for I0,...,Ik,V in self._assparse:\n # for i0,...,ik,v in zip(I0.eval().ravel(),...,Ik.eval().ravel(),V.eval().ravel()):\n # dense[i0,...,ik] = v\n\n indices = [prependaxes(appendaxes(Range(length), self.shape[i+1:]), self.shape[:i]) for i, length in enumerate(self.shape)]\n return (*indices, self),\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n args = tuple(arg._node(cache, subgraph, times) for arg in self._Evaluable__args)\n bounds = '[{},{}]'.format(*self._intbounds) if self.dtype == int else None\n label = '\\n'.join(filter(None, (type(self).__name__, self._node_details, self._shape_str(form=repr), bounds)))\n cache[self] = node = RegularNode(label, args, {}, (type(self).__name__, times[self]), subgraph)\n return node\n\n # simplifications\n _multiply = lambda self, other: None\n _transpose = lambda self, axes: None\n _insertaxis = lambda self, axis, length: None\n _power = lambda self, n: None\n _add = lambda self, other: None\n _sum = lambda self, axis: None\n _take = lambda self, index, axis: None\n _rtake = lambda self, index, axis: None\n _determinant = lambda self, axis1, axis2: None\n _inverse = lambda self, axis1, axis2: None\n _takediag = lambda self, axis1, axis2: None\n _diagonalize = lambda self, axis: None\n _product = lambda self: None\n _sign = lambda self: None\n _eig = lambda self, symmetric: None\n _inflate = lambda self, dofmap, length, axis: None\n _rinflate = lambda self, func, length, axis: None\n _unravel = lambda self, axis, shape: None\n _ravel = lambda self, axis: None\n _loopsum = lambda self, loop_index: None # NOTE: type of `loop_index` is `_LoopIndex`\n\n @property\n def _unaligned(self):\n return self, tuple(range(self.ndim))\n\n _diagonals = ()\n _inflations = ()\n\n def _derivative(self, var, seen):\n if self.dtype in (bool, int) or var not in self.dependencies:\n return Zeros(self.shape + var.shape, dtype=self.dtype)\n raise NotImplementedError('derivative not defined for {}'.format(self.__class__.__name__))\n\n @property\n def as_evaluable_array(self):\n 'return self'\n\n return self\n\n @property\n def _intbounds(self):\n # inclusive lower and upper bounds\n if self.ndim == 0 and self.dtype == int and self.isconstant:\n value = self.__index__()\n return value, value\n else:\n lower, upper = self._intbounds_impl()\n assert isinstance(lower, int) or lower == float('-inf') or lower == float('inf')\n assert isinstance(upper, int) or upper == float('-inf') or upper == float('inf')\n assert lower <= upper\n return lower, upper\n\n def _intbounds_impl(self):\n return float('-inf'), float('inf')\n\nclass NPoints(Array):\n 'The length of the points axis.'\n\n __slots__ = ()\n\n def __init__(self):\n super().__init__(args=[EVALARGS], shape=(), dtype=int)\n\n def evalf(self, evalargs):\n points = evalargs['_points'].coords\n return types.frozenarray(points.shape[0])\n\n def _intbounds_impl(self):\n return 0, float('inf')\n\nclass Points(Array):\n\n __slots__ = ()\n\n def __init__(self, npoints, ndim):\n super().__init__(args=[EVALARGS], shape=(npoints, ndim), dtype=float)\n\n def evalf(self, evalargs):\n return evalargs['_points'].coords\n\nclass Weights(Array):\n\n __slots__ = ()\n\n def __init__(self, npoints):\n super().__init__(args=[EVALARGS], shape=(npoints,), dtype=float)\n\n def 
evalf(self, evalargs):\n weights = evalargs['_points'].weights\n assert numeric.isarray(weights) and weights.ndim == 1\n return weights\n\nclass Normal(Array):\n 'normal'\n\n __slots__ = 'lgrad',\n\n @types.apply_annotations\n def __init__(self, lgrad:asarray):\n assert lgrad.ndim >= 2 and equalindex(lgrad.shape[-2], lgrad.shape[-1])\n self.lgrad = lgrad\n super().__init__(args=[lgrad], shape=lgrad.shape[:-1], dtype=float)\n\n def _simplified(self):\n if equalindex(self.shape[-1], 1):\n return Sign(Take(self.lgrad, 0))\n unaligned, where = unalign(self.lgrad)\n for axis in self.ndim - 1, self.ndim:\n if axis not in where:\n unaligned = InsertAxis(unaligned, self.lgrad.shape[axis])\n where += axis,\n if len(where) < self.ndim + 1:\n if where[-2:] != (self.ndim - 1, self.ndim):\n unaligned = Transpose(unaligned, numpy.argsort(where))\n where = tuple(sorted(where))\n return align(Normal(unaligned), where[:-1], self.shape)\n\n def evalf(self, lgrad):\n n = lgrad[...,-1]\n # orthonormalize n to G\n G = lgrad[...,:-1]\n GG = numpy.einsum('...ki,...kj->...ij', G, G)\n v1 = numpy.einsum('...ij,...i->...j', G, n)\n v2 = numpy.linalg.solve(GG, v1)\n v3 = numpy.einsum('...ij,...j->...i', G, v2)\n return numeric.normalize(n - v3)\n\n def _derivative(self, var, seen):\n if equalindex(self.shape[-1], 1):\n return zeros(self.shape + var.shape)\n G = self.lgrad[...,:-1]\n invGG = inverse(einsum('Aki,Akj->Aij', G, G))\n return -einsum('Ail,Alj,Ak,AkjB->AiB', G, invGG, self, derivative(G, var, seen))\n\nclass Constant(Array):\n\n __slots__ = 'value',\n __cache__ = '_isunit'\n\n @types.apply_annotations\n def __init__(self, value:types.arraydata):\n self.value = numpy.asarray(value)\n super().__init__(args=[], shape=value.shape, dtype=value.dtype)\n\n def _simplified(self):\n if not self.value.any():\n return zeros_like(self)\n for i, sh in enumerate(self.shape):\n # Find and replace invariant axes with InsertAxis. 
Since `self.value.any()`\n # is False for arrays with a zero-length axis, we can arrive here only if all\n # axes have at least length one, hence the following statement should work.\n first, *others = numpy.rollaxis(self.value, i)\n if all(numpy.equal(first, other).all() for other in others):\n return insertaxis(Constant(first), i, sh)\n\n def evalf(self):\n return self.value\n\n def _node(self, cache, subgraph, times):\n if self.ndim:\n return super()._node(cache, subgraph, times)\n elif self in cache:\n return cache[self]\n else:\n label = '{}'.format(self.value[()])\n if len(label) > 9:\n label = '~{:.2e}'.format(self.value[()])\n cache[self] = node = DuplicatedLeafNode(label, (type(self).__name__, times[self]))\n return node\n\n @property\n def _isunit(self):\n return numpy.equal(self.value, 1).all()\n\n def _transpose(self, axes):\n return Constant(self.value.transpose(axes))\n\n def _sum(self, axis):\n return Constant(numpy.sum(self.value, axis))\n\n def _add(self, other):\n if isinstance(other, Constant):\n return Constant(numpy.add(self.value, other.value))\n\n def _inverse(self, axis1, axis2):\n value = numpy.transpose(self.value, tuple(i for i in range(self.ndim) if i != axis1 and i != axis2) + (axis1, axis2))\n return Constant(numpy.linalg.inv(value))\n\n def _product(self):\n return Constant(self.value.prod(-1))\n\n def _multiply(self, other):\n if self._isunit:\n return other\n if isinstance(other, Constant):\n return Constant(numpy.multiply(self.value, other.value))\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n return Constant(numpy.einsum('...kk->...k', numpy.transpose(self.value,\n list(range(axis1)) + list(range(axis1+1, axis2)) + list(range(axis2+1, self.ndim)) + [axis1, axis2])))\n\n def _take(self, index, axis):\n if index.isconstant:\n index_ = index.eval()\n return Constant(self.value.take(index_, axis))\n\n def _power(self, n):\n if isinstance(n, Constant):\n return Constant(numpy.power(self.value, n.value))\n\n def _eig(self, symmetric):\n eigval, eigvec = (numpy.linalg.eigh if symmetric else numpy.linalg.eig)(self.value)\n return Tuple((Constant(eigval), Constant(eigvec)))\n\n def _sign(self):\n return Constant(numpy.sign(self.value))\n\n def _unravel(self, axis, shape):\n shape = self.value.shape[:axis] + shape + self.value.shape[axis+1:]\n return Constant(self.value.reshape(shape))\n\n def _determinant(self, axis1, axis2):\n value = numpy.transpose(self.value, tuple(i for i in range(self.ndim) if i != axis1 and i != axis2) + (axis1, axis2))\n return Constant(numpy.linalg.det(value))\n\n def _intbounds_impl(self):\n if self.dtype == int and self.value.size:\n return int(self.value.min()), int(self.value.max())\n else:\n return super()._intbounds_impl()\n\nclass InsertAxis(Array):\n\n __slots__ = 'func', 'length'\n __cache__ = '_unaligned', '_inflations'\n\n @types.apply_annotations\n def __init__(self, func:asarray, length:asindex):\n self.func = func\n self.length = length\n super().__init__(args=[func, length], shape=(*func.shape, length), dtype=func.dtype)\n\n @property\n def _diagonals(self):\n return self.func._diagonals\n\n @property\n def _inflations(self):\n return tuple((axis, types.frozendict((dofmap, InsertAxis(func, self.length)) for dofmap, func in parts.items())) for axis, parts in self.func._inflations)\n\n @property\n def _unaligned(self):\n return self.func._unaligned\n\n def _simplified(self):\n return self.func._insertaxis(self.ndim-1, self.length)\n\n def evalf(self, func, length):\n if length == 1:\n return 
func[...,numpy.newaxis]\n try:\n return numpy.ndarray(buffer=func, dtype=func.dtype, shape=(*func.shape, length), strides=(*func.strides, 0))\n except ValueError: # non-contiguous data\n return numpy.repeat(func[...,numpy.newaxis], length, -1)\n\n def _derivative(self, var, seen):\n return insertaxis(derivative(self.func, var, seen), self.ndim-1, self.length)\n\n def _sum(self, i):\n if i == self.ndim - 1:\n return self.func * self.length\n return InsertAxis(sum(self.func, i), self.length)\n\n def _product(self):\n return self.func**self.length\n\n def _power(self, n):\n unaligned1, unaligned2, where = unalign(self, n)\n if len(where) != self.ndim:\n return align(unaligned1 ** unaligned2, where, self.shape)\n\n def _add(self, other):\n unaligned1, unaligned2, where = unalign(self, other)\n if len(where) != self.ndim:\n return align(unaligned1 + unaligned2, where, self.shape)\n\n def _diagonalize(self, axis):\n if axis < self.ndim - 1:\n return insertaxis(diagonalize(self.func, axis, self.ndim - 1), self.ndim - 1, self.length)\n\n def _inflate(self, dofmap, length, axis):\n if axis + dofmap.ndim < self.ndim:\n return InsertAxis(_inflate(self.func, dofmap, length, axis), self.length)\n elif axis == self.ndim:\n return insertaxis(Inflate(self.func, dofmap, length), self.ndim - 1, self.length)\n\n def _insertaxis(self, axis, length):\n if axis == self.ndim - 1:\n return InsertAxis(InsertAxis(self.func, length), self.length)\n\n def _take(self, index, axis):\n if axis == self.ndim - 1:\n return appendaxes(self.func, index.shape)\n return InsertAxis(_take(self.func, index, axis), self.length)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 == self.ndim-1:\n return Transpose.to_end(self.func, axis1)\n else:\n return insertaxis(_takediag(self.func, axis1, axis2), self.ndim-3, self.length)\n\n def _unravel(self, axis, shape):\n if axis == self.ndim - 1:\n return InsertAxis(InsertAxis(self.func, shape[0]), shape[1])\n else:\n return InsertAxis(unravel(self.func, axis, shape), self.length)\n\n def _sign(self):\n return InsertAxis(Sign(self.func), self.length)\n\n def _determinant(self, axis1, axis2):\n if axis1 < self.ndim-1 and axis2 < self.ndim-1:\n return InsertAxis(determinant(self.func, (axis1, axis2)), self.length)\n\n def _inverse(self, axis1, axis2):\n if axis1 < self.ndim-1 and axis2 < self.ndim-1:\n return InsertAxis(inverse(self.func, (axis1, axis2)), self.length)\n\n def _loopsum(self, index):\n return InsertAxis(loop_sum(self.func, index), self.length)\n\n @property\n def _assparse(self):\n return tuple((*(InsertAxis(idx, self.length) for idx in indices), prependaxes(Range(self.length), values.shape), InsertAxis(values, self.length)) for *indices, values in self.func._assparse)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Transpose(Array):\n\n __slots__ = 'func', 'axes'\n __cache__ = '_invaxes', '_unaligned', '_diagonals', '_inflations'\n\n @classmethod\n @types.apply_annotations\n def _end(cls, array:asarray, axes, invert=False):\n axes = [numeric.normdim(array.ndim, axis) for axis in axes]\n if all(a == b for a, b in enumerate(axes, start=array.ndim-len(axes))):\n return array\n trans = [i for i in range(array.ndim) if i not in axes]\n trans.extend(axes)\n if len(trans) != array.ndim:\n raise Exception('duplicate axes')\n return cls(array, numpy.argsort(trans) if invert else trans)\n\n @classmethod\n def from_end(cls, array, *axes):\n return cls._end(array, axes, invert=True)\n\n @classmethod\n def to_end(cls, array, *axes):\n return 
cls._end(array, axes, invert=False)\n\n @types.apply_annotations\n def __init__(self, func:asarray, axes:types.tuple[types.strictint]):\n assert sorted(axes) == list(range(func.ndim))\n self.func = func\n self.axes = axes\n super().__init__(args=[func], shape=[func.shape[n] for n in axes], dtype=func.dtype)\n\n @property\n def _diagonals(self):\n return tuple(frozenset(self._invaxes[i] for i in axes) for axes in self.func._diagonals)\n\n @property\n def _inflations(self):\n return tuple((self._invaxes[axis], types.frozendict((dofmap, Transpose(func, self._axes_for(dofmap.ndim, self._invaxes[axis]))) for dofmap, func in parts.items())) for axis, parts in self.func._inflations)\n\n @property\n def _unaligned(self):\n unaligned, where = unalign(self.func)\n return unaligned, tuple(self._invaxes[i] for i in where)\n\n @property\n def _invaxes(self):\n return tuple(numpy.argsort(self.axes))\n\n def _simplified(self):\n if self.axes == tuple(range(self.ndim)):\n return self.func\n return self.func._transpose(self.axes)\n\n def evalf(self, arr):\n return arr.transpose(self.axes)\n\n @property\n def _node_details(self):\n return ','.join(map(str, self.axes))\n\n def _transpose(self, axes):\n if axes == self._invaxes:\n # NOTE: While we could leave this particular simplification to be dealt\n # with by Transpose, the benefit of handling it directly is that _add and\n # _multiply can rely on _transpose for the right hand side without having\n # to separately account for the trivial case.\n return self.func\n newaxes = [self.axes[i] for i in axes]\n return Transpose(self.func, newaxes)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n orig1, orig2 = sorted(self.axes[axis] for axis in [axis1, axis2])\n if orig1 == self.ndim-2:\n return Transpose(TakeDiag(self.func), (*self.axes[:axis1], *self.axes[axis1+1:axis2], *self.axes[axis2+1:], self.ndim-2))\n trytakediag = self.func._takediag(orig1, orig2)\n if trytakediag is not None:\n return Transpose(trytakediag, [ax-(ax>orig1)-(ax>orig2) for ax in self.axes[:axis1] + self.axes[axis1+1:axis2] + self.axes[axis2+1:]] + [self.ndim-2])\n\n def _sum(self, i):\n axis = self.axes[i]\n trysum = self.func._sum(axis)\n if trysum is not None:\n axes = [ax-(ax>axis) for ax in self.axes if ax != axis]\n return Transpose(trysum, axes)\n if axis == self.ndim - 1:\n return Transpose(Sum(self.func), self._axes_for(0, i))\n\n def _derivative(self, var, seen):\n return transpose(derivative(self.func, var, seen), self.axes+tuple(range(self.ndim, self.ndim+var.ndim)))\n\n def _multiply(self, other):\n other_trans = other._transpose(self._invaxes)\n if other_trans is not None and not isinstance(other_trans, Transpose):\n # The second clause is to avoid infinite recursions; see\n # tests.test_evaluable.simplify.test_multiply_transpose.\n return Transpose(Multiply([self.func, other_trans]), self.axes)\n trymultiply = self.func._multiply(Transpose(other, self._invaxes))\n if trymultiply is not None:\n return Transpose(trymultiply, self.axes)\n\n def _add(self, other):\n other_trans = other._transpose(self._invaxes)\n if other_trans is not None and not isinstance(other_trans, Transpose):\n # The second clause is to avoid infinite recursions\n return Transpose(self.func + other_trans, self.axes)\n tryadd = self.func._add(Transpose(other, self._invaxes))\n if tryadd is not None:\n return Transpose(tryadd, self.axes)\n\n def _take(self, indices, axis):\n trytake = self.func._take(indices, self.axes[axis])\n if trytake is not None:\n return Transpose(trytake, 
self._axes_for(indices.ndim, axis))\n if self.axes[axis] == self.ndim - 1:\n return Transpose(Take(self.func, indices), self._axes_for(indices.ndim, axis))\n\n def _axes_for(self, ndim, axis):\n funcaxis = self.axes[axis]\n axes = [ax+(ax>funcaxis)*(ndim-1) for ax in self.axes if ax != funcaxis]\n axes[axis:axis] = range(funcaxis, funcaxis + ndim)\n return axes\n\n def _power(self, n):\n n_trans = Transpose(n, self._invaxes)\n return Transpose(Power(self.func, n_trans), self.axes)\n\n def _sign(self):\n return Transpose(Sign(self.func), self.axes)\n\n def _unravel(self, axis, shape):\n orig_axis = self.axes[axis]\n tryunravel = self.func._unravel(orig_axis, shape)\n if tryunravel is not None:\n axes = [ax + (ax>orig_axis) for ax in self.axes]\n axes.insert(axis+1, orig_axis+1)\n return Transpose(tryunravel, axes)\n\n def _product(self):\n if self.axes[-1] == self.ndim-1:\n return Transpose(Product(self.func), self.axes[:-1])\n\n def _determinant(self, axis1, axis2):\n orig1, orig2 = self.axes[axis1], self.axes[axis2]\n trydet = self.func._determinant(orig1, orig2)\n if trydet:\n axes = [ax-(ax>orig1)-(ax>orig2) for ax in self.axes if ax != orig1 and ax != orig2]\n return Transpose(trydet, axes)\n\n def _inverse(self, axis1, axis2):\n tryinv = self.func._inverse(self.axes[axis1], self.axes[axis2])\n if tryinv:\n return Transpose(tryinv, self.axes)\n\n def _ravel(self, axis):\n if self.axes[axis] == self.ndim-2 and self.axes[axis+1] == self.ndim-1:\n return Transpose(Ravel(self.func), self.axes[:-1])\n\n def _inflate(self, dofmap, length, axis):\n i = self.axes[axis] if dofmap.ndim else self.func.ndim\n if self.axes[axis:axis+dofmap.ndim] == tuple(range(i,i+dofmap.ndim)):\n tryinflate = self.func._inflate(dofmap, length, i)\n if tryinflate is not None:\n axes = [ax-(ax>i)*(dofmap.ndim-1) for ax in self.axes]\n axes[axis:axis+dofmap.ndim] = i,\n return Transpose(tryinflate, axes)\n\n def _diagonalize(self, axis):\n trydiagonalize = self.func._diagonalize(self.axes[axis])\n if trydiagonalize is not None:\n return Transpose(trydiagonalize, self.axes + (self.ndim,))\n\n def _insertaxis(self, axis, length):\n return Transpose(InsertAxis(self.func, length), self.axes[:axis] + (self.ndim,) + self.axes[axis:])\n\n def _loopsum(self, index):\n return Transpose(loop_sum(self.func, index), self.axes)\n\n @property\n def _assparse(self):\n return tuple((*(indices[i] for i in self.axes), values) for *indices, values in self.func._assparse)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Product(Array):\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert func.dtype != bool, 'Product({})'.format(func)\n self.func = func\n super().__init__(args=[func], shape=func.shape[:-1], dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.func.shape[-1], 1):\n return get(self.func, self.ndim, 0)\n return self.func._product()\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+1\n return numpy.product(arr, axis=-1)\n\n def _derivative(self, var, seen):\n grad = derivative(self.func, var, seen)\n funcs = Product(insertaxis(self.func, -2, self.func.shape[-1]) + Diagonalize(1 - self.func)) # replace diagonal entries by 1\n return einsum('Ai,AiB->AB', funcs, grad)\n\n def _take(self, indices, axis):\n return Product(_take(self.func, indices, axis))\n\n def _takediag(self, axis1, axis2):\n return product(_takediag(self.func, axis1, axis2), self.ndim-2)\n\nclass Inverse(Array):\n '''\n Matrix inverse of ``func`` over the last two axes. 
All other axes are\n treated element-wise.\n '''\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert func.ndim >= 2 and equalindex(func.shape[-1], func.shape[-2])\n self.func = func\n super().__init__(args=[func], shape=func.shape, dtype=complex if func.dtype == complex else float)\n\n def _simplified(self):\n result = self.func._inverse(self.ndim-2, self.ndim-1)\n if result is not None:\n return result\n if equalindex(self.func.shape[-1], 1):\n return reciprocal(self.func)\n\n def evalf(self, arr):\n return numeric.inv(arr)\n\n def _derivative(self, var, seen):\n return -einsum('Aij,AjkB,Akl->AilB', self, derivative(self.func, var, seen), self)\n\n def _eig(self, symmetric):\n eigval, eigvec = Eig(self.func, symmetric)\n return Tuple((reciprocal(eigval), eigvec))\n\n def _determinant(self, axis1, axis2):\n if sorted([axis1, axis2]) == [self.ndim-2, self.ndim-1]:\n return reciprocal(Determinant(self.func))\n\n def _take(self, indices, axis):\n if axis < self.ndim - 2:\n return Inverse(_take(self.func, indices, axis))\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 < self.ndim-2:\n return inverse(_takediag(self.func, axis1, axis2), (self.ndim-4, self.ndim-3))\n\n def _unravel(self, axis, shape):\n if axis < self.ndim-2:\n return Inverse(unravel(self.func, axis, shape))\n\nclass Interpolate(Array):\n 'interpolate uniformly spaced data; stepwise for now'\n\n __slots__ = 'xp', 'fp', 'left', 'right'\n\n @types.apply_annotations\n def __init__(self, x:asarray, xp:types.arraydata, fp:types.arraydata, left:types.strictfloat=None, right:types.strictfloat=None):\n xp = numpy.asarray(xp)\n fp = numpy.asarray(fp)\n assert xp.ndim == fp.ndim == 1\n if not numpy.greater(numpy.diff(xp), 0).all():\n warnings.warn('supplied x-values are non-increasing')\n assert x.ndim == 0\n self.xp = xp\n self.fp = fp\n self.left = left\n self.right = right\n super().__init__(args=[x], shape=(), dtype=float)\n\n def evalf(self, x):\n return numpy.interp(x, self.xp, self.fp, self.left, self.right)\n\nclass Determinant(Array):\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert isarray(func) and func.ndim >= 2 and equalindex(func.shape[-1], func.shape[-2])\n self.func = func\n super().__init__(args=[func], shape=func.shape[:-2], dtype=complex if func.dtype == complex else float)\n\n def _simplified(self):\n result = self.func._determinant(self.ndim, self.ndim+1)\n if result is not None:\n return result\n if equalindex(self.func.shape[-1], 1):\n return Take(Take(self.func, zeros((), int)), zeros((), int))\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+2\n return numpy.linalg.det(arr)\n\n def _derivative(self, var, seen):\n return einsum('A,Aji,AijB->AB', self, inverse(self.func), derivative(self.func, var, seen))\n\n def _take(self, index, axis):\n return Determinant(_take(self.func, index, axis))\n\n def _takediag(self, axis1, axis2):\n return determinant(_takediag(self.func, axis1, axis2), (self.ndim-2, self.ndim-1))\n\nclass Multiply(Array):\n\n __slots__ = 'funcs',\n\n @types.apply_annotations\n def __init__(self, funcs:types.frozenmultiset[asarray]):\n self.funcs = funcs\n func1, func2 = funcs\n assert equalshape(func1.shape, func2.shape) and func1.dtype == func2.dtype != bool, 'Multiply({}, {})'.format(func1, func2)\n super().__init__(args=self.funcs, shape=func1.shape, dtype=func1.dtype)\n\n def _simplified(self):\n func1, func2 = self.funcs\n if isuniform(func1, 1):\n return func2\n if 
isuniform(func2, 1):\n return func1\n unaligned1, unaligned2, where = unalign(func1, func2)\n if len(where) != self.ndim:\n return align(unaligned1 * unaligned2, where, self.shape)\n for axis1, axis2, *other in map(sorted, func1._diagonals or func2._diagonals):\n return diagonalize(Multiply(takediag(func, axis1, axis2) for func in self.funcs), axis1, axis2)\n for i, parts in func1._inflations:\n return util.sum(_inflate(f * _take(func2, dofmap, i), dofmap, self.shape[i], i) for dofmap, f in parts.items())\n for i, parts in func2._inflations:\n return util.sum(_inflate(_take(func1, dofmap, i) * f, dofmap, self.shape[i], i) for dofmap, f in parts.items())\n return func1._multiply(func2) or func2._multiply(func1)\n\n def _optimized_for_numpy(self):\n func1, func2 = self.funcs\n if isuniform(func1, -1) and func2.dtype != bool:\n return Negative(func2)\n if isuniform(func2, -1) and func1.dtype != bool:\n return Negative(func1)\n if func1 == sign(func2):\n return Absolute(func2)\n if func2 == sign(func1):\n return Absolute(func1)\n if not self.ndim:\n return\n unaligned1, where1 = unalign(func1)\n unaligned2, where2 = unalign(func2)\n return Einsum((unaligned1, unaligned2), (where1, where2), tuple(range(self.ndim)))\n\n def evalf(self, arr1, arr2):\n return arr1 * arr2\n\n def _sum(self, axis):\n func1, func2 = self.funcs\n unaligned, where = unalign(func1)\n if axis not in where:\n return align(unaligned, [i-(i>axis) for i in where], self.shape[:axis]+self.shape[axis+1:]) * sum(func2, axis)\n unaligned, where = unalign(func2)\n if axis not in where:\n return sum(func1, axis) * align(unaligned, [i-(i>axis) for i in where], self.shape[:axis]+self.shape[axis+1:])\n\n def _add(self, other):\n func1, func2 = self.funcs\n if isinstance(other, Multiply):\n for common in self.funcs & other.funcs:\n return common * Add(self.funcs + other.funcs - [common, common])\n\n def _determinant(self, axis1, axis2):\n func1, func2 = self.funcs\n axis1, axis2 = sorted([axis1, axis2])\n if equalindex(self.shape[axis1], 1) and equalindex(self.shape[axis2], 1):\n return Multiply([determinant(func1, (axis1, axis2)), determinant(func2, (axis1, axis2))])\n unaligned1, where1 = unalign(func1)\n if {axis1, axis2}.isdisjoint(where1):\n d2 = determinant(func2, (axis1, axis2))\n d1 = align(unaligned1**self.shape[axis1], [i-(i>axis1)-(i>axis2) for i in where1 if i not in (axis1, axis2)], d2.shape)\n return d1 * d2\n unaligned2, where2 = unalign(func2)\n if {axis1, axis2}.isdisjoint(where2):\n d1 = determinant(func1, (axis1, axis2))\n d2 = align(unaligned2**self.shape[axis1], [i-(i>axis1)-(i>axis2) for i in where2 if i not in (axis1, axis2)], d1.shape)\n return d1 * d2\n\n def _product(self):\n func1, func2 = self.funcs\n return Multiply([Product(func1), Product(func2)])\n\n def _multiply(self, other):\n func1, func2 = self.funcs\n func1_other = func1._multiply(other)\n if func1_other is not None:\n return Multiply([func1_other, func2])\n func2_other = func2._multiply(other)\n if func2_other is not None:\n return Multiply([func1, func2_other])\n # Reorder the multiplications such that the amount of flops is minimized.\n # The flops are counted based on the lower int bounds of the shape and loop\n # lengths, excluding common inserted axes and invariant loops of the inner\n # product.\n sizes = []\n unaligned = tuple(map(unalign, (func1, func2, other)))\n for (f1, w1), (f2, w2) in itertools.combinations(unaligned, 2):\n lengths = [self.shape[i] for i in set(w1) | set(w2)]\n lengths += [arg.length for arg in f1.arguments | 
f2.arguments if isinstance(arg, _LoopIndex)]\n sizes.append(util.product((max(1, length._intbounds[0]) for length in lengths), 1))\n min_size = min(sizes)\n if sizes[0] == min_size:\n return # status quo\n elif sizes[1] == min_size:\n return (func1 * other) * func2\n elif sizes[2] == min_size:\n return (func2 * other) * func1\n\n def _derivative(self, var, seen):\n func1, func2 = self.funcs\n return einsum('A,AB->AB', func1, derivative(func2, var, seen)) \\\n + einsum('A,AB->AB', func2, derivative(func1, var, seen))\n\n def _takediag(self, axis1, axis2):\n func1, func2 = self.funcs\n return Multiply([_takediag(func1, axis1, axis2), _takediag(func2, axis1, axis2)])\n\n def _take(self, index, axis):\n func1, func2 = self.funcs\n return Multiply([_take(func1, index, axis), _take(func2, index, axis)])\n\n def _sign(self):\n return Multiply([Sign(func) for func in self.funcs])\n\n def _unravel(self, axis, shape):\n return Multiply([unravel(func, axis, shape) for func in self.funcs])\n\n def _inverse(self, axis1, axis2):\n func1, func2 = self.funcs\n if set(unalign(func1)[1]).isdisjoint((axis1, axis2)):\n return divide(inverse(func2, (axis1, axis2)), func1)\n if set(unalign(func2)[1]).isdisjoint((axis1, axis2)):\n return divide(inverse(func1, (axis1, axis2)), func2)\n\n @property\n def _assparse(self):\n func1, func2 = self.funcs\n uninserted1, where1 = unalign(func1)\n uninserted2, where2 = unalign(func2)\n if not set(where1) & set(where2):\n sparse = []\n for *indices1, values1 in uninserted1._assparse:\n for *indices2, values2 in uninserted2._assparse:\n indices = [None] * self.ndim\n for i, j in enumerate(where1):\n indices[j] = appendaxes(indices1[i], values2.shape)\n for i, j in enumerate(where2):\n indices[j] = prependaxes(indices2[i], values1.shape)\n assert all(indices)\n values = appendaxes(values1, values2.shape) * prependaxes(values2, values1.shape)\n sparse.append((*indices, values))\n return tuple(sparse)\n return super()._assparse\n\n def _intbounds_impl(self):\n func1, func2 = self.funcs\n extrema = [b1 and b2 and b1 * b2 for b1 in func1._intbounds for b2 in func2._intbounds]\n return min(extrema), max(extrema)\n\nclass Add(Array):\n\n __slots__ = 'funcs',\n __cache__ = '_inflations'\n\n @types.apply_annotations\n def __init__(self, funcs:types.frozenmultiset[asarray]):\n self.funcs = funcs\n func1, func2 = funcs\n assert equalshape(func1.shape, func2.shape) and func1.dtype == func2.dtype != bool, 'Add({}, {})'.format(func1, func2)\n super().__init__(args=self.funcs, shape=func1.shape, dtype=func1.dtype)\n\n @property\n def _inflations(self):\n func1, func2 = self.funcs\n func2_inflations = dict(func2._inflations)\n inflations = []\n for axis, parts1 in func1._inflations:\n if axis not in func2_inflations:\n continue\n parts2 = func2_inflations[axis]\n dofmaps = set(parts1) | set(parts2)\n if (len(parts1) < len(dofmaps) and len(parts2) < len(dofmaps) # neither set is a subset of the other; total may be dense\n and self.shape[axis].isconstant and all(dofmap.isconstant for dofmap in dofmaps)):\n mask = numpy.zeros(int(self.shape[axis]), dtype=bool)\n for dofmap in dofmaps:\n mask[dofmap.eval()] = True\n if mask.all(): # axis adds up to dense\n continue\n inflations.append((axis, types.frozendict((dofmap, util.sum(parts[dofmap] for parts in (parts1, parts2) if dofmap in parts)) for dofmap in dofmaps)))\n return tuple(inflations)\n\n def _simplified(self):\n func1, func2 = self.funcs\n if func1 == func2:\n return multiply(func1, 2)\n for axes1 in func1._diagonals:\n for axes2 in 
func2._diagonals:\n if len(axes1 & axes2) >= 2:\n axes = sorted(axes1 & axes2)[:2]\n return diagonalize(takediag(func1, *axes) + takediag(func2, *axes), *axes)\n # NOTE: While it is tempting to use the _inflations attribute to push\n # additions through common inflations, doing so may result in infinite\n # recursion in case two or more axes are inflated. This mechanism is\n # illustrated in the following schematic, in which <I> and <J> represent\n # inflations along axis 1 and <K> and <L> inflations along axis 2:\n #\n # A B C D E F G H\n # <I> <J> <I> <J> <I> <J> <I> <J>\n # .-- \\+/ \\+/ \\+/ \\+/ <--.\n # | \\__<K>__/ \\__<L>__/ |\n # | \\_______+_______/ |\n # | |\n # | A E C G B F D H |\n # | <K> <L> <K> <L> <K> <L> <K> <L> |\n # '--> \\+/ \\+/ \\+/ \\+/ --'\n # \\__<I>__/ \\__<J>__/\n # \\_______+_______/\n #\n # We instead rely on Inflate._add to handle this situation.\n return func1._add(func2) or func2._add(func1)\n\n def evalf(self, arr1, arr2=None):\n return arr1 + arr2\n\n def _sum(self, axis):\n return Add([sum(func, axis) for func in self.funcs])\n\n def _derivative(self, var, seen):\n func1, func2 = self.funcs\n return derivative(func1, var, seen) + derivative(func2, var, seen)\n\n def _takediag(self, axis1, axis2):\n func1, func2 = self.funcs\n return Add([_takediag(func1, axis1, axis2), _takediag(func2, axis1, axis2)])\n\n def _take(self, index, axis):\n func1, func2 = self.funcs\n return Add([_take(func1, index, axis), _take(func2, index, axis)])\n\n def _add(self, other):\n func1, func2 = self.funcs\n func1_other = func1._add(other)\n if func1_other is not None:\n return Add([func1_other, func2])\n func2_other = func2._add(other)\n if func2_other is not None:\n return Add([func1, func2_other])\n\n def _unravel(self, axis, shape):\n return Add([unravel(func, axis, shape) for func in self.funcs])\n\n def _loopsum(self, index):\n if any(index not in func.arguments for func in self.funcs):\n return Add([loop_sum(func, index) for func in self.funcs])\n\n def _multiply(self, other):\n func1, func2 = self.funcs\n if (func1._inflations or func1._diagonals) and (func2._inflations or func2._diagonals):\n # NOTE: As this operation is the precise opposite of Multiply._add, there\n # appears to be a great risk of recursion. 
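After all, Multiply._add could factor the common term\n # straight back out of the expanded sum, recreating the product we\n # started from. 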
However, since both factors\n # are sparse, we can be certain that subsequent simplifications will\n # irreversibly process the new terms before reaching this point.\n return (func1 * other) + (func2 * other)\n\n @property\n def _assparse(self):\n func1, func2 = self.funcs\n return _gathersparsechunks(itertools.chain(func1._assparse, func2._assparse))\n\n def _intbounds_impl(self):\n func1, func2 = self.funcs\n lower1, upper1 = func1._intbounds\n lower2, upper2 = func2._intbounds\n return lower1 + lower2, upper1 + upper2\n\nclass Einsum(Array):\n\n __slots__ = 'args', 'out_idx', 'args_idx', '_einsumfmt', '_has_summed_axes'\n\n @types.apply_annotations\n def __init__(self, args:asarrays, args_idx:types.tuple[types.tuple[types.strictint]], out_idx:types.tuple[types.strictint]):\n if len(args_idx) != len(args):\n raise ValueError('Expected one list of indices for every argument, but got {} and {}, respectively.'.format(len(args_idx), len(args)))\n for iarg, (idx, arg) in enumerate(zip(args_idx, args), 1):\n if len(idx) != arg.ndim:\n raise ValueError('Expected one index for every axis of argument {}, but got {} and {}, respectively.'.format(iarg, len(idx), arg.ndim))\n dtype = args[0].dtype\n if dtype == bool or any(arg.dtype != dtype for arg in args[1:]):\n raise ValueError('Inconsistent or invalid dtypes.')\n if len(out_idx) != len(set(out_idx)):\n raise ValueError('Repeated output indices.')\n lengths = {}\n for idx, arg in zip(args_idx, args):\n for i, length in zip(idx, arg.shape):\n if i not in lengths:\n lengths[i] = length\n elif not equalindex(lengths[i], length):\n raise ValueError('Axes with index {} have different lengths.'.format(i))\n try:\n shape = [lengths[i] for i in out_idx]\n except KeyError:\n raise ValueError('Output axis {} is not listed in any of the arguments.'.format(', '.join(str(i) for i in out_idx if i not in lengths)))\n self.args = args\n self.args_idx = args_idx\n self.out_idx = out_idx\n self._einsumfmt = ','.join(''.join(chr(97+i) for i in idx) for idx in args_idx) + '->' + ''.join(chr(97+i) for i in out_idx)\n self._has_summed_axes = len(lengths) > len(out_idx)\n super().__init__(args=self.args, shape=shape, dtype=dtype)\n\n def evalf(self, *args):\n if self._has_summed_axes:\n args = tuple(numpy.asarray(arg, order='F') for arg in args)\n return numpy.core.multiarray.c_einsum(self._einsumfmt, *args)\n\n @property\n def _node_details(self):\n return self._einsumfmt\n\n def _simplified(self):\n for i, arg in enumerate(self.args):\n if isinstance(arg, Transpose): # absorb `Transpose`\n idx = tuple(map(self.args_idx[i].__getitem__, numpy.argsort(arg.axes)))\n return Einsum(self.args[:i]+(arg.func,)+self.args[i+1:], self.args_idx[:i]+(idx,)+self.args_idx[i+1:], self.out_idx)\n\n def _sum(self, axis):\n if not (0 <= axis < self.ndim):\n raise IndexError('Axis out of range.')\n return Einsum(self.args, self.args_idx, self.out_idx[:axis] + self.out_idx[axis+1:])\n\n def _takediag(self, axis1, axis2):\n if not (0 <= axis1 < axis2 < self.ndim):\n raise IndexError('Axis out of range.')\n ikeep, irm = self.out_idx[axis1], self.out_idx[axis2]\n args_idx = tuple(tuple(ikeep if i == irm else i for i in idx) for idx in self.args_idx)\n return Einsum(self.args, args_idx, self.out_idx[:axis1] + self.out_idx[axis1+1:axis2] + self.out_idx[axis2+1:] + (ikeep,))\n\nclass Sum(Array):\n\n __slots__ = 'func'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert func.dtype != bool, 'Sum({})'.format(func)\n self.func = func\n super().__init__(args=[func], 
shape=func.shape[:-1], dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.func.shape[-1], 1):\n return Take(self.func, 0)\n return self.func._sum(self.ndim)\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+1\n return numpy.sum(arr, -1)\n\n def _sum(self, axis):\n trysum = self.func._sum(axis)\n if trysum is not None:\n return Sum(trysum)\n\n def _derivative(self, var, seen):\n return sum(derivative(self.func, var, seen), self.ndim)\n\n @property\n def _assparse(self):\n chunks = []\n for *indices, _rmidx, values in self.func._assparse:\n if self.ndim == 0:\n nsum = values.ndim\n else:\n *indices, where = unalign(*indices)\n values = transpose(values, where + tuple(i for i in range(values.ndim) if i not in where))\n nsum = values.ndim - len(where)\n for i in range(nsum):\n values = Sum(values)\n chunks.append((*indices, values))\n return _gathersparsechunks(chunks)\n\n def _intbounds_impl(self):\n lower_func, upper_func = self.func._intbounds\n lower_length, upper_length = self.func.shape[-1]._intbounds\n if upper_length == 0:\n return 0, 0\n elif lower_length == 0:\n return min(0, lower_func * upper_length), max(0, upper_func * upper_length)\n else:\n return min(lower_func * lower_length, lower_func * upper_length), max(upper_func * lower_length, upper_func * upper_length)\n\nclass TakeDiag(Array):\n\n __slots__ = 'func'\n __cache__ = '_assparse'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n if func.ndim < 2:\n raise Exception('takediag requires an argument of dimension >= 2')\n if not equalindex(func.shape[-1], func.shape[-2]):\n raise Exception('takediag axes do not match')\n self.func = func\n super().__init__(args=[func], shape=func.shape[:-1], dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.shape[-1], 1):\n return Take(self.func, 0)\n return self.func._takediag(self.ndim-1, self.ndim)\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+1\n return numpy.einsum('...kk->...k', arr, optimize=False)\n\n def _derivative(self, var, seen):\n return takediag(derivative(self.func, var, seen), self.ndim-1, self.ndim)\n\n def _take(self, index, axis):\n if axis < self.ndim - 1:\n return TakeDiag(_take(self.func, index, axis))\n func = _take(Take(self.func, index), index, self.ndim-1)\n for i in reversed(range(self.ndim-1, self.ndim-1+index.ndim)):\n func = takediag(func, i, i+index.ndim)\n return func\n\n def _sum(self, axis):\n if axis != self.ndim - 1:\n return TakeDiag(sum(self.func, axis))\n\n @property\n def _assparse(self):\n chunks = []\n for *indices, values in self.func._assparse:\n if indices[-2] == indices[-1]:\n chunks.append((*indices[:-1], values))\n else:\n *indices, values = map(_flat, (*indices, values))\n mask = Equal(indices[-2], indices[-1])\n chunks.append(tuple(take(arr, mask, 0) for arr in (*indices[:-1], values)))\n return _gathersparsechunks(chunks)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Take(Array):\n\n __slots__ = 'func', 'indices'\n\n @types.apply_annotations\n def __init__(self, func:asarray, indices:asarray):\n if func.ndim == 0:\n raise Exception('cannot take a scalar function')\n if indices.dtype != int:\n raise Exception('invalid indices argument for take')\n self.func = func\n self.indices = indices\n super().__init__(args=[func,indices], shape=func.shape[:-1]+indices.shape, dtype=func.dtype)\n\n def _simplified(self):\n if self.indices.size == 0:\n return zeros_like(self)\n unaligned, where = unalign(self.indices)\n if len(where) < self.indices.ndim:\n n = 
self.func.ndim-1\n return align(Take(self.func, unaligned), (*range(n), *(n+i for i in where)), self.shape)\n trytake = self.func._take(self.indices, self.func.ndim-1) or \\\n self.indices._rtake(self.func, self.func.ndim-1)\n if trytake:\n return trytake\n for axis, parts in self.func._inflations:\n if axis == self.func.ndim - 1:\n return util.sum(Inflate(func, dofmap, self.func.shape[-1])._take(self.indices, self.func.ndim - 1) for dofmap, func in parts.items())\n\n def evalf(self, arr, indices):\n return arr[...,indices]\n\n def _derivative(self, var, seen):\n return _take(derivative(self.func, var, seen), self.indices, self.func.ndim-1)\n\n def _take(self, index, axis):\n if axis >= self.func.ndim-1:\n return Take(self.func, _take(self.indices, index, axis-self.func.ndim+1))\n trytake = self.func._take(index, axis)\n if trytake is not None:\n return Take(trytake, self.indices)\n\n def _sum(self, axis):\n if axis < self.func.ndim - 1:\n return Take(sum(self.func, axis), self.indices)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Power(Array):\n\n __slots__ = 'func', 'power'\n\n @types.apply_annotations\n def __init__(self, func:asarray, power:asarray):\n assert equalshape(func.shape, power.shape) and func.dtype == power.dtype != bool, 'Power({}, {})'.format(func, power)\n if power.dtype == int:\n assert power._intbounds[0] >= 0\n self.func = func\n self.power = power\n super().__init__(args=[func,power], shape=func.shape, dtype=func.dtype)\n\n def _simplified(self):\n if iszero(self.power):\n return ones_like(self)\n elif isuniform(self.power, 1):\n return self.func\n elif isuniform(self.power, 2):\n return self.func * self.func\n else:\n return self.func._power(self.power)\n\n def _optimized_for_numpy(self):\n if isuniform(self.power, -1):\n return Reciprocal(self.func)\n elif isuniform(self.power, -2):\n return Reciprocal(self.func * self.func)\n else:\n return self._simplified()\n\n def evalf(self, base, exp):\n return numpy.power(base, exp)\n\n def _derivative(self, var, seen):\n if self.power.isconstant:\n p = self.power.eval()\n return einsum('A,A,AB->AB', p, power(self.func, p - (p!=0)), derivative(self.func, var, seen))\n # self = func**power\n # ln self = power * ln func\n # self` / self = power` * ln func + power * func` / func\n # self` = power` * ln func * self + power * func` * func**(power-1)\n return einsum('A,A,AB->AB', self.power, power(self.func, self.power - 1), derivative(self.func, var, seen)) \\\n + einsum('A,A,AB->AB', ln(self.func), self, derivative(self.power, var, seen))\n\n def _power(self, n):\n func = self.func\n newpower = Multiply([self.power, n])\n if iszero(self.power % 2) and not iszero(newpower % 2):\n func = abs(func)\n return Power(func, newpower)\n\n def _takediag(self, axis1, axis2):\n return Power(_takediag(self.func, axis1, axis2), _takediag(self.power, axis1, axis2))\n\n def _take(self, index, axis):\n return Power(_take(self.func, index, axis), _take(self.power, index, axis))\n\n def _unravel(self, axis, shape):\n return Power(unravel(self.func, axis, shape), unravel(self.power, axis, shape))\n\nclass Pointwise(Array):\n '''\n Abstract base class for pointwise array functions.\n '''\n\n __slots__ = 'args',\n\n deriv = None\n\n @types.apply_annotations\n def __init__(self, *args:asarrays):\n retval = self.evalf(*[numpy.ones((), dtype=arg.dtype) for arg in args])\n shape0 = args[0].shape\n assert all(equalshape(arg.shape, shape0) for arg in args[1:]), 'pointwise arguments have inconsistent shapes'\n self.args = args\n 
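# NOTE (added): the ``retval`` probe above infers the result dtype by\n # applying ``evalf`` to scalar ones, piggybacking on numpy's type\n # promotion rules, e.g.:\n #\n # >>> numpy.greater(numpy.ones(()), numpy.ones(())).dtype\n # dtype('bool')\n 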
super().__init__(args=args, shape=shape0, dtype=retval.dtype)\n\n @classmethod\n def outer(cls, *args):\n '''Alternative constructor that outer-aligns the arguments.\n\n The output shape of this pointwise function is the sum of all shapes of its\n arguments. When called with multiple arguments, the first argument will be\n appended with singleton axes to match the output shape, the second argument\n will be prepended with as many singleton axes as the dimension of the\n original first argument and appended to match the output shape, and so\n forth and so on.\n '''\n\n args = tuple(map(asarray, args))\n shape = builtins.sum((arg.shape for arg in args), ())\n offsets = numpy.cumsum([0]+[arg.ndim for arg in args])\n return cls(*(prependaxes(appendaxes(arg, shape[r:]), shape[:l]) for arg, l, r in zip(args, offsets[:-1], offsets[1:])))\n\n def _simplified(self):\n if self.isconstant:\n retval = self.eval()\n return Constant(retval)\n if len(self.args) == 1 and isinstance(self.args[0], Transpose):\n arg, = self.args\n return Transpose(self.__class__(arg.func), arg.axes)\n *uninserted, where = unalign(*self.args)\n if len(where) != self.ndim:\n return align(self.__class__(*uninserted), where, self.shape)\n\n def _derivative(self, var, seen):\n if self.deriv is None:\n return super()._derivative(var, seen)\n return util.sum(einsum('A,AB->AB', deriv(*self.args), derivative(arg, var, seen)) for arg, deriv in zip(self.args, self.deriv))\n\n def _takediag(self, axis1, axis2):\n return self.__class__(*[_takediag(arg, axis1, axis2) for arg in self.args])\n\n def _take(self, index, axis):\n return self.__class__(*[_take(arg, index, axis) for arg in self.args])\n\n def _unravel(self, axis, shape):\n return self.__class__(*[unravel(arg, axis, shape) for arg in self.args])\n\nclass Reciprocal(Pointwise):\n __slots__ = ()\n evalf = functools.partial(numpy.reciprocal, dtype=float)\n\nclass Negative(Pointwise):\n __slots__ = ()\n evalf = numpy.negative\n\n def _intbounds_impl(self):\n lower, upper = self.args[0]._intbounds\n return -upper, -lower\n\nclass FloorDivide(Pointwise):\n __slots__ = ()\n evalf = numpy.floor_divide\n\nclass Absolute(Pointwise):\n __slots__ = ()\n evalf = numpy.absolute\n\n def _intbounds_impl(self):\n lower, upper = self.args[0]._intbounds\n extrema = builtins.abs(lower), builtins.abs(upper)\n if lower <= 0 and upper >= 0:\n return 0, max(extrema)\n else:\n return min(extrema), max(extrema)\n\nclass Cos(Pointwise):\n 'Cosine, element-wise.'\n __slots__ = ()\n evalf = numpy.cos\n deriv = lambda x: -Sin(x),\n\nclass Sin(Pointwise):\n 'Sine, element-wise.'\n __slots__ = ()\n evalf = numpy.sin\n deriv = Cos,\n\nclass Tan(Pointwise):\n 'Tangent, element-wise.'\n __slots__ = ()\n evalf = numpy.tan\n deriv = lambda x: Cos(x)**-2,\n\nclass ArcSin(Pointwise):\n 'Inverse sine, element-wise.'\n __slots__ = ()\n evalf = numpy.arcsin\n deriv = lambda x: reciprocal(sqrt(1-x**2)),\n\nclass ArcCos(Pointwise):\n 'Inverse cosine, element-wise.'\n __slots__ = ()\n evalf = numpy.arccos\n deriv = lambda x: -reciprocal(sqrt(1-x**2)),\n\nclass ArcTan(Pointwise):\n 'Inverse tangent, element-wise.'\n __slots__ = ()\n evalf = numpy.arctan\n deriv = lambda x: reciprocal(1+x**2),\n\nclass Exp(Pointwise):\n __slots__ = ()\n evalf = numpy.exp\n deriv = lambda x: Exp(x),\n\nclass Log(Pointwise):\n __slots__ = ()\n evalf = numpy.log\n deriv = lambda x: reciprocal(x),\n\nclass Mod(Pointwise):\n __slots__ = ()\n evalf = numpy.mod\n\n def _intbounds_impl(self):\n dividend, divisor = self.args\n lower_divisor, 
upper_divisor = divisor._intbounds\n if lower_divisor > 0:\n lower_dividend, upper_dividend = dividend._intbounds\n if 0 <= lower_dividend and upper_dividend < lower_divisor:\n return lower_dividend, upper_dividend\n else:\n return 0, upper_divisor - 1\n else:\n return super()._intbounds_impl()\n\n def _simplified(self):\n dividend, divisor = self.args\n lower_divisor, upper_divisor = divisor._intbounds\n if lower_divisor > 0:\n lower_dividend, upper_dividend = dividend._intbounds\n if 0 <= lower_dividend and upper_dividend < lower_divisor:\n return dividend\n\nclass ArcTan2(Pointwise):\n __slots__ = ()\n evalf = numpy.arctan2\n deriv = lambda x, y: y / (x**2 + y**2), lambda x, y: -x / (x**2 + y**2)\n\nclass Greater(Pointwise):\n __slots__ = ()\n evalf = numpy.greater\n deriv = (lambda a, b: Zeros(a.shape, dtype=int),) * 2\n\nclass Equal(Pointwise):\n __slots__ = ()\n evalf = numpy.equal\n deriv = (lambda a, b: Zeros(a.shape, dtype=int),) * 2\n\nclass Less(Pointwise):\n __slots__ = ()\n evalf = numpy.less\n deriv = (lambda a, b: Zeros(a.shape, dtype=int),) * 2\n\nclass Minimum(Pointwise):\n __slots__ = ()\n evalf = numpy.minimum\n deriv = lambda x, y: .5 - .5 * Sign(x - y), lambda x, y: .5 + .5 * Sign(x - y)\n\n def _simplified(self):\n if self.dtype == int:\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n if upper1 <= lower2:\n return self.args[0]\n elif upper2 <= lower1:\n return self.args[1]\n return super()._simplified()\n\n def _intbounds_impl(self):\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n return min(lower1, lower2), min(upper1, upper2)\n\nclass Maximum(Pointwise):\n __slots__ = ()\n evalf = numpy.maximum\n deriv = lambda x, y: .5 + .5 * Sign(x - y), lambda x, y: .5 - .5 * Sign(x - y)\n\n def _simplified(self):\n if self.dtype == int:\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n if upper2 <= lower1:\n return self.args[0]\n elif upper1 <= lower2:\n return self.args[1]\n return super()._simplified()\n\n def _intbounds_impl(self):\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n return max(lower1, lower2), max(upper1, upper2)\n\nclass AsType(Pointwise):\n\n @types.apply_annotations\n def __init__(self, arg: asarray):\n super().__init__(arg)\n dtypes = bool, int, float, complex\n if self.dtype in dtypes[:dtypes.index(arg.dtype)]:\n raise TypeError('invalid cast from {} to {}'.format(arg.dtype, self.dtype))\n\n def _derivative(self, var, seen):\n arg, = self.args\n return self.__class__(derivative(arg, var, seen))\n\n def _simplified(self):\n arg, = self.args\n if arg.dtype == self.dtype:\n return arg\n if iszero(arg):\n return zeros_like(self)\n for axis, parts in arg._inflations:\n return util.sum(_inflate(self.__class__(func), dofmap, self.shape[axis], axis) for dofmap, func in parts.items())\n return super()._simplified()\n\n def _intbounds_impl(self):\n if self.args[0].dtype == bool:\n return 0, 1\n else:\n return self.args[0]._intbounds\n\nclass Int(AsType):\n evalf = functools.partial(numpy.array, copy=False, dtype=int)\nclass Float(AsType):\n evalf = functools.partial(numpy.array, copy=False, dtype=float)\nclass Complex(AsType):\n evalf = functools.partial(numpy.array, copy=False, dtype=complex)\n\nastype = {int: Int, float: Float, complex: Complex}\n\nclass Sign(Array):\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n self.func = func\n super().__init__(args=[func], 
shape=func.shape, dtype=func.dtype)\n\n def _simplified(self):\n return self.func._sign()\n\n def evalf(self, arr):\n return numpy.sign(arr)\n\n def _takediag(self, axis1, axis2):\n return Sign(_takediag(self.func, axis1, axis2))\n\n def _take(self, index, axis):\n return Sign(_take(self.func, index, axis))\n\n def _sign(self):\n return self\n\n def _unravel(self, axis, shape):\n return Sign(unravel(self.func, axis, shape))\n\n def _derivative(self, var, seen):\n return Zeros(self.shape + var.shape, dtype=self.dtype)\n\n def _intbounds_impl(self):\n lower, upper = self.func._intbounds\n return int(numpy.sign(lower)), int(numpy.sign(upper))\n\nclass Sampled(Array):\n '''Basis-like identity operator.\n\n Basis-like function that for every point in a predefined set evaluates to the\n unit vector corresponding to its index.\n\n Args\n ----\n points : 1d :class:`Array`\n Present point coordinates.\n expect : 2d :class:`Array`\n Elementwise constant that evaluates to the predefined point coordinates;\n used for error checking and to inherit the shape.\n '''\n\n __slots__ = ()\n\n @types.apply_annotations\n def __init__(self, points:asarray, expect:asarray):\n assert points.ndim == 2\n super().__init__(args=[points, expect], shape=(points.shape[0], expect.shape[0]), dtype=float)\n\n def evalf(self, points, expect):\n assert numpy.equal(points, expect).all(), 'illegal point set'\n return numpy.eye(len(points))\n\[email protected]_annotations\ndef Elemwise(data:types.tuple[types.arraydata], index:asarray, dtype:asdtype):\n unique, indices = util.unique(data)\n if len(unique) == 1:\n return Constant(unique[0])\n # Create shape from data and index, rather than unique and the modified\n # index, in order to avoid potential shape inconsistencies later on.\n shapes = numpy.array([d.shape for d in data])\n shape = [Take(s, index) for s in shapes.T]\n if len(unique) < len(data):\n index = Take(indices, index)\n # Move all axes with constant shape to the left and ravel the remainder.\n is_constant = numpy.all(shapes[1:] == shapes[0], axis=0)\n nconstant = is_constant.sum()\n reorder = numpy.argsort(~is_constant)\n raveled = [numpy.transpose(d, reorder).reshape(*shapes[0, reorder[:nconstant]], -1) for d in unique]\n # Concatenate the raveled axis, take slices, unravel and reorder the axes to\n # the original position.\n concat = numpy.concatenate(raveled, axis=-1)\n if is_constant.all():\n return Take(concat, index)\n var_shape = tuple(shape[i] for i in reorder[nconstant:])\n cumprod = list(var_shape)\n for i in reversed(range(len(var_shape)-1)):\n cumprod[i] *= cumprod[i+1] # work backwards so that the shape check matches in Unravel\n offsets = _SizesToOffsets(asarray([d.shape[-1] for d in raveled]))\n elemwise = Take(concat, Range(cumprod[0]) + Take(offsets, index))\n for i in range(len(var_shape)-1):\n elemwise = Unravel(elemwise, var_shape[i], cumprod[i+1])\n return Transpose(elemwise, tuple(numpy.argsort(reorder)))\n\nclass Eig(Evaluable):\n\n __slots__ = 'symmetric', 'func', '_w_dtype', '_vt_dtype'\n\n @types.apply_annotations\n def __init__(self, func:asarray, symmetric:bool=False):\n assert func.ndim >= 2 and equalindex(func.shape[-1], func.shape[-2])\n self.symmetric = symmetric\n self.func = func\n self._w_dtype = float if symmetric else complex\n self._vt_dtype = float if symmetric and func.dtype != complex else complex\n super().__init__(args=[func])\n\n def __len__(self):\n return 2\n\n def __iter__(self):\n yield ArrayFromTuple(self, index=0, shape=self.func.shape[:-1], dtype=self._w_dtype)\n 
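# (added note) unpacking an ``Eig`` therefore yields the eigenvalues\n # first and the eigenvector matrix second, matching the usage\n # ``eigval, eigvec = Eig(self.func, symmetric)`` in ``Inverse._eig``.\n 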
yield ArrayFromTuple(self, index=1, shape=self.func.shape, dtype=self._vt_dtype)\n\n def _simplified(self):\n return self.func._eig(self.symmetric)\n\n def evalf(self, arr):\n w, vt = (numpy.linalg.eigh if self.symmetric else numpy.linalg.eig)(arr)\n w = w.astype(self._w_dtype, copy=False)\n vt = vt.astype(self._vt_dtype, copy=False)\n return (w, vt)\n\nclass ArrayFromTuple(Array):\n\n __slots__ = 'arrays', 'index', '_lower', '_upper'\n\n @types.apply_annotations\n def __init__(self, arrays:strictevaluable, index:types.strictint, shape:asshape, dtype:asdtype, *, _lower=float('-inf'), _upper=float('inf')):\n self.arrays = arrays\n self.index = index\n self._lower = _lower\n self._upper = _upper\n super().__init__(args=[arrays], shape=shape, dtype=dtype)\n\n def evalf(self, arrays):\n assert isinstance(arrays, tuple)\n return arrays[self.index]\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n elif hasattr(self.arrays, '_node_tuple'):\n cache[self] = node = self.arrays._node_tuple(cache, subgraph, times)[self.index]\n return node\n else:\n return super()._node(cache, subgraph, times)\n\n def _intbounds_impl(self):\n return self._lower, self._upper\n\nclass Zeros(Array):\n 'zero'\n\n __slots__ = ()\n __cache__ = '_assparse', '_unaligned'\n\n @types.apply_annotations\n def __init__(self, shape:asshape, dtype:asdtype):\n super().__init__(args=shape, shape=shape, dtype=dtype)\n\n @property\n def _unaligned(self):\n return Zeros((), self.dtype), ()\n\n def evalf(self, *shape):\n return numpy.zeros(shape, dtype=self.dtype)\n\n def _node(self, cache, subgraph, times):\n if self.ndim:\n return super()._node(cache, subgraph, times)\n elif self in cache:\n return cache[self]\n else:\n cache[self] = node = DuplicatedLeafNode('0', (type(self).__name__, times[self]))\n return node\n\n def _add(self, other):\n return other\n\n def _multiply(self, other):\n return self\n\n def _diagonalize(self, axis):\n return Zeros(self.shape+(self.shape[axis],), dtype=self.dtype)\n\n def _sum(self, axis):\n return Zeros(self.shape[:axis] + self.shape[axis+1:], dtype=int if self.dtype == bool else self.dtype)\n\n def _transpose(self, axes):\n shape = [self.shape[n] for n in axes]\n return Zeros(shape, dtype=self.dtype)\n\n def _insertaxis(self, axis, length):\n return Zeros(self.shape[:axis]+(length,)+self.shape[axis:], self.dtype)\n\n def _takediag(self, axis1, axis2):\n return Zeros(self.shape[:axis1]+self.shape[axis1+1:axis2]+self.shape[axis2+1:self.ndim]+(self.shape[axis1],), dtype=self.dtype)\n\n def _take(self, index, axis):\n return Zeros(self.shape[:axis] + index.shape + self.shape[axis+1:], dtype=self.dtype)\n\n def _inflate(self, dofmap, length, axis):\n return Zeros(self.shape[:axis] + (length,) + self.shape[axis+dofmap.ndim:], dtype=self.dtype)\n\n def _unravel(self, axis, shape):\n shape = self.shape[:axis] + shape + self.shape[axis+1:]\n return Zeros(shape, dtype=self.dtype)\n\n def _ravel(self, axis):\n return Zeros(self.shape[:axis] + (self.shape[axis]*self.shape[axis+1],) + self.shape[axis+2:], self.dtype)\n\n def _determinant(self, axis1, axis2):\n shape = list(self.shape)\n assert axis1 != axis2\n length, = set(map(shape.pop, sorted((axis1, axis2), reverse=True)))\n if iszero(length):\n return ones(shape, self.dtype)\n else:\n return Zeros(shape, self.dtype)\n\n @property\n def _assparse(self):\n return ()\n\n def _intbounds_impl(self):\n return 0, 0\n\nclass Inflate(Array):\n\n __slots__ = 'func', 'dofmap', 'length', 'warn'\n __cache__ = '_assparse', '_diagonals', 
'_inflations'\n\n @types.apply_annotations\n def __init__(self, func:asarray, dofmap:asarray, length:asindex):\n if not equalshape(func.shape[func.ndim-dofmap.ndim:], dofmap.shape):\n raise Exception('invalid dofmap')\n self.func = func\n self.dofmap = dofmap\n self.length = length\n self.warn = not dofmap.isconstant\n super().__init__(args=[func,dofmap,length], shape=(*func.shape[:func.ndim-dofmap.ndim], length), dtype=func.dtype)\n\n @property\n def _diagonals(self):\n return tuple(axes for axes in self.func._diagonals if all(axis < self.ndim-1 for axis in axes))\n\n @property\n def _inflations(self):\n inflations = [(self.ndim-1, types.frozendict({self.dofmap: self.func}))]\n for axis, parts in self.func._inflations:\n inflations.append((axis, types.frozendict((dofmap, Inflate(func, self.dofmap, self.length)) for dofmap, func in parts.items())))\n return tuple(inflations)\n\n def _simplified(self):\n for axis in range(self.dofmap.ndim):\n if equalindex(self.dofmap.shape[axis], 1):\n return Inflate(_take(self.func, 0, self.func.ndim-self.dofmap.ndim+axis), _take(self.dofmap, 0, axis), self.length)\n for axis, parts in self.func._inflations:\n i = axis - (self.ndim-1)\n if i >= 0:\n return util.sum(Inflate(f, _take(self.dofmap, ind, i), self.length) for ind, f in parts.items())\n if self.dofmap.ndim == 0 and equalindex(self.dofmap, 0) and equalindex(self.length, 1):\n return InsertAxis(self.func, 1)\n return self.func._inflate(self.dofmap, self.length, self.ndim-1) \\\n or self.dofmap._rinflate(self.func, self.length, self.ndim-1)\n\n def evalf(self, array, indices, length):\n assert indices.ndim == self.dofmap.ndim\n assert length.ndim == 0\n if self.warn and int(length) > indices.size:\n warnings.warn('using explicit inflation; this is usually a bug.', ExpensiveEvaluationWarning)\n inflated = numpy.zeros(array.shape[:array.ndim-indices.ndim] + (length,), dtype=self.dtype)\n numpy.add.at(inflated, (slice(None),)*(self.ndim-1)+(indices,), array)\n return inflated\n\n def _inflate(self, dofmap, length, axis):\n if dofmap.ndim == 0 and dofmap == self.dofmap and length == self.length:\n return diagonalize(self, -1, axis)\n\n def _derivative(self, var, seen):\n return _inflate(derivative(self.func, var, seen), self.dofmap, self.length, self.ndim-1)\n\n def _multiply(self, other):\n return Inflate(Multiply([self.func, Take(other, self.dofmap)]), self.dofmap, self.length)\n\n def _add(self, other):\n if isinstance(other, Inflate) and self.dofmap == other.dofmap:\n return Inflate(Add([self.func, other.func]), self.dofmap, self.length)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 == self.ndim-1:\n func = _take(self.func, self.dofmap, axis1)\n for i in range(self.dofmap.ndim):\n func = _takediag(func, axis1, axis2+self.dofmap.ndim-1-i)\n return Inflate(func, self.dofmap, self.length)\n else:\n return _inflate(_takediag(self.func, axis1, axis2), self.dofmap, self.length, self.ndim-3)\n\n def _take(self, index, axis):\n if axis != self.ndim-1:\n return Inflate(_take(self.func, index, axis), self.dofmap, self.length)\n newindex, newdofmap = SwapInflateTake(self.dofmap, index)\n if self.dofmap.ndim:\n func = self.func\n for i in range(self.dofmap.ndim-1):\n func = Ravel(func)\n intersection = Take(func, newindex)\n else: # kronecker; newindex is all zeros (but of varying length)\n intersection = InsertAxis(self.func, newindex.shape[0])\n if index.ndim:\n swapped = Inflate(intersection, newdofmap, index.size)\n for i in range(index.ndim-1):\n swapped = Unravel(swapped, 
index.shape[i], util.product(index.shape[i+1:]))\n else: # get; newdofmap is all zeros (but of varying length)\n swapped = Sum(intersection)\n return swapped\n\n def _diagonalize(self, axis):\n if axis != self.ndim-1:\n return _inflate(diagonalize(self.func, axis), self.dofmap, self.length, self.ndim-1)\n\n def _sum(self, axis):\n if axis == self.ndim-1:\n func = self.func\n for i in range(self.dofmap.ndim):\n func = Sum(func)\n return func\n return Inflate(sum(self.func, axis), self.dofmap, self.length)\n\n def _unravel(self, axis, shape):\n if axis != self.ndim-1:\n return Inflate(unravel(self.func, axis, shape), self.dofmap, self.length)\n\n def _sign(self):\n if self.dofmap.isconstant and _isunique(self.dofmap.eval()):\n return Inflate(Sign(self.func), self.dofmap, self.length)\n\n @property\n def _assparse(self):\n chunks = []\n flat_dofmap = _flat(self.dofmap)\n keep_dim = self.func.ndim - self.dofmap.ndim\n strides = (1, *itertools.accumulate(self.dofmap.shape[:0:-1], operator.mul))[::-1]\n for *indices, values in self.func._assparse:\n if self.dofmap.ndim:\n inflate_indices = Take(flat_dofmap, functools.reduce(operator.add, map(operator.mul, indices[keep_dim:], strides)))\n else:\n inflate_indices = appendaxes(self.dofmap, values.shape)\n chunks.append((*indices[:keep_dim], inflate_indices, values))\n return tuple(chunks)\n\n def _intbounds_impl(self):\n lower, upper = self.func._intbounds\n return min(lower, 0), max(upper, 0)\n\nclass SwapInflateTake(Evaluable):\n\n def __init__(self, inflateidx, takeidx):\n self.inflateidx = inflateidx\n self.takeidx = takeidx\n super().__init__(args=[inflateidx, takeidx])\n\n def __iter__(self):\n shape = ArrayFromTuple(self, index=2, shape=(), dtype=int, _lower=0),\n return (ArrayFromTuple(self, index=index, shape=shape, dtype=int, _lower=0) for index in range(2))\n\n def evalf(self, inflateidx, takeidx):\n uniqueinflate = _isunique(inflateidx)\n uniquetake = _isunique(takeidx)\n unique = uniqueinflate and uniquetake\n # If both indices are unique (i.e. they do not contain duplicates) then the\n # take and inflate operations can simply be restricted to the intersection,\n # with the location of the intersection in the original index vectors\n # being the new indices for the swapped operations.\n intersection, subinflate, subtake = numpy.intersect1d(inflateidx, takeidx, return_indices=True, assume_unique=unique)\n if unique:\n return subinflate, subtake, numpy.array(len(intersection))\n # Otherwise, while still limiting the operations to the intersection, we\n # need to add the appropriate duplications on either side. The easiest way\n # to do this is to form the permutation matrix A for take (may contain\n # multiple items per column) and B for inflate (may contain several items\n # per row) and take the product AB for the combined operation. To then\n # decompose AB into the equivalent take followed by inflate we can simply\n # take the two index vectors from AB.nonzero() and form matrices C (take)\n # and D (inflate) such that CD = AB. 
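For\n # example (an added sketch): with inflateidx=[3,5] and takeidx=[5,5,3],\n # the intersection is {3,5}; value 3 links inflate position 0 to take\n # position 2, and value 5 links inflate position 1 to take positions 0\n # and 1, so the method returns newtake=[0,1,1] (positions in inflateidx)\n # and newinflate=[2,0,1] (positions in takeidx). 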
The\n # algorithm below does precisely this without forming AB explicitly.\n newinflate = []\n newtake = []\n for k, n in enumerate(intersection):\n for i in [subtake[k]] if uniquetake else numpy.equal(takeidx.ravel(), n).nonzero()[0]:\n for j in [subinflate[k]] if uniqueinflate else numpy.equal(inflateidx.ravel(), n).nonzero()[0]:\n newinflate.append(i)\n newtake.append(j)\n return numpy.array(newtake, dtype=int), numpy.array(newinflate, dtype=int), numpy.array(len(newtake), dtype=int)\n\nclass Diagonalize(Array):\n\n __slots__ = 'func'\n __cache__ = '_diagonals'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n if func.ndim == 0:\n raise Exception('cannot diagonalize scalar function')\n self.func = func\n super().__init__(args=[func], shape=(*func.shape, func.shape[-1]), dtype=func.dtype)\n\n @property\n def _diagonals(self):\n diagonals = [frozenset([self.ndim-2, self.ndim-1])]\n for axes in self.func._diagonals:\n if axes & diagonals[0]:\n diagonals[0] |= axes\n else:\n diagonals.append(axes)\n return tuple(diagonals)\n\n @property\n def _inflations(self):\n return tuple((axis, types.frozendict((dofmap, Diagonalize(func)) for dofmap, func in parts.items()))\n for axis, parts in self.func._inflations\n if axis < self.ndim-2)\n\n def _simplified(self):\n if self.shape[-1] == 1:\n return InsertAxis(self.func, 1)\n return self.func._diagonalize(self.ndim-2)\n\n def evalf(self, arr):\n result = numpy.zeros(arr.shape+(arr.shape[-1],), dtype=arr.dtype, order='F')\n diag = numpy.core.multiarray.c_einsum('...ii->...i', result)\n diag[:] = arr\n return result\n\n def _derivative(self, var, seen):\n return diagonalize(derivative(self.func, var, seen), self.ndim-2, self.ndim-1)\n\n def _inverse(self, axis1, axis2):\n if sorted([axis1, axis2]) == [self.ndim-2, self.ndim-1]:\n return Diagonalize(reciprocal(self.func))\n\n def _determinant(self, axis1, axis2):\n if sorted([axis1, axis2]) == [self.ndim-2, self.ndim-1]:\n return Product(self.func)\n elif axis1 < self.ndim-2 and axis2 < self.ndim-2:\n return Diagonalize(determinant(self.func, (axis1, axis2)))\n\n def _sum(self, axis):\n if axis >= self.ndim - 2:\n return self.func\n return Diagonalize(sum(self.func, axis))\n\n def _takediag(self, axis1, axis2):\n if axis1 == self.ndim-2: # axis2 == self.ndim-1\n return self.func\n elif axis2 >= self.ndim-2:\n return diagonalize(_takediag(self.func, axis1, self.ndim-2), self.ndim-3, self.ndim-2)\n else:\n return diagonalize(_takediag(self.func, axis1, axis2), self.ndim-4, self.ndim-3)\n\n def _take(self, index, axis):\n if axis < self.ndim - 2:\n return Diagonalize(_take(self.func, index, axis))\n func = _take(self.func, index, self.ndim-2)\n for i in range(index.ndim):\n func = diagonalize(func, self.ndim-2+i)\n return _inflate(func, index, self.func.shape[-1], self.ndim-2 if axis == self.ndim-1 else self.ndim-2+index.ndim)\n\n def _unravel(self, axis, shape):\n if axis >= self.ndim - 2:\n diag = diagonalize(diagonalize(Unravel(self.func, *shape), self.ndim-2, self.ndim), self.ndim-1, self.ndim+1)\n return ravel(diag, self.ndim if axis == self.ndim-2 else self.ndim-2)\n else:\n return Diagonalize(unravel(self.func, axis, shape))\n\n def _sign(self):\n return Diagonalize(Sign(self.func))\n\n def _product(self):\n if numeric.isint(self.shape[-1]) and self.shape[-1] > 1:\n return Zeros(self.shape[:-1], dtype=self.dtype)\n\n def _loopsum(self, index):\n return Diagonalize(loop_sum(self.func, index))\n\n @property\n def _assparse(self):\n return tuple((*indices, indices[-1], values) for 
*indices, values in self.func._assparse)\n\nclass Guard(Array):\n 'bar all simplifications'\n\n __slots__ = 'fun',\n\n @types.apply_annotations\n def __init__(self, fun:asarray):\n self.fun = fun\n super().__init__(args=[fun], shape=fun.shape, dtype=fun.dtype)\n\n @property\n def isconstant(self):\n return False # avoid simplifications based on fun being constant\n\n @staticmethod\n def evalf(dat):\n return dat\n\n def _derivative(self, var, seen):\n return Guard(derivative(self.fun, var, seen))\n\nclass TrigNormal(Array):\n 'cos, sin'\n\n __slots__ = 'angle',\n\n @types.apply_annotations\n def __init__(self, angle:asarray):\n self.angle = angle\n super().__init__(args=[angle], shape=(*angle.shape, 2), dtype=float)\n\n def _derivative(self, var, seen):\n return einsum('Ai,AB->AiB', TrigTangent(self.angle), derivative(self.angle, var, seen))\n\n def evalf(self, angle):\n return numpy.stack([numpy.cos(angle), numpy.sin(angle)], axis=self.ndim-1)\n\n def _simplified(self):\n if iszero(self.angle):\n return prependaxes(Inflate(1., 0, 2), self.angle.shape)\n\nclass TrigTangent(Array):\n '-sin, cos'\n\n __slots__ = 'angle',\n\n @types.apply_annotations\n def __init__(self, angle:asarray):\n self.angle = angle\n super().__init__(args=[angle], shape=(*angle.shape, 2), dtype=float)\n\n def _derivative(self, var, seen):\n return -einsum('Ai,AB->AiB', TrigNormal(self.angle), derivative(self.angle, var, seen))\n\n def evalf(self, angle):\n return numpy.stack([-numpy.sin(angle), numpy.cos(angle)], axis=self.ndim-1)\n\n def _simplified(self):\n if iszero(self.angle):\n return prependaxes(Inflate(1., 1, 2), self.angle.shape)\n\nclass Find(Array):\n 'indices of boolean index vector'\n\n __slots__ = 'where',\n\n @types.apply_annotations\n def __init__(self, where:asarray):\n assert isarray(where) and where.ndim == 1 and where.dtype == bool\n self.where = where\n super().__init__(args=[where], shape=[Sum(Int(where))], dtype=int)\n\n def evalf(self, where):\n return where.nonzero()[0]\n\n def _simplified(self):\n if self.isconstant:\n return Constant(self.eval())\n\nclass DerivativeTargetBase(Array):\n 'base class for derivative targets'\n\n __slots__ = ()\n\n @property\n def isconstant(self):\n return False\n\nclass WithDerivative(Array):\n '''Wrap the given function and define the derivative to a target.\n\n The wrapper is typically used together with a virtual derivative target like\n :class:`IdentifierDerivativeTarget`. 
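Derivatives to any other\n target are forwarded to the wrapped function. 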
The wrapper is removed in the simplified\n form.\n\n Parameters\n ----------\n func : :class:`Array`\n The function to wrap.\n var : :class:`DerivativeTargetBase`\n The derivative target.\n derivative : :class:`Array`\n The derivative with shape ``func.shape + var.shape``.\n\n See Also\n --------\n :class:`IdentifierDerivativeTarget` : a virtual derivative target\n '''\n\n __slots__ = '_func', '_var', '_deriv'\n\n def __init__(self, func: Array, var: DerivativeTargetBase, derivative: Array) -> None:\n self._func = func\n self._var = var\n self._deriv = derivative\n super().__init__(args=(func,), shape=func.shape, dtype=func.dtype)\n\n @property\n def arguments(self):\n return self._func.arguments | {self._var}\n\n def evalf(self, func: numpy.ndarray) -> numpy.ndarray:\n return func\n\n def _derivative(self, var: DerivativeTargetBase, seen) -> Array:\n if var == self._var:\n return self._deriv\n else:\n return derivative(self._func, var, seen)\n\n def _simplified(self) -> Array:\n return self._func\n\nclass Argument(DerivativeTargetBase):\n '''Array argument, to be substituted before evaluation.\n\n The :class:`Argument` is an :class:`Array` with a known shape, but whose\n values are to be defined later, before evaluation, e.g. using\n :func:`replace_arguments`.\n\n It is possible to take the derivative of an :class:`Array` to an\n :class:`Argument`:\n\n >>> from nutils import evaluable\n >>> a = evaluable.Argument('x', [])\n >>> b = evaluable.Argument('y', [])\n >>> f = a**3 + b**2\n >>> evaluable.derivative(f, a).simplified == (3*a**2).simplified\n True\n\n Args\n ----\n name : :class:`str`\n The Identifier of this argument.\n shape : :class:`tuple` of :class:`int`\\\\s\n The shape of this argument.\n '''\n\n __slots__ = '_name'\n\n @types.apply_annotations\n def __init__(self, name:types.strictstr, shape:asshape, dtype=float):\n self._name = name\n super().__init__(args=[EVALARGS], shape=shape, dtype=dtype)\n\n def evalf(self, evalargs):\n try:\n value = evalargs[self._name]\n except KeyError:\n raise ValueError('argument {!r} missing'.format(self._name))\n else:\n value = numpy.asarray(value)\n assert equalshape(value.shape, self.shape)\n value = value.astype(self.dtype, casting='safe', copy=False)\n return value\n\n def _derivative(self, var, seen):\n if isinstance(var, Argument) and var._name == self._name and self.dtype == float:\n result = _inflate_scalar(1., self.shape)\n for i, sh in enumerate(self.shape):\n result = diagonalize(result, i, i+self.ndim)\n return result\n else:\n return zeros(self.shape+var.shape)\n\n def __str__(self):\n return '{} {!r} <{}>'.format(self.__class__.__name__, self._name, self._shape_str(form=str))\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n else:\n label = '\\n'.join(filter(None, (type(self).__name__, self._name, self._shape_str(form=repr))))\n cache[self] = node = DuplicatedLeafNode(label, (type(self).__name__, times[self]))\n return node\n\n @property\n def arguments(self):\n return frozenset({self})\n\nclass IdentifierDerivativeTarget(DerivativeTargetBase):\n '''Virtual derivative target distinguished by an identifier.\n\n Parameters\n ----------\n identifier : hashable :class:`object`\n The identifier for this derivative target.\n shape : :class:`tuple` of :class:`Array` or :class:`int`\n The shape of this derivative target.\n\n See Also\n --------\n :class:`WithDerivative` : :class:`Array` wrapper with additional derivative\n '''\n\n __slots__ = 'identifier'\n\n @types.apply_annotations\n def 
__init__(self, identifier, shape:asshape):\n self.identifier = identifier\n super().__init__(args=[], shape=shape, dtype=float)\n\n def evalf(self):\n raise Exception('{} cannot be evaluated'.format(type(self).__name__))\n\nclass Ravel(Array):\n\n __slots__ = 'func'\n __cache__ = '_inflations'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n if func.ndim < 2:\n raise Exception('cannot ravel function of dimension < 2')\n self.func = func\n super().__init__(args=[func], shape=(*func.shape[:-2], func.shape[-2] * func.shape[-1]), dtype=func.dtype)\n\n @property\n def _inflations(self):\n inflations = []\n stride = self.func.shape[-1]\n n = None\n for axis, old_parts in self.func._inflations:\n if axis == self.ndim - 1 and n is None:\n n = self.func.shape[-1]\n inflations.append((self.ndim - 1, types.frozendict((RavelIndex(dofmap, Range(n), *self.func.shape[-2:]), func) for dofmap, func in old_parts.items())))\n elif axis == self.ndim and n is None:\n n = self.func.shape[-2]\n inflations.append((self.ndim - 1, types.frozendict((RavelIndex(Range(n), dofmap, *self.func.shape[-2:]), func) for dofmap, func in old_parts.items())))\n elif axis < self.ndim - 1:\n inflations.append((axis, types.frozendict((dofmap, Ravel(func)) for dofmap, func in old_parts.items())))\n return tuple(inflations)\n\n def _simplified(self):\n if equalindex(self.func.shape[-2], 1):\n return get(self.func, -2, 0)\n if equalindex(self.func.shape[-1], 1):\n return get(self.func, -1, 0)\n return self.func._ravel(self.ndim-1)\n\n def evalf(self, f):\n return f.reshape(f.shape[:-2] + (f.shape[-2]*f.shape[-1],))\n\n def _multiply(self, other):\n if isinstance(other, Ravel) and equalshape(other.func.shape[-2:], self.func.shape[-2:]):\n return Ravel(Multiply([self.func, other.func]))\n return Ravel(Multiply([self.func, Unravel(other, *self.func.shape[-2:])]))\n\n def _add(self, other):\n return Ravel(self.func + Unravel(other, *self.func.shape[-2:]))\n\n def _sum(self, axis):\n if axis == self.ndim-1:\n return Sum(Sum(self.func))\n return Ravel(sum(self.func, axis))\n\n def _derivative(self, var, seen):\n return ravel(derivative(self.func, var, seen), axis=self.ndim-1)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 <= self.ndim-2:\n return ravel(_takediag(self.func, axis1, axis2), self.ndim-3)\n else:\n unraveled = unravel(self.func, axis1, self.func.shape[-2:])\n return Ravel(_takediag(_takediag(unraveled, axis1, -2), axis1, -2))\n\n def _take(self, index, axis):\n if axis != self.ndim-1:\n return Ravel(_take(self.func, index, axis))\n\n def _rtake(self, func, axis):\n if self.ndim == 1:\n return Ravel(Take(func, self.func))\n\n def _unravel(self, axis, shape):\n if axis != self.ndim-1:\n return Ravel(unravel(self.func, axis, shape))\n elif equalshape(shape, self.func.shape[-2:]):\n return self.func\n\n def _inflate(self, dofmap, length, axis):\n if axis < self.ndim-dofmap.ndim:\n return Ravel(_inflate(self.func, dofmap, length, axis))\n elif dofmap.ndim == 0:\n return ravel(Inflate(self.func, dofmap, length), self.ndim-1)\n else:\n return _inflate(self.func, Unravel(dofmap, *self.func.shape[-2:]), length, axis)\n\n def _diagonalize(self, axis):\n if axis != self.ndim-1:\n return ravel(diagonalize(self.func, axis), self.ndim-1)\n\n def _insertaxis(self, axis, length):\n return ravel(insertaxis(self.func, axis+(axis==self.ndim), length), self.ndim-(axis==self.ndim))\n\n def _power(self, n):\n return Ravel(Power(self.func, Unravel(n, *self.func.shape[-2:])))\n\n def _sign(self):\n return 
Ravel(Sign(self.func))\n\n def _product(self):\n return Product(Product(self.func))\n\n def _loopsum(self, index):\n return Ravel(loop_sum(self.func, index))\n\n @property\n def _unaligned(self):\n unaligned, where = unalign(self.func)\n for i in self.ndim - 1, self.ndim:\n if i not in where:\n unaligned = InsertAxis(unaligned, self.func.shape[i])\n where += i,\n if where[-2:] != (self.ndim - 1, self.ndim):\n unaligned = Transpose(unaligned, numpy.argsort(where))\n where = tuple(sorted(where))\n return Ravel(unaligned), where[:-1]\n\n @property\n def _assparse(self):\n return tuple((*indices[:-2], indices[-2]*self.func.shape[-1]+indices[-1], values) for *indices, values in self.func._assparse)\n\n def _intbounds_impl(self):\n return self.func._intbounds_impl()\n\nclass Unravel(Array):\n\n __slots__ = 'func'\n\n @types.apply_annotations\n def __init__(self, func:asarray, sh1:asindex, sh2:asindex):\n if func.ndim == 0:\n raise Exception('cannot unravel scalar function')\n if not equalindex(func.shape[-1], sh1 * sh2):\n raise Exception('new shape does not match axis length')\n self.func = func\n super().__init__(args=[func, sh1, sh2], shape=(*func.shape[:-1], sh1, sh2), dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.shape[-2], 1):\n return insertaxis(self.func, self.ndim-2, 1)\n if equalindex(self.shape[-1], 1):\n return insertaxis(self.func, self.ndim-1, 1)\n return self.func._unravel(self.ndim-2, self.shape[-2:])\n\n def _derivative(self, var, seen):\n return unravel(derivative(self.func, var, seen), axis=self.ndim-2, shape=self.shape[-2:])\n\n def evalf(self, f, sh1, sh2):\n return f.reshape(f.shape[:-1] + (sh1, sh2))\n\n def _takediag(self, axis1, axis2):\n if axis2 < self.ndim-2:\n return unravel(_takediag(self.func, axis1, axis2), self.ndim-4, self.shape[-2:])\n\n def _take(self, index, axis):\n if axis < self.ndim - 2:\n return Unravel(_take(self.func, index, axis), *self.shape[-2:])\n\n def _sum(self, axis):\n if axis < self.ndim - 2:\n return Unravel(sum(self.func, axis), *self.shape[-2:])\n\n @property\n def _assparse(self):\n return tuple((*indices[:-1], *divmod(indices[-1], appendaxes(self.shape[-1], values.shape)), values) for *indices, values in self.func._assparse)\n\nclass RavelIndex(Array):\n\n @types.apply_annotations\n def __init__(self, ia:asarray, ib:asarray, na:asindex, nb:asindex):\n self._ia = ia\n self._ib = ib\n self._na = na\n self._nb = nb\n self._length = na * nb\n super().__init__(args=[ia, ib, nb], shape=ia.shape + ib.shape, dtype=int)\n\n def evalf(self, ia, ib, nb):\n return ia[(...,)+(numpy.newaxis,)*ib.ndim] * nb + ib\n\n def _take(self, index, axis):\n if axis < self._ia.ndim:\n return RavelIndex(_take(self._ia, index, axis), self._ib, self._na, self._nb)\n else:\n return RavelIndex(self._ia, _take(self._ib, index, axis - self._ia.ndim), self._na, self._nb)\n\n def _rtake(self, func, axis):\n if equalindex(func.shape[axis], self._length):\n return _take(_take(unravel(func, axis, (self._na, self._nb)), self._ib, axis+1), self._ia, axis)\n\n def _rinflate(self, func, length, axis):\n if equalindex(length, self._length):\n return Ravel(Inflate(_inflate(func, self._ia, self._na, func.ndim - self.ndim), self._ib, self._nb))\n\n def _unravel(self, axis, shape):\n if axis < self._ia.ndim:\n return RavelIndex(unravel(self._ia, axis, shape), self._ib, self._na, self._nb)\n else:\n return RavelIndex(self._ia, unravel(self._ib, axis-self._ia.ndim, shape), self._na, self._nb)\n\n def _intbounds_impl(self):\n nbmin, nbmax = self._nb._intbounds\n 
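# The raveled index equals ia * nb + ib. In the upper bound below the `and`\n # chain short-circuits to zero as soon as either factor's upper bound is\n # zero, which avoids evaluating the indeterminate product 0 * inf when the\n # other factor is unbounded.\n 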
iamin, iamax = self._ia._intbounds\n ibmin, ibmax = self._ib._intbounds\n return iamin * nbmin + ibmin, (iamax and nbmax and iamax * nbmax) + ibmax\n\nclass Range(Array):\n\n __slots__ = 'length'\n\n @types.apply_annotations\n def __init__(self, length:asindex):\n self.length = length\n super().__init__(args=[length], shape=[length], dtype=int)\n\n def _take(self, index, axis):\n return InRange(index, self.length)\n\n def _rtake(self, func, axis):\n if equalindex(self.length, func.shape[axis]):\n return func\n\n def _rinflate(self, func, length, axis):\n if length == self.length:\n return func\n\n def evalf(self, length):\n return numpy.arange(length)\n\n def _intbounds_impl(self):\n lower, upper = self.length._intbounds\n assert lower >= 0\n return 0, max(0, upper - 1)\n\nclass InRange(Array):\n\n __slots__ = 'index', 'length'\n\n @types.apply_annotations\n def __init__(self, index:asarray, length:asarray):\n self.index = index\n self.length = length\n super().__init__(args=[index, length], shape=index.shape, dtype=int)\n\n def evalf(self, index, length):\n assert index.size == 0 or 0 <= index.min() and index.max() < length\n return index\n\n def _simplified(self):\n lower_length, upper_length = self.length._intbounds\n lower_index, upper_index = self.index._intbounds\n if 0 <= lower_index <= upper_index < lower_length:\n return self.index\n\n def _intbounds_impl(self):\n lower_index, upper_index = self.index._intbounds\n lower_length, upper_length = self.length._intbounds\n upper = min(upper_index, max(0, upper_length - 1))\n return max(0, min(lower_index, upper)), upper\n\nclass Polyval(Array):\n '''\n Computes the :math:`k`-dimensional array\n\n .. math:: j_0,\\\\dots,j_{k-1} \\\\mapsto \\\\sum_{\\\\substack{i_0,\\\\dots,i_{n-1}\\\\in\\\\mathbb{N}\\\\\\\\i_0+\\\\cdots+i_{n-1}\\\\le d}} p_0^{i_0} \\\\cdots p_{n-1}^{i_{n-1}} c_{j_0,\\\\dots,j_{k-1},i_0,\\\\dots,i_{n-1}},\n\n where :math:`p` are the :math:`n`-dimensional local coordinates and :math:`c`\n is the argument ``coeffs`` and :math:`d` is the degree of the polynomial,\n where :math:`d` is the length of the last :math:`n` axes of ``coeffs``, minus\n one.\n\n .. warning::\n\n All coefficients with a (combined) degree larger than :math:`d` should be\n zero. 
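For two coordinates\n (:math:`n = 2`) and degree :math:`d = 1`, for example, the coefficient\n :math:`c_{\\\\dots,1,1}` multiplies :math:`p_0 p_1`, which has combined\n degree two and must therefore vanish. 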
Failing to do so won't raise an :class:`Exception`, but might give\n incorrect results.\n '''\n\n __slots__ = 'points_ndim', 'coeffs', 'points', 'ngrad'\n\n @types.apply_annotations\n def __init__(self, coeffs:asarray, points:asarray, ngrad:types.strictint=0):\n if points.ndim < 1:\n raise ValueError('argument `points` should have at least one axis')\n if not points.shape[-1].isconstant:\n raise ValueError('the last axis of argument `points` should be a constant integer')\n self.points_ndim = int(points.shape[-1])\n ndim = coeffs.ndim - self.points_ndim\n if ndim < 0:\n raise ValueError('argument `coeffs` should have at least one axis per spatial dimension')\n self.coeffs = coeffs\n self.points = points\n self.ngrad = ngrad\n super().__init__(args=[points, coeffs], shape=points.shape[:-1]+coeffs.shape[:ndim]+(self.points_ndim,)*ngrad, dtype=float)\n\n def evalf(self, points, coeffs):\n for igrad in range(self.ngrad):\n coeffs = numeric.poly_grad(coeffs, self.points_ndim)\n return numeric.poly_eval(coeffs, points)\n\n def _derivative(self, var, seen):\n dpoints = einsum('ABi,AiD->ABD', Polyval(self.coeffs, self.points, self.ngrad+1), derivative(self.points, var, seen), A=self.points.ndim-1)\n dcoeffs = Transpose.from_end(Polyval(Transpose.to_end(derivative(self.coeffs, var, seen), *range(self.coeffs.ndim)), self.points, self.ngrad), *range(self.points.ndim-1, self.ndim))\n return dpoints + dcoeffs\n\n def _take(self, index, axis):\n if axis < self.points.ndim - 1:\n return Polyval(self.coeffs, _take(self.points, index, axis), self.ngrad)\n elif axis < self.points.ndim - 1 + self.coeffs.ndim - self.points_ndim:\n return Polyval(_take(self.coeffs, index, axis - self.points.ndim + 1), self.points, self.ngrad)\n\n def _const_helper(self, *j):\n if len(j) == self.ngrad:\n coeffs = self.coeffs\n for i in reversed(range(self.points_ndim)):\n p = builtins.sum(k==i for k in j)\n coeffs = math.factorial(p)*get(coeffs, i+self.coeffs.ndim-self.points_ndim, p)\n return coeffs\n else:\n return stack([self._const_helper(*j, k) for k in range(self.points_ndim)], axis=self.coeffs.ndim-self.points_ndim+self.ngrad-len(j)-1)\n\n def _simplified(self):\n degree = 0 if self.points_ndim == 0 else self.coeffs.shape[-1]-1 if isinstance(self.coeffs.shape[-1], int) else float('inf')\n if iszero(self.coeffs) or self.ngrad > degree:\n return zeros_like(self)\n elif self.ngrad == degree:\n return prependaxes(self._const_helper(), self.points.shape[:-1])\n points, where = unalign(self.points)\n if points.ndim < self.points.ndim and set(where) != set(range(self.points.ndim-1)):\n if self.points.ndim - 1 not in where:\n points = InsertAxis(points, self.points.shape[-1])\n where += self.points.ndim - 1,\n elif where[-1] != self.points.ndim - 1:\n points = Transpose(points, numpy.argsort(where))\n where = tuple(sorted(where))\n where = where[:-1] + tuple(range(self.points.ndim - 1, self.ndim))\n return align(Polyval(self.coeffs, points, self.ngrad), where, self.shape)\n\nclass PolyOuterProduct(Array):\n\n def __init__(self, left, right):\n nleft = left.shape[1]\n assert all(n == nleft for n in left.shape[2:])\n nright = right.shape[1]\n assert all(n == nright for n in right.shape[2:])\n shape = (left.shape[0] * right.shape[0],) + (nleft + nright - 1,) * (left.ndim + right.ndim - 2)\n super().__init__(args=[left, right], shape=shape, dtype=float)\n\n def evalf(self, left, right):\n return numeric.poly_outer_product(left, right)\n\nclass Legendre(Array):\n '''Series of Legendre polynomial up to and including the given 
degree.\n\n Parameters\n ---------\n x : :class:`Array`\n The coordinates to evaluate the series at.\n degree : :class:`int`\n The degree of the last polynomial of the series.\n '''\n\n def __init__(self, x: Array, degree: int) -> None:\n assert x.dtype == float\n self._x = x\n self._degree = degree\n super().__init__(args=(x,), shape=(*x.shape, degree+1), dtype=float)\n\n def evalf(self, x: numpy.ndarray) -> numpy.ndarray:\n P = numpy.empty((*x.shape, self._degree+1), dtype=float)\n P[...,0] = 1\n if self._degree:\n P[...,1] = x\n for i in range(2, self._degree+1):\n P[...,i] = (2-1/i)*P[...,1]*P[...,i-1] - (1-1/i)*P[...,i-2]\n return P\n\n def _derivative(self, var, seen):\n d = numpy.zeros((self._degree+1,)*2, dtype=int)\n for i in range(self._degree+1):\n d[i,i+1::2] = 2*i+1\n dself = einsum('Ai,ij->Aj', self, d)\n return einsum('Ai,AB->AiB', dself, derivative(self._x, var, seen))\n\n def _simplified(self):\n unaligned, where = unalign(self._x)\n if where != tuple(range(self._x.ndim)):\n return align(Legendre(unaligned, self._degree), (*where, self.ndim-1), self.shape)\n\n def _takediag(self, axis1, axis2):\n if axis1 < self.ndim - 1 and axis2 < self.ndim - 1:\n return Transpose.to_end(Legendre(_takediag(self._x, axis1, axis2), self._degree), -2)\n\n def _take(self, index, axis):\n if axis < self.ndim - 1:\n return Legendre(_take(self._x, index, axis), self._degree)\n\n def _unravel(self, axis, shape):\n if axis < self.ndim - 1:\n return Legendre(unravel(self._x, axis, shape), self._degree)\n\nclass Choose(Array):\n '''Function equivalent of :func:`numpy.choose`.'''\n\n @types.apply_annotations\n def __init__(self, index:asarray, choices:asarrays):\n if index.dtype != int:\n raise Exception('index must be integer valued')\n dtype = choices[0].dtype\n if any(choice.dtype != dtype for choice in choices[1:]):\n raise Exception('dtypes vary')\n shape = index.shape\n if not all(equalshape(choice.shape, shape) for choice in choices):\n raise Exception('shapes vary')\n self.index = index\n self.choices = choices\n super().__init__(args=(index,)+choices, shape=shape, dtype=dtype)\n\n def evalf(self, index, *choices):\n return numpy.choose(index, choices)\n\n def _derivative(self, var, seen):\n return Choose(appendaxes(self.index, var.shape), [derivative(choice, var, seen) for choice in self.choices])\n\n def _simplified(self):\n if all(choice == self.choices[0] for choice in self.choices[1:]):\n return self.choices[0]\n index, *choices, where = unalign(self.index, *self.choices)\n if len(where) < self.ndim:\n return align(Choose(index, choices), where, self.shape)\n\n def _multiply(self, other):\n if isinstance(other, Choose) and self.index == other.index:\n return Choose(self.index, map(multiply, self.choices, other.choices))\n\n def _get(self, i, item):\n return Choose(get(self.index, i, item), [get(choice, i, item) for choice in self.choices])\n\n def _sum(self, axis):\n unaligned, where = unalign(self.index)\n if axis not in where:\n index = align(unaligned, [i-(i>axis) for i in where], self.shape[:axis]+self.shape[axis+1:])\n return Choose(index, [sum(choice, axis) for choice in self.choices])\n\n def _take(self, index, axis):\n return Choose(_take(self.index, index, axis), [_take(choice, index, axis) for choice in self.choices])\n\n def _takediag(self, axis, rmaxis):\n return Choose(takediag(self.index, axis, rmaxis), [takediag(choice, axis, rmaxis) for choice in self.choices])\n\n def _product(self):\n unaligned, where = unalign(self.index)\n if self.ndim-1 not in where:\n index = 
align(unaligned, where, self.shape[:-1])\n return Choose(index, [Product(choice) for choice in self.choices])\n\nclass NormDim(Array):\n\n @types.apply_annotations\n def __init__(self, length: asarray, index: asarray):\n assert length.dtype == int\n assert index.dtype == int\n assert equalshape(length.shape, index.shape)\n # The following corner case makes the assertion fail, hence we can only\n # assert the bounds if the arrays are guaranteed to be nonempty:\n #\n # Take(func, NormDim(func.shape[-1], Range(0) + func.shape[-1]))\n if all(n._intbounds[0] > 0 for n in index.shape):\n assert -length._intbounds[1] <= index._intbounds[0] and index._intbounds[1] <= length._intbounds[1] - 1\n self.length = length\n self.index = index\n super().__init__(args=[length, index], shape=index.shape, dtype=index.dtype)\n\n def evalf(self, length, index):\n assert length.shape == index.shape\n assert length.dtype.kind == 'i'\n assert index.dtype.kind == 'i'\n result = numpy.empty(index.shape, dtype=int)\n for i in numpy.ndindex(index.shape):\n result[i] = numeric.normdim(length[i], index[i])\n return result\n\n def _simplified(self):\n lower_length, upper_length = self.length._intbounds\n lower_index, upper_index = self.index._intbounds\n if 0 <= lower_index and upper_index < lower_length:\n return self.index\n if isinstance(lower_length, int) and lower_length == upper_length and -lower_length <= lower_index and upper_index < 0:\n return self.index + lower_length\n if self.length.isconstant and self.index.isconstant:\n return Constant(self.eval())\n\n def _intbounds_impl(self):\n lower_length, upper_length = self.length._intbounds\n lower_index, upper_index = self.index._intbounds\n if lower_index >= 0:\n return min(lower_index, upper_length - 1), min(upper_index, upper_length - 1)\n elif upper_index < 0 and isinstance(lower_length, int) and lower_length == upper_length:\n return max(lower_index + lower_length, 0), max(upper_index + lower_length, 0)\n else:\n return 0, upper_length - 1\n\nclass _LoopIndex(Argument):\n\n __slots__ = 'length'\n\n @types.apply_annotations\n def __init__(self, name: types.strictstr, length: asindex):\n self.length = length\n super().__init__(name, (), int)\n\n def __str__(self):\n try:\n length = self.length.__index__()\n except EvaluationError:\n length = '?'\n return 'LoopIndex({}, length={})'.format(self._name, length)\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n cache[self] = node = RegularNode('LoopIndex', (), dict(length=self.length._node(cache, subgraph, times)), (type(self).__name__, _Stats()), subgraph)\n return node\n\n def _intbounds_impl(self):\n lower_length, upper_length = self.length._intbounds\n return 0, max(0, upper_length - 1)\n\n def _simplified(self):\n if equalindex(self.length, 1):\n return Zeros((), int)\n\nclass LoopSum(Array):\n\n __cache__ = '_serialized'\n\n def prepare_funcdata(arg):\n # separate shape from array to make it simplifiable (annotations are\n # treated as a preprocessor, which means the processed value is returned by\n # self.__reduce__)\n if isinstance(arg, tuple):\n return arg\n arg = asarray(arg)\n return (arg, *arg.shape)\n\n @types.apply_annotations\n def __init__(self, funcdata:prepare_funcdata, index_name:types.strictstr, length:asindex):\n shape = Tuple(funcdata[1:])\n self.index = loop_index(index_name, length)\n if self.index in shape.arguments:\n raise ValueError('the shape of the function must not depend on the index')\n self.func = funcdata[0]\n assert self.func.dtype != bool\n 
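# Split the summand into subtrees that are invariant to the loop index and\n # the remainder that actually depends on it: invariants are evaluated once\n # and passed in as arguments, while the dependent chain is re-evaluated for\n # every iteration of the loop (see evalf below).\n 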
self._invariants, self._dependencies = _dependencies_sans_invariants(self.func, self.index)\n super().__init__(args=(shape, length, *self._invariants), shape=self.func.shape, dtype=self.func.dtype)\n\n @property\n def _serialized(self):\n indices = {d: i for i, d in enumerate(itertools.chain([self.index], self._invariants, self._dependencies))}\n return tuple((dep, tuple(map(indices.__getitem__, dep._Evaluable__args))) for dep in self._dependencies)\n\n def evalf(self, shape, length, *args):\n serialized = self._serialized\n result = numpy.zeros(shape, self.dtype)\n for index in range(length):\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf(*[values[i] for i in indices]) for op, indices in serialized)\n result += values[-1]\n return result\n\n def evalf_withtimes(self, times, shape, length, *args):\n serialized = self._serialized\n subtimes = times.setdefault(self, collections.defaultdict(_Stats))\n result = numpy.zeros(shape, self.dtype)\n for index in range(length):\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf_withtimes(subtimes, *[values[i] for i in indices]) for op, indices in serialized)\n result += values[-1]\n return result\n\n def _derivative(self, var, seen):\n return loop_sum(derivative(self.func, var, seen), self.index)\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n subcache = {}\n for arg in self._Evaluable__args:\n subcache[arg] = arg._node(cache, subgraph, times)\n loopgraph = Subgraph('Loop', subgraph)\n subtimes = times.get(self, collections.defaultdict(_Stats))\n sum_kwargs = {'shape[{}]'.format(i): n._node(cache, subgraph, times) for i, n in enumerate(self.shape)}\n sum_kwargs['func'] = self.func._node(subcache, loopgraph, subtimes)\n cache[self] = node = RegularNode('LoopSum', (), sum_kwargs, (type(self).__name__, subtimes['sum']), loopgraph)\n return node\n\n def _simplified(self):\n if iszero(self.func):\n return zeros_like(self)\n elif self.index not in self.func.arguments:\n return self.func * self.index.length\n return self.func._loopsum(self.index)\n\n def _takediag(self, axis1, axis2):\n return loop_sum(_takediag(self.func, axis1, axis2), self.index)\n\n def _take(self, index, axis):\n return loop_sum(_take(self.func, index, axis), self.index)\n\n def _unravel(self, axis, shape):\n return loop_sum(unravel(self.func, axis, shape), self.index)\n\n def _sum(self, axis):\n return loop_sum(sum(self.func, axis), self.index)\n\n def _add(self, other):\n if isinstance(other, LoopSum) and other.index == self.index:\n return loop_sum(self.func + other.func, self.index)\n\n def _multiply(self, other):\n return loop_sum(self.func * other, self.index)\n\n @property\n def _assparse(self):\n chunks = []\n for *elem_indices, elem_values in self.func._assparse:\n if self.ndim == 0:\n values = loop_concatenate(InsertAxis(elem_values, 1), self.index)\n while values.ndim:\n values = Sum(values)\n chunks.append((values,))\n else:\n if elem_values.ndim == 0:\n *elem_indices, elem_values = (InsertAxis(arr, 1) for arr in (*elem_indices, elem_values))\n else:\n # minimize ravels by transposing all variable length axes to the end\n variable = tuple(i for i, n in enumerate(elem_values.shape) if self.index in n.arguments)\n *elem_indices, elem_values = (Transpose.to_end(arr, *variable) for arr in (*elem_indices, elem_values))\n for i in variable[:-1]:\n *elem_indices, elem_values = map(Ravel, (*elem_indices, elem_values))\n assert all(self.index not in n.arguments for n in 
elem_values.shape[:-1])\n chunks.append(tuple(loop_concatenate(arr, self.index) for arr in (*elem_indices, elem_values)))\n return tuple(chunks)\n\nclass _SizesToOffsets(Array):\n\n def __init__(self, sizes):\n assert sizes.ndim == 1\n assert sizes.dtype == int\n assert sizes._intbounds[0] >= 0\n self._sizes = sizes\n super().__init__(args=[sizes], shape=(sizes.shape[0]+1,), dtype=int)\n\n def evalf(self, sizes):\n return numpy.cumsum([0, *sizes])\n\n def _simplified(self):\n unaligned, where = unalign(self._sizes)\n if not where:\n return Range(self.shape[0]) * appendaxes(unaligned, self.shape[:1])\n\n def _intbounds_impl(self):\n n = self._sizes.size._intbounds[1]\n m = self._sizes._intbounds[1]\n return 0, (0 if n == 0 or m == 0 else n * m)\n\nclass LoopConcatenate(Array):\n\n @types.apply_annotations\n def __init__(self, funcdata:asarrays, index_name:types.strictstr, length:asindex):\n self.funcdata = funcdata\n self.func, self.start, stop, *shape = funcdata\n self.index = loop_index(index_name, length)\n if not self.func.ndim:\n raise ValueError('expected an array with at least one axis')\n if any(self.index in n.arguments for n in shape):\n raise ValueError('the shape of the function must not depend on the index')\n self._lcc = LoopConcatenateCombined((self.funcdata,), index_name, length)\n super().__init__(args=[self._lcc], shape=shape, dtype=self.func.dtype)\n\n def evalf(self, arg):\n return arg[0]\n\n def evalf_withtimes(self, times, arg):\n with times[self]:\n return arg[0]\n\n def _derivative(self, var, seen):\n return Transpose.from_end(loop_concatenate(Transpose.to_end(derivative(self.func, var, seen), self.ndim-1), self.index), self.ndim-1)\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n else:\n cache[self] = node = self._lcc._node_tuple(cache, subgraph, times)[0]\n return node\n\n def _simplified(self):\n if iszero(self.func):\n return zeros_like(self)\n elif self.index not in self.func.arguments:\n return Ravel(Transpose.from_end(InsertAxis(self.func, self.index.length), -2))\n unaligned, where = unalign(self.func)\n if self.ndim-1 not in where:\n # reinsert concatenation axis, at unit length if possible so we can\n # insert the remainder outside of the loop\n unaligned = InsertAxis(unaligned, self.func.shape[-1] if self.index in self.func.shape[-1].arguments else 1)\n where += self.ndim-1,\n elif where[-1] != self.ndim-1:\n # bring concatenation axis to the end\n unaligned = Transpose(unaligned, numpy.argsort(where))\n where = tuple(sorted(where))\n f = loop_concatenate(unaligned, self.index)\n if not equalindex(self.shape[-1], f.shape[-1]):\n # last axis was reinserted at unit length AND it was not unit length\n # originally - if it was unit length originally then we proceed only if\n # there are other insertions to promote, otherwise we'd get a recursion.\n f = Ravel(InsertAxis(f, self.func.shape[-1]))\n elif len(where) == self.ndim:\n return\n return align(f, where, self.shape)\n\n def _takediag(self, axis1, axis2):\n if axis1 < self.ndim-1 and axis2 < self.ndim-1:\n return Transpose.from_end(loop_concatenate(Transpose.to_end(_takediag(self.func, axis1, axis2), -2), self.index), -2)\n\n def _take(self, index, axis):\n if axis < self.ndim-1:\n return loop_concatenate(_take(self.func, index, axis), self.index)\n\n def _unravel(self, axis, shape):\n if axis < self.ndim-1:\n return loop_concatenate(unravel(self.func, axis, shape), self.index)\n\n @property\n def _assparse(self):\n chunks = []\n for *indices, last_index, values in 
self.func._assparse:\n last_index = last_index + prependaxes(self.start, last_index.shape)\n chunks.append(tuple(loop_concatenate(_flat(arr), self.index) for arr in (*indices, last_index, values)))\n return tuple(chunks)\n\n @property\n def _loop_concatenate_deps(self):\n return (self,) + super()._loop_concatenate_deps\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass LoopConcatenateCombined(Evaluable):\n\n __cache__ = '_serialized'\n\n @types.apply_annotations\n def __init__(self, funcdatas:types.tuple[asarrays], index_name:types.strictstr, length:asindex):\n self._funcdatas = funcdatas\n self._funcs = tuple(func for func, start, stop, *shape in funcdatas)\n self._index_name = index_name\n self._index = loop_index(index_name, length)\n if any(not func.ndim for func in self._funcs):\n raise ValueError('expected an array with at least one axis')\n shapes = [Tuple(shape) for func, start, stop, *shape in funcdatas]\n if any(self._index in shape.arguments for shape in shapes):\n raise ValueError('the shape of the function must not depend on the index')\n self._invariants, self._dependencies = _dependencies_sans_invariants(\n Tuple([Tuple([start, stop, func]) for func, start, stop, *shape in funcdatas]), self._index)\n super().__init__(args=(Tuple(shapes), length, *self._invariants))\n\n @property\n def _serialized(self):\n indices = {d: i for i, d in enumerate(itertools.chain([self._index], self._invariants, self._dependencies))}\n return tuple((dep, tuple(map(indices.__getitem__, dep._Evaluable__args))) for dep in self._dependencies)\n\n def evalf(self, shapes, length, *args):\n serialized = self._serialized\n results = [parallel.shempty(tuple(map(int, shape)), dtype=func.dtype) for func, shape in zip(self._funcs, shapes)]\n with parallel.ctxrange('loop {}'.format(self._index_name), int(length)) as indices:\n for index in indices:\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf(*[values[i] for i in indices]) for op, indices in serialized)\n for result, (start, stop, block) in zip(results, values[-1]):\n result[...,start:stop] = block\n return tuple(results)\n\n def evalf_withtimes(self, times, shapes, length, *args):\n serialized = self._serialized\n subtimes = times.setdefault(self, collections.defaultdict(_Stats))\n results = [parallel.shempty(tuple(map(int, shape)), dtype=func.dtype) for func, shape in zip(self._funcs, shapes)]\n for index in range(length):\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf_withtimes(subtimes, *[values[i] for i in indices]) for op, indices in serialized)\n for func, result, (start, stop, block) in zip(self._funcs, results, values[-1]):\n with subtimes['concat', func]:\n result[...,start:stop] = block\n return tuple(results)\n\n def _node_tuple(self, cache, subgraph, times):\n if (self, 'tuple') in cache:\n return cache[self, 'tuple']\n subcache = {}\n for arg in self._invariants:\n subcache[arg] = arg._node(cache, subgraph, times)\n loopgraph = Subgraph('Loop', subgraph)\n subtimes = times.get(self, collections.defaultdict(_Stats))\n concats = []\n for func, start, stop, *shape in self._funcdatas:\n concat_kwargs = {'shape[{}]'.format(i): n._node(cache, subgraph, times) for i, n in enumerate(shape)}\n concat_kwargs['start'] = start._node(subcache, loopgraph, subtimes)\n concat_kwargs['stop'] = stop._node(subcache, loopgraph, subtimes)\n concat_kwargs['func'] = func._node(subcache, loopgraph, subtimes)\n concats.append(RegularNode('LoopConcatenate', (), concat_kwargs, 
(type(self).__name__, subtimes['concat', func]), loopgraph))\n cache[self, 'tuple'] = concats = tuple(concats)\n return concats\n\n# AUXILIARY FUNCTIONS (FOR INTERNAL USE)\n\n_ascending = lambda arg: numpy.greater(numpy.diff(arg), 0).all()\n_normdims = lambda ndim, shapes: tuple(numeric.normdim(ndim,sh) for sh in shapes)\n\ndef _gatherblocks(blocks):\n return tuple((ind, util.sum(funcs)) for ind, funcs in util.gather(blocks))\n\ndef _gathersparsechunks(chunks):\n return tuple((*ind, util.sum(funcs)) for ind, funcs in util.gather((tuple(ind), func) for *ind, func in chunks))\n\ndef _numpy_align(a, b):\n '''check shape consistency and inflate scalars'''\n\n a = asarray(a)\n b = asarray(b)\n if a.dtype != b.dtype:\n type_order = bool, int, float, complex\n if type_order.index(a.dtype) < type_order.index(b.dtype):\n a = astype[b.dtype](a)\n else:\n b = astype[a.dtype](b)\n if not a.ndim:\n return _inflate_scalar(a, b.shape), b\n if not b.ndim:\n return a, _inflate_scalar(b, a.shape)\n if equalshape(a.shape, b.shape):\n return a, b\n raise ValueError('incompatible shapes: {} != {}'.format(*[tuple(int(n) if n.isconstant else n for n in arg.shape) for arg in (a, b)]))\n\ndef _inflate_scalar(arg, shape):\n arg = asarray(arg)\n assert arg.ndim == 0\n for idim, length in enumerate(shape):\n arg = insertaxis(arg, idim, length)\n return arg\n\ndef _isunique(array):\n return numpy.unique(array).size == array.size\n\ndef _dependencies_sans_invariants(func, arg):\n invariants = []\n dependencies = []\n _populate_dependencies_sans_invariants(func, arg, invariants, dependencies, {arg})\n assert (dependencies or invariants or [arg])[-1] == func\n return tuple(invariants), tuple(dependencies)\n\ndef _populate_dependencies_sans_invariants(func, arg, invariants, dependencies, cache):\n if func in cache:\n return\n cache.add(func)\n if arg in func.arguments:\n for child in func._Evaluable__args:\n _populate_dependencies_sans_invariants(child, arg, invariants, dependencies, cache)\n dependencies.append(func)\n else:\n invariants.append(func)\n\nclass _Stats:\n\n __slots__ = 'ncalls', 'time', '_start'\n\n def __init__(self, ncalls: int = 0, time: int = 0) -> None:\n self.ncalls = ncalls\n self.time = time\n self._start = None\n\n def __repr__(self):\n return '_Stats(ncalls={}, time={})'.format(self.ncalls, self.time)\n\n def __add__(self, other):\n if not isinstance(other, _Stats):\n return NotImplemented\n return _Stats(self.ncalls+other.ncalls, self.time+other.time)\n\n def __enter__(self) -> None:\n self._start = time.perf_counter_ns()\n\n def __exit__(self, *exc_info) -> None:\n self.time += time.perf_counter_ns() - self._start\n self.ncalls += 1\n\n# FUNCTIONS\n\ndef isarray(arg):\n return isinstance(arg, Array)\n\ndef _containsarray(arg):\n return any(map(_containsarray, arg)) if isinstance(arg, (list, tuple)) else isarray(arg)\n\ndef iszero(arg):\n return isinstance(arg.simplified, Zeros)\n\ndef zeros(shape, dtype=float):\n return Zeros(shape, dtype)\n\ndef zeros_like(arr):\n return zeros(arr.shape, arr.dtype)\n\ndef isuniform(arg, value):\n unaligned, where = unalign(arg)\n return not where and isinstance(unaligned, Constant) and unaligned.value[()] == value\n\ndef ones(shape, dtype=float):\n return _inflate_scalar(numpy.ones((), dtype=dtype), shape)\n\ndef ones_like(arr):\n return ones(arr.shape, arr.dtype)\n\ndef reciprocal(arg):\n return power(arg, -1.)\n\ndef negative(arg):\n return multiply(arg, -1)\n\ndef sin(x):\n return Sin(x)\n\ndef cos(x):\n return Cos(x)\n\ndef tan(x):\n return Tan(x)\n\ndef 
arcsin(x):\n return ArcSin(x)\n\ndef arccos(x):\n return ArcCos(x)\n\ndef arctan(x):\n return ArcTan(x)\n\ndef exp(x):\n return Exp(x)\n\ndef ln(x):\n return Log(x)\n\ndef divmod(x, y):\n div = FloorDivide(*_numpy_align(x, y))\n mod = x - div * y\n return div, mod\n\ndef mod(arg1, arg2):\n return Mod(*_numpy_align(arg1, arg2))\n\ndef log2(arg):\n return ln(arg) / ln(2)\n\ndef log10(arg):\n return ln(arg) / ln(10)\n\ndef sqrt(arg):\n return power(arg, .5)\n\ndef arctan2(arg1, arg2):\n return ArcTan2(*_numpy_align(arg1, arg2))\n\ndef abs(arg):\n return arg * sign(arg)\n\ndef sinh(arg):\n return .5 * (exp(arg) - exp(-arg))\n\ndef cosh(arg):\n return .5 * (exp(arg) + exp(-arg))\n\ndef tanh(arg):\n return 1 - 2. / (exp(2*arg) + 1)\n\ndef arctanh(arg):\n return .5 * (ln(1+arg) - ln(1-arg))\n\ndef divide(arg1, arg2):\n return multiply(arg1, reciprocal(arg2))\n\ndef subtract(arg1, arg2):\n return add(arg1, negative(arg2))\n\ndef insertaxis(arg, n, length):\n return Transpose.from_end(InsertAxis(arg, length), n)\n\ndef concatenate(args, axis=0):\n lengths = [arg.shape[axis] for arg in args]\n *offsets, totlength = util.cumsum(lengths + [0])\n return Transpose.from_end(util.sum(Inflate(Transpose.to_end(arg, axis), Range(length) + offset, totlength) for arg, length, offset in zip(args, lengths, offsets)), axis)\n\ndef stack(args, axis=0):\n return Transpose.from_end(util.sum(Inflate(arg, i, len(args)) for i, arg in enumerate(args)), axis)\n\ndef repeat(arg, length, axis):\n arg = asarray(arg)\n assert equalindex(arg.shape[axis], 1)\n return insertaxis(get(arg, axis, 0), axis, length)\n\ndef get(arg, iax, item):\n if numeric.isint(item):\n if numeric.isint(arg.shape[iax]):\n item = numeric.normdim(arg.shape[iax], item)\n else:\n assert item >= 0\n return Take(Transpose.to_end(arg, iax), item)\n\ndef determinant(arg, axes=(-2,-1)):\n return Determinant(Transpose.to_end(arg, *axes))\n\ndef grammium(arg, axes=(-2,-1)):\n arg = Transpose.to_end(arg, *axes)\n grammium = einsum('Aki,Akj->Aij', arg, arg)\n return Transpose.from_end(grammium, *axes)\n\ndef sqrt_abs_det_gram(arg, axes=(-2,-1)):\n arg = Transpose.to_end(arg, *axes)\n if equalindex(arg.shape[-1], arg.shape[-2]):\n return abs(Determinant(arg))\n else:\n return sqrt(abs(Determinant(grammium(arg))))\n\ndef inverse(arg, axes=(-2,-1)):\n return Transpose.from_end(Inverse(Transpose.to_end(arg, *axes)), *axes)\n\ndef takediag(arg, axis=-2, rmaxis=-1):\n arg = asarray(arg)\n axis = numeric.normdim(arg.ndim, axis)\n rmaxis = numeric.normdim(arg.ndim, rmaxis)\n assert axis < rmaxis\n return Transpose.from_end(_takediag(arg, axis, rmaxis), axis)\n\ndef _takediag(arg, axis1=-2, axis2=-1):\n return TakeDiag(Transpose.to_end(arg, axis1, axis2))\n\ndef derivative(func, var, seen=None):\n 'derivative'\n\n assert isinstance(var, DerivativeTargetBase), 'invalid derivative target {!r}'.format(var)\n if var.dtype != float or var not in func.arguments:\n return Zeros(func.shape + var.shape, dtype=func.dtype)\n if seen is None:\n seen = {}\n if func in seen:\n result = seen[func]\n else:\n result = func._derivative(var, seen)\n seen[func] = result\n assert equalshape(result.shape, func.shape+var.shape), 'bug in {}._derivative'.format(type(func).__name__)\n return result\n\ndef diagonalize(arg, axis=-1, newaxis=-1):\n arg = asarray(arg)\n axis = numeric.normdim(arg.ndim, axis)\n newaxis = numeric.normdim(arg.ndim+1, newaxis)\n assert axis < newaxis\n return Transpose.from_end(Diagonalize(Transpose.to_end(arg, axis)), axis, newaxis)\n\ndef sign(arg):\n arg = 
asarray(arg)\n return Sign(arg)\n\ndef eig(arg, axes=(-2,-1), symmetric=False):\n eigval, eigvec = Eig(Transpose.to_end(arg, *axes), symmetric)\n return Tuple(Transpose.from_end(v, *axes) for v in [diagonalize(eigval), eigvec])\n\[email protected]_annotations\ndef _takeslice(arg:asarray, s:types.strict[slice], axis:types.strictint):\n n = arg.shape[axis]\n if s.step == None or s.step == 1:\n start = 0 if s.start is None else s.start if s.start >= 0 else s.start + n\n stop = n if s.stop is None else s.stop if s.stop >= 0 else s.stop + n\n if start == 0 and stop == n:\n return arg\n index = Range(stop-start) + start\n elif n.isconstant:\n index = Constant(numpy.arange(*s.indices(arg.shape[axis])))\n else:\n raise Exception('a non-unit slice requires a constant-length axis')\n return take(arg, index, axis)\n\[email protected]_annotations\ndef take(arg:asarray, index:asarray, axis:types.strictint):\n assert index.ndim == 1\n length = arg.shape[axis]\n if index.dtype == bool:\n assert equalindex(index.shape[0], length)\n index = Find(index)\n elif index.isconstant:\n index_ = index.eval()\n ineg = numpy.less(index_, 0)\n if not length.isconstant:\n if ineg.any():\n raise IndexError('negative indices only allowed for constant-length axes')\n elif ineg.any():\n if numpy.less(index_, -int(length)).any():\n raise IndexError('indices out of bounds: {} < {}'.format(index_, -int(length)))\n return _take(arg, Constant(index_ + ineg * int(length)), axis)\n elif numpy.greater_equal(index_, int(length)).any():\n raise IndexError('indices out of bounds: {} >= {}'.format(index_, int(length)))\n elif numpy.greater(numpy.diff(index_), 0).all():\n return mask(arg, numeric.asboolean(index_, int(length)), axis)\n return _take(arg, index, axis)\n\[email protected]_annotations\ndef _take(arg:asarray, index:asarray, axis:types.strictint):\n axis = numeric.normdim(arg.ndim, axis)\n return Transpose.from_end(Take(Transpose.to_end(arg, axis), index), *range(axis, axis+index.ndim))\n\[email protected]_annotations\ndef _inflate(arg:asarray, dofmap:asarray, length:asindex, axis:types.strictint):\n axis = numeric.normdim(arg.ndim+1-dofmap.ndim, axis)\n assert equalshape(dofmap.shape, arg.shape[axis:axis+dofmap.ndim])\n return Transpose.from_end(Inflate(Transpose.to_end(arg, *range(axis, axis+dofmap.ndim)), dofmap, length), axis)\n\ndef mask(arg, mask, axis=0):\n return take(arg, mask, axis)\n\ndef unravel(func, axis, shape):\n func = asarray(func)\n axis = numeric.normdim(func.ndim, axis)\n assert len(shape) == 2\n return Transpose.from_end(Unravel(Transpose.to_end(func, axis), *shape), axis, axis+1)\n\ndef ravel(func, axis):\n func = asarray(func)\n axis = numeric.normdim(func.ndim-1, axis)\n return Transpose.from_end(Ravel(Transpose.to_end(func, axis, axis+1)), axis)\n\ndef _flat(func):\n func = asarray(func)\n if func.ndim == 0:\n return InsertAxis(func, 1)\n while func.ndim > 1:\n func = Ravel(func)\n return func\n\ndef prependaxes(func, shape):\n 'Prepend axes with specified `shape` to `func`.'\n\n func = asarray(func)\n for i, n in enumerate(shape):\n func = insertaxis(func, i, n)\n return func\n\ndef appendaxes(func, shape):\n 'Append axes with specified `shape` to `func`.'\n\n func = asarray(func)\n for n in shape:\n func = InsertAxis(func, n)\n return func\n\ndef loop_index(name, length):\n return _LoopIndex(name, length)\n\ndef loop_sum(func, index):\n func = asarray(func)\n index = types.strict[_LoopIndex](index)\n return LoopSum(func, index._name, index.length)\n\ndef _loop_concatenate_data(func, index):\n 
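# Helper gathering everything LoopConcatenate needs: per-iteration chunk\n # sizes along the last axis are accumulated into offsets, so that iteration\n # `index` writes its block to the slice start:stop of the concatenated\n # result, whose total length is the final offset.\n 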
func = asarray(func)\n index = types.strict[_LoopIndex](index)\n chunk_size = func.shape[-1]\n if chunk_size.isconstant:\n chunk_sizes = InsertAxis(chunk_size, index.length)\n else:\n chunk_sizes = loop_concatenate(InsertAxis(func.shape[-1], 1), index)\n offsets = _SizesToOffsets(chunk_sizes)\n start = Take(offsets, index)\n stop = Take(offsets, index+1)\n return (func, start, stop, *func.shape[:-1], Take(offsets, index.length))\n\ndef loop_concatenate(func, index):\n funcdata = _loop_concatenate_data(func, index)\n return LoopConcatenate(funcdata, index._name, index.length)\n\ndef loop_concatenate_combined(funcs, index):\n unique_funcs = []\n unique_funcs.extend(func for func in funcs if func not in unique_funcs)\n unique_func_data = tuple(_loop_concatenate_data(func, index) for func in unique_funcs)\n loop = LoopConcatenateCombined(unique_func_data, index._name, index.length)\n return tuple(ArrayFromTuple(loop, unique_funcs.index(func), shape, func.dtype) for func, start, stop, *shape in unique_func_data)\n\n@replace\ndef replace_arguments(value, arguments):\n '''Replace :class:`Argument` objects in ``value``.\n\n Replace :class:`Argument` objects in ``value`` according to the ``arguments``\n map, taking into account derivatives to the local coordinates.\n\n Args\n ----\n value : :class:`Array`\n Array to be edited.\n arguments : :class:`collections.abc.Mapping` with :class:`Array`\\\\s as values\n :class:`Argument`\\\\s replacements. The keys correspond to the ``name``\n passed to an :class:`Argument` and the values are the replacements.\n\n Returns\n -------\n :class:`Array`\n The edited ``value``.\n '''\n if isinstance(value, Argument) and value._name in arguments:\n v = asarray(arguments[value._name])\n assert equalshape(value.shape, v.shape), (value.shape, v.shape)\n assert value.dtype == v.dtype, (value.dtype, v.dtype)\n return v\n\ndef einsum(fmt, *args, **dims):\n '''Multiply and/or contract arrays via format string.\n\n The format string consists of a comma-separated list of axis labels, followed\n by ``->`` and the axis labels of the return value. For example, the following\n swaps the axes of a matrix:\n\n >>> einsum('ij->ji', ones([2,3]))\n nutils.evaluable.Transpose<f:3,2>\n\n Axis labels that do not occur in the return value are summed. For example,\n the following performs a matrix product of three matrices:\n\n >>> einsum('ij,jk,kl->il', ones([2,3]), ones([3,4]), ones([4,5]))\n nutils.evaluable.Sum<f:2,5>\n\n In case the dimension of the input and output arrays may vary, a variable\n length axes group can be denoted by a capital letter. Its length is\n automatically established based on the dimension of the input arrays. The\n following example performs a tensor product of an array and a vector:\n\n >>> einsum('A,i->Ai', ones([2,3,4]), ones([5]))\n nutils.evaluable.Multiply<f:2,3,4,5>\n\n The format string may contain multiple variable length axes groups, but their\n lengths must be resolvable from left to right. 
In case this is not possible,\n lengths may be specified as keyword arguments.\n\n >>> einsum('AjB,i->AijB', ones([2,3,4]), ones([5]), B=1)\n nutils.evaluable.Multiply<f:2,5,3,4>\n '''\n\n sin, sout = fmt.split('->')\n sin = sin.split(',')\n\n if len(sin) != len(args):\n raise ValueError('number of arguments does not match format string')\n\n if any(len(s) != len(set(s)) for s in (*sin, sout)):\n raise ValueError('internal repetitions are not supported')\n\n if any(n < 0 for n in dims.values()):\n raise ValueError('axis group dimensions cannot be negative')\n\n for c in 'abcdefghijklmnopqrstuvwxyz':\n dims.setdefault(c, 1) # lowercase characters default to single dimension\n\n for s, arg in zip(sin, args):\n missing_dims = arg.ndim - builtins.sum(dims.get(c, 0) for c in s)\n unknown_axes = [c for c in s if c not in dims]\n if len(unknown_axes) == 1 and missing_dims >= 0:\n dims[unknown_axes[0]] = missing_dims\n elif len(unknown_axes) > 1:\n raise ValueError('cannot establish length of variable groups {}'.format(', '.join(unknown_axes)))\n elif missing_dims:\n raise ValueError('argument dimensions are inconsistent with format string')\n\n # expand characters to match argument dimension\n *sin, sout = [[(c, d) for c in s for d in range(dims[c])] for s in (*sin, sout)]\n sall = sout + sorted({c for s in sin for c in s if c not in sout})\n\n shapes = {}\n for s, arg in zip(sin, args):\n assert len(s) == arg.ndim\n for c, sh in zip(s, arg.shape):\n if not equalindex(shapes.setdefault(c, sh), sh):\n raise ValueError('shapes do not match for axis {0[0]}{0[1]}'.format(c))\n\n ret = None\n for s, arg in zip(sin, args):\n index = {c: i for i, c in enumerate(s)}\n for c in sall:\n if c not in index:\n index[c] = arg.ndim\n arg = InsertAxis(arg, shapes[c])\n v = Transpose(arg, [index[c] for c in sall])\n ret = v if ret is None else ret * v\n for i in range(len(sout), len(sall)):\n ret = Sum(ret)\n return ret\n\[email protected]_or_multiple\ndef eval_sparse(funcs: AsEvaluableArray, **arguments: typing.Mapping[str, numpy.ndarray]) -> typing.Tuple[numpy.ndarray, ...]:\n '''Evaluate one or several Array objects as sparse data.\n\n Args\n ----\n funcs : :class:`tuple` of Array objects\n Arrays to be evaluated.\n arguments : :class:`dict` (default: None)\n Optional arguments for function evaluation.\n\n Returns\n -------\n results : :class:`tuple` of sparse data arrays\n '''\n\n funcs = tuple(func.as_evaluable_array.assparse for func in funcs)\n with Tuple(funcs).optimized_for_numpy.session(graphviz=graphviz) as eval:\n return eval(**arguments)\n\n\nif __name__ == '__main__':\n # Diagnostics for the development for simplify operations.\n simplify_priority = (\n Transpose, Ravel, # reinterpretation\n InsertAxis, Inflate, Diagonalize, # size increasing\n Multiply, Add, LoopSum, Sign, Power, Inverse, Unravel, # size preserving\n Product, Determinant, TakeDiag, Take, Sum) # size decreasing\n # The simplify priority defines the preferred order in which operations are\n # performed: shape decreasing operations such as Sum and Take should be done\n # as soon as possible, and shape increasing operations such as Inflate and\n # Diagonalize as late as possible. In shuffling the order of operations the\n # two classes might annihilate each other, for example when a Sum passes\n # through a Diagonalize. 
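(Concretely, Diagonalize._sum returns its bare operand when the summed axis\n # is one of the two diagonal axes, eliminating both nodes at once.) 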
Any shape increasing operations that remain should\n # end up at the surface, exposing sparsity by means of the assparse method.\n attrs = ['_'+cls.__name__.lower() for cls in simplify_priority]\n # The simplify operations responsible for swapping (among others) are methods\n # named '_add', '_multiply', etc. In order to avoid recursions the operations\n # should only be defined in the direction defined by operator priority. The\n # following code warns against violations of this rule and lists permissible\n # simplifications that have not yet been implemented.\n for i, cls in enumerate(simplify_priority):\n warn = [attr for attr in attrs[:i] if getattr(cls, attr) is not getattr(Array, attr)]\n if warn:\n print('[!] {} should not define {}'.format(cls.__name__, ', '.join(warn)))\n missing = [attr for attr in attrs[i+1:] if getattr(cls, attr) is getattr(Array, attr)]\n if missing:\n print('[ ] {} could define {}'.format(cls.__name__, ', '.join(missing)))\n\n# vim:sw=2:sts=2:et\n",

"# Copyright (c) 2014 Evalf\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nThe util module provides a collection of general purpose methods.\n\"\"\"\n\nfrom . 
import numeric\nimport sys, os, numpy, collections.abc, inspect, functools, operator, numbers, pathlib, ctypes, io, contextlib\n\nsupports_outdirfd = os.open in os.supports_dir_fd and os.listdir in os.supports_fd\n\nsum = functools.partial(functools.reduce, operator.add)\nproduct = functools.partial(functools.reduce, operator.mul)\n\ndef cumsum(seq):\n offset = 0\n for i in seq:\n yield offset\n offset += i\n\ndef gather(items):\n gathered = []\n d = {}\n for key, value in items:\n try:\n values = d[key]\n except KeyError:\n d[key] = values = []\n gathered.append((key, values))\n values.append(value)\n return gathered\n\ndef pairwise(items, *, periodic=False):\n items = iter(items)\n try:\n first = a = next(items)\n except StopIteration:\n return\n for b in items:\n yield a, b\n a = b\n if periodic:\n yield a, first\n\ndef allequal(seq1, seq2):\n seq1 = iter(seq1)\n seq2 = iter(seq2)\n for item1, item2 in zip(seq1, seq2):\n if item1 != item2:\n return False\n if list(seq1) or list(seq2):\n return False\n return True\n\nclass NanVec(numpy.ndarray):\n 'nan-initialized vector'\n\n def __new__(cls, length):\n vec = numpy.empty(length, dtype=float).view(cls)\n vec[:] = numpy.nan\n return vec\n\n @property\n def where(self):\n return ~numpy.isnan(self.view(numpy.ndarray))\n\n def __iand__(self, other):\n if self.dtype != float:\n return self.view(numpy.ndarray).__iand__(other)\n where = self.where\n if numpy.isscalar(other):\n self[where] = other\n else:\n assert numeric.isarray(other) and other.shape == self.shape\n self[where] = other[where]\n return self\n\n def __and__(self, other):\n if self.dtype != float:\n return self.view(numpy.ndarray).__and__(other)\n return self.copy().__iand__(other)\n\n def __ior__(self, other):\n if self.dtype != float:\n return self.view(numpy.ndarray).__ior__(other)\n wherenot = ~self.where\n self[wherenot] = other if numpy.isscalar(other) else other[wherenot]\n return self\n\n def __or__(self, other):\n if self.dtype != float:\n return self.view(numpy.ndarray).__or__(other)\n return self.copy().__ior__(other)\n\n def __invert__(self):\n if self.dtype != float:\n return self.view(numpy.ndarray).__invert__()\n nanvec = NanVec(len(self))\n nanvec[numpy.isnan(self)] = 0\n return nanvec\n\ndef regularize(bbox, spacing, xy=numpy.empty((0,2))):\n xy = numpy.asarray(xy)\n index0 = numeric.floor(bbox[:,0] / (2*spacing)) * 2 - 1\n shape = numeric.ceil(bbox[:,1] / (2*spacing)) * 2 + 2 - index0\n index = numeric.round(xy / spacing) - index0\n keep = numpy.logical_and(numpy.greater_equal(index, 0), numpy.less(index, shape)).all(axis=1)\n mask = numpy.zeros(shape, dtype=bool)\n for i, ind in enumerate(index):\n if keep[i]:\n if not mask[tuple(ind)]:\n mask[tuple(ind)] = True\n else:\n keep[i] = False\n coursex = mask[0:-2:2] | mask[1:-1:2] | mask[2::2]\n coarsexy = coursex[:,0:-2:2] | coursex[:,1:-1:2] | coursex[:,2::2]\n vacant, = (~coarsexy).ravel().nonzero()\n newindex = numpy.array(numpy.unravel_index(vacant, coarsexy.shape)).T * 2 + index0 + 1\n return numpy.concatenate([newindex * spacing, xy[keep]], axis=0)\n\ndef tri_merge(tri, x, mergetol=0):\n '''Create connected triangulation by connecting (near) identical points.\n\n Based on a set of coordinates ``x``, create a modified copy of ``tri`` with\n any occurrence of ``j`` replaced by ``i`` if ``x[i]`` equals ``x[j]`` within\n specified tolerance. 
The result is a triangulation that remains valid for any\n associated data vector that follows the same equality relations.\n\n Example:\n\n >>> x = [0,0], [1,0], [0,1], [1,0], [1,1] # note: x[1] == x[3]\n >>> tri = [0,1,2], [2,3,4]\n >>> tri_merge(tri, x)\n array([[0, 1, 2],\n [2, 1, 4]])\n\n .. requires:: scipy\n\n Args\n ----\n x : :class:`float` array\n Vertex coordinates.\n tri : :class:`int` array\n Triangulation.\n mergetol : :class:`float` (optional, default 0)\n Distance within which two points are considered equal. If mergetol == 0\n then points are considered equal if and only if their coordinates are\n identical. If mergetol > 0 (requires scipy) then points are considered\n equal if they are within Euclidean distance < mergetol. If mergetol < 0\n then tri is returned unchanged.\n\n Returns\n -------\n merged_tri : :class:`int` array\n '''\n\n tri = numpy.asarray(tri)\n x = numpy.asarray(x)\n assert tri.dtype == int\n assert x.ndim == tri.ndim == 2\n assert tri.shape[1] == x.shape[1] + 1\n if mergetol < 0:\n return tri\n if mergetol == 0:\n order = numpy.lexsort(x.T)\n keep = numpy.concatenate([[True], numpy.diff(x[order], axis=0).any(axis=1)])\n renumber = numpy.empty(len(x), dtype=int)\n renumber[order] = order[keep][keep.cumsum()-1]\n else:\n import scipy.spatial\n renumber = numpy.arange(len(x))\n for i, j in sorted(scipy.spatial.cKDTree(x).query_pairs(mergetol)):\n assert i < j\n renumber[j] = renumber[i]\n return renumber[tri]\n\nclass tri_interpolator:\n '''Interpolate function values defined in triangulation vertices.\n\n Convenience object that implements 2D interpolation on top of matplotlib's\n triangulation routines. Unlike matplotlib's own ``LinearTriInterpolator``,\n the ``tri_interpolator`` allows for interpolation of multi-dimensional\n arrays, as well as repeated interpolations of different vertex values.\n\n The arguments are identical to :func:`tri_merge`.\n\n After instantiation of the interpolator object, interpolation coordinates are\n specified via the object's getitem operator. The resulting callable performs\n the interpolation:\n\n >>> trix = [0,0], [1,0], [0,1], [1,1] # vertex coordinates\n >>> triu = 0, 0, 10, 0 # vertex values\n >>> interpolate = tri_interpolator([[0,1,2],[1,3,2]], trix)\n >>> x = [.1,.1], [.1,.9], [.9,.9] # interpolation coordinates\n >>> u = interpolate[x](triu) # interpolated values\n\n .. requires:: matplotlib\n '''\n\n def __init__(self, tri, x, mergetol=0):\n x = numpy.asarray(x)\n assert x.ndim == 2\n if x.shape[1] != 2:\n raise NotImplementedError('only 2D interpolation is supported for now')\n import matplotlib.tri\n self.mpltri = matplotlib.tri.Triangulation(x[:,0], x[:,1], tri_merge(tri, x, mergetol))\n def __getitem__(self, x):\n x = numpy.asarray(x)\n assert x.shape[-1] == 2\n itri = self.mpltri.get_trifinder()(x[...,0].ravel(), x[...,1].ravel())\n inside = itri != -1\n itri = itri[inside]\n plane_coords = numpy.concatenate([x.reshape(-1, 2)[inside], numpy.ones([len(itri), 1])], axis=1)\n def interpolate(vtri):\n vtri = numpy.asarray(vtri)\n assert vtri.shape[0] == len(self.mpltri.x)\n vx = numpy.empty(x.shape[:-1] + vtri.shape[1:])\n vx[...] 
= numpy.nan\n for vx_items, vtri_items in zip(vx.reshape(len(inside), -1).T, vtri.reshape(len(vtri), -1).T):\n plane_coeffs = self.mpltri.calculate_plane_coefficients(vtri_items)\n vx_items[inside] = numeric.contract(plane_coords, plane_coeffs[itri], axis=1)\n return vx\n return interpolate\n\nclass linear_regressor:\n def add(self, x, y, weight=.5):\n y = numpy.asarray(y)\n new = numpy.outer([1, x], [x] + list(y.flat))\n (x_, *y_), (xx_, *xy_) = self.avg = (1-weight) * getattr(self, 'avg', new) + weight * new\n return numpy.dot([[-x_,1], [xx_,-x_]], [y_,xy_]).reshape((2,)+y.shape) / (xx_-x_**2 or numpy.nan)\n\ndef obj2str(obj):\n '''compact, lossy string representation of arbitrary object'''\n return '['+','.join(obj2str(item) for item in obj)+']' if isinstance(obj, collections.abc.Iterable) \\\n else str(obj).strip('0').rstrip('.') or '0' if isinstance(obj, numbers.Real) \\\n else str(obj)\n\nclass single_or_multiple:\n \"\"\"\n Method wrapper, converts first positional argument to tuple: tuples/lists\n are passed on as tuples, other objects are turned into tuple singleton.\n Return values should match the length of the argument list, and are unpacked\n if the original argument was not a tuple/list.\n\n >>> class Test:\n ... @single_or_multiple\n ... def square(self, args):\n ... return [v**2 for v in args]\n ...\n >>> T = Test()\n >>> T.square(2)\n 4\n >>> T.square([2,3])\n (4, 9)\n\n Args\n ----\n f: :any:`callable`\n Method that expects a tuple as first positional argument, and that\n returns a list/tuple of the same length.\n\n Returns\n -------\n :\n Wrapped method.\n \"\"\"\n\n def __init__(self, f):\n functools.update_wrapper(self, f)\n\n def __get__(self, instance, owner):\n return single_or_multiple(self.__wrapped__.__get__(instance, owner))\n\n def __call__(self, *args, **kwargs):\n if not args:\n raise TypeError('{} requires at least 1 positional argument'.format(self.__wrapped__.__name__))\n ismultiple = isinstance(args[0], (list,tuple,map))\n retvals = tuple(self.__wrapped__(tuple(args[0]) if ismultiple else args[:1], *args[1:], **kwargs))\n if not ismultiple:\n retvals, = retvals\n return retvals\n\nclass positional_only:\n '''Change all positional-or-keyword arguments to positional-only.\n\n Python introduces syntax to define positional-only parameters in version 3.8,\n but the same effect can be achieved in older versions by using a wrapper with\n a var-positional argument. The :func:`positional_only` decorator uses this\n technique to treat all positional-or-keyword arguments as positional-only. In\n order to avoid name clashes between the positional-only arguments and\n variable keyword arguments, the wrapper additionally introduces the\n convention that the last argument receives the variable keyword argument\n dictionary in case is has a default value of ... (ellipsis).\n\n Example:\n\n >>> @positional_only\n ... def f(x, *, y):\n ... pass\n >>> inspect.signature(f)\n <Signature (x, /, *, y)>\n\n >>> @positional_only\n ... def f(x, *args, y, kwargs=...):\n ... 
pass\n >>> inspect.signature(f)\n <Signature (x, /, *args, y, **kwargs)>\n\n Args\n ----\n f : :any:`callable`\n Function to be wrapped.\n '''\n\n def __init__(self, f):\n signature = inspect.signature(f)\n parameters = list(signature.parameters.values())\n keywords = []\n varkw = None\n for i, param in enumerate(parameters):\n if param.kind is param.VAR_KEYWORD:\n raise Exception('positional_only decorated function must use ellipses to mark a variable keyword argument')\n if i == len(parameters)-1 and param.default is ...:\n parameters[i] = param.replace(kind=inspect.Parameter.VAR_KEYWORD, default=inspect.Parameter.empty)\n varkw = param.name\n elif param.kind is param.POSITIONAL_OR_KEYWORD:\n parameters[i] = param.replace(kind=param.POSITIONAL_ONLY)\n elif param.kind is param.KEYWORD_ONLY:\n keywords.append(param.name)\n self.__keywords = tuple(keywords)\n self.__varkw = varkw\n self.__signature__ = signature.replace(parameters=parameters)\n functools.update_wrapper(self, f)\n\n def __get__(self, instance, owner):\n return positional_only(self.__wrapped__.__get__(instance, owner))\n\n def __call__(self, *args, **kwargs):\n wrappedkwargs = {name: kwargs.pop(name) for name in self.__keywords if name in kwargs}\n if self.__varkw:\n wrappedkwargs[self.__varkw] = kwargs\n elif kwargs:\n raise TypeError('{}() got an unexpected keyword argument {!r}'.format(self.__wrapped__.__name__, *kwargs))\n return self.__wrapped__(*args, **wrappedkwargs)\n\ndef loadlib(**libname):\n '''\n Find and load a dynamic library using :any:`ctypes.CDLL`. For each\n (supported) platform the name of the library should be specified as a keyword\n argument, including the extension, where the keywords should match the\n possible values of :any:`sys.platform`.\n\n Example\n -------\n\n To load the Intel MKL runtime library, write::\n\n loadlib(linux='libmkl_rt.so', darwin='libmkl_rt.dylib', win32='mkl_rt.dll')\n '''\n\n try:\n return ctypes.CDLL(libname[sys.platform])\n except (OSError, KeyError):\n pass\n\ndef readtext(path):\n '''Read file and return contents\n\n Args\n ----\n path: :class:`os.PathLike`, :class:`str` or :class:`io.TextIOBase`\n Path-like or file-like object pointing to the data to be read.\n\n Returns\n -------\n :\n File data as :class:`str`.\n '''\n\n if isinstance(path, pathlib.Path):\n with path.open() as f:\n return f.read()\n\n if isinstance(path, str):\n with open(path) as f:\n return f.read()\n\n if isinstance(path, io.TextIOBase):\n return path.read()\n\n raise TypeError('readtext requires a path-like or file-like argument')\n\ndef binaryfile(path):\n '''Open file for binary reading\n\n Args\n ----\n path: :class:`os.PathLike`, :class:`str` or :class:`io.BufferedIOBase`\n Path-like or file-like object pointing to the data to be read.\n\n Returns\n -------\n :\n Context that returns a :class:`io.BufferedReader` upon entry.\n '''\n\n if isinstance(path, pathlib.Path):\n return path.open('rb')\n\n if isinstance(path, str):\n return open(path, 'rb')\n\n if isinstance(path, io.BufferedIOBase):\n return contextlib.nullcontext(path) if hasattr(contextlib, 'nullcontext') \\\n else contextlib.contextmanager(iter)([path]) # Python <= 3.6\n\n raise TypeError('binaryfile requires a path-like or file-like argument')\n\nclass settable:\n '''Context-switchable data container.\n\n A mutable container for a general Python object, which can be changed by\n entering the ``sets`` context. 
The current value can be accessed via the\n ``value`` attribute.\n\n >>> myprop = settable(2)\n >>> myprop.value\n 2\n >>> with myprop.sets(3):\n ... myprop.value\n 3\n >>> myprop.value\n 2\n '''\n\n __slots__ = 'value'\n\n def __init__(self, value=None):\n self.value = value\n\n @contextlib.contextmanager\n def sets(self, value):\n oldvalue = self.value\n self.value = value\n try:\n yield\n finally:\n self.value = oldvalue\n\ndef index(sequence, item):\n '''Index of first occurrence.\n\n Generalization of `tuple.index`.\n '''\n\n if isinstance(sequence, (list, tuple)):\n return sequence.index(item)\n for i, v in enumerate(sequence):\n if v == item:\n return i\n raise ValueError('index(sequence, item): item not in sequence')\n\ndef unique(items, key=None):\n '''Deduplicate items in sequence.\n\n Return a tuple `(unique, indices)` such that `items[i] == unique[indices[i]]`\n and `unique` does not contain duplicate items. An optional `key` is applied\n to all items before testing for equality.\n '''\n\n seen = {}\n unique = []\n indices = []\n for item in items:\n k = item if key is None else key(item)\n try:\n index = seen[k]\n except KeyError:\n index = seen[k] = len(unique)\n unique.append(item)\n indices.append(index)\n return unique, indices\n\ntry:\n cached_property = functools.cached_property\nexcept AttributeError: # python < 3.8\n def cached_property(func): # minimal backport\n @functools.wraps(func)\n def wrapped(self):\n try:\n val = self.__dict__[func.__name__]\n except KeyError:\n self.__dict__[func.__name__] = val = func(self)\n return val\n return property(wrapped)\n\n# vim:sw=2:sts=2:et\n" ]
[ [ "numpy.rollaxis", "numpy.product", "numpy.einsum", "numpy.asarray", "numpy.cumsum", "numpy.ndarray", "numpy.dtype", "numpy.all", "numpy.concatenate", "numpy.add", "numpy.unique", "numpy.arange", "numpy.less", "numpy.sin", "numpy.linalg.det", "numpy.intersect1d", "numpy.choose", "numpy.diff", "numpy.interp", "numpy.repeat", "numpy.zeros", "numpy.multiply", "numpy.power", "numpy.linalg.inv", "numpy.equal", "numpy.transpose", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.linalg.solve", "numpy.cos", "numpy.ones", "numpy.sign", "numpy.ndindex", "numpy.core.multiarray.c_einsum", "numpy.empty" ], [ "numpy.dot", "numpy.unravel_index", "numpy.asarray", "numpy.isnan", "numpy.less", "numpy.lexsort", "numpy.concatenate", "numpy.greater_equal", "numpy.diff", "numpy.isscalar", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhaoruinan/indy_vision_task_sim
[ "71500c69de53808f8a691d600e56213c1768a9c6" ]
[ "src_ros2/ros2_sim_indy_pybullet/ros2_sim_indy_pybullet/indy7_fixed_cam_test.py" ]
[ "import pybullet as p\nimport time\nimport numpy as np\nobjects = ['apple', 'orange', 'banana', 'milk', 'orange']\np.connect(p.GUI)\np.setGravity(0, 0, -9.8)\n#planeId = p.loadURDF(\"plane.urdf\", [0, 0, 0])\nTableId = p.loadURDF(\"table/table.urdf\", [0.45, 0.35, -0.65])\nindyId= p.loadURDF(\"indy7.urdf\", [0, 0, 0])\nnum_obj = len(objects)\nobj_postions = np.random.rand(num_obj,2)\nz_postion = np.empty(num_obj); z_postion.fill(0.2)\nobj_postions = np.c_[ obj_postions, z_postion ] \nprint(obj_postions)\nfor object in objects:\n obj_path = \"models/urdf/\"+object+\".urdf\"\n objId = p.loadURDF(obj_path, obj_postions[-1,])\n obj_postions = np.delete(obj_postions, -1, 0)\n#appleId = p.loadURDF(\"models/urdf/apple.urdf\", [-0.4, 0, 0.1])\n\n\nviewMatrix = p.computeViewMatrix(\n cameraEyePosition=[0, 0, 3],\n cameraTargetPosition=[0, 0, 0],\n cameraUpVector=[0, 1, 0])\nprojectionMatrix = p.computeProjectionMatrixFOV(\n fov=45.0,\n aspect=1.0,\n nearVal=0.1,\n farVal=3.1)\nwidth, height, rgbImg, depthImg, segImg = p.getCameraImage(\n width=224, \n height=224,\n viewMatrix=viewMatrix,\n projectionMatrix=projectionMatrix)\np.resetBasePositionAndOrientation(indyId, [0, 0, 0.03], [0, 0, 0, 1])\np.setRealTimeSimulation(1) \ntime.sleep(1000)\np.disconnect()" ]
[ [ "numpy.delete", "numpy.random.rand", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RamsteinWR/PneumoniaRSNA1
[ "08bdba51292307a78ef711c6be4a63faea240ddf", "08bdba51292307a78ef711c6be4a63faea240ddf" ]
[ "models/RelationNetworks/relation_rcnn/core/rcnn.py", "models/DeformableConvNets/rfcn/operator_py/box_annotator_ohem.py" ]
[ "\"\"\"\nFast R-CNN:\ndata =\n {'data': [num_images, c, h, w],\n 'rois': [num_rois, 5]}\nlabel =\n {'label': [num_rois],\n 'bbox_target': [num_rois, 4 * num_classes],\n 'bbox_weight': [num_rois, 4 * num_classes]}\nroidb extended format [image_index]\n ['image', 'height', 'width', 'flipped',\n 'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']\n\"\"\"\n\nimport numpy as np\nimport numpy.random as npr\nfrom bbox.bbox_regression import expand_bbox_regression_targets\nfrom bbox.bbox_transform import bbox_overlaps, bbox_transform\nfrom utils.image import get_image, tensor_vstack\n\n\ndef get_rcnn_testbatch(roidb, cfg):\n \"\"\"\n return a dict of testbatch\n :param roidb: ['image', 'flipped'] + ['boxes']\n :return: data, label, im_info\n \"\"\"\n # assert len(roidb) == 1, 'Single batch only'\n imgs, roidb = get_image(roidb, cfg)\n im_array = imgs\n im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]\n\n im_rois = [roidb[i]['boxes'] for i in range(len(roidb))]\n\n if cfg.network.ROIDispatch:\n data = []\n for i in range(len(im_rois)):\n w = im_rois[i][:, 2] - im_rois[i][:, 0] + 1\n h = im_rois[i][:, 3] - im_rois[i][:, 1] + 1\n feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)\n\n rois_0 = im_rois[i][np.where(feat_id == 0)]\n if len(rois_0) == 0:\n rois_0 = np.zeros((1, 4))\n rois_1 = im_rois[i][np.where(feat_id == 1)]\n if len(rois_1) == 0:\n rois_1 = np.zeros((1, 4))\n rois_2 = im_rois[i][np.where(feat_id == 2)]\n if len(rois_2) == 0:\n rois_2 = np.zeros((1, 4))\n rois_3 = im_rois[i][np.where(feat_id == 3)]\n if len(rois_3) == 0:\n rois_3 = np.zeros((1, 4))\n # stack batch index\n data.append({'data': im_array[i],\n 'rois_0': np.hstack((0 * np.ones((rois_0.shape[0], 1)), rois_0)),\n 'rois_1': np.hstack((0 * np.ones((rois_1.shape[0], 1)), rois_1)),\n 'rois_2': np.hstack((0 * np.ones((rois_2.shape[0], 1)), rois_2)),\n 'rois_3': np.hstack((0 * np.ones((rois_3.shape[0], 1)), rois_3))})\n if cfg.TEST.LEARN_NMS:\n data[-1]['im_info'] = im_info[i]\n else:\n rois = im_rois\n rois_array = [np.hstack((0 * np.ones((rois[i].shape[0], 1)), rois[i])) for i in range(len(rois))]\n\n data = []\n for i in range(len(roidb)):\n data.append({'data': im_array[i],\n 'rois': rois_array[i]})\n if cfg.TEST.LEARN_NMS:\n data[-1]['im_info'] = im_info[i]\n\n label = {}\n\n return data, label, im_info\n\n\ndef get_rcnn_batch(roidb, cfg):\n \"\"\"\n return a dict of multiple images\n :param roidb: a list of dict, whose length controls batch size\n ['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']\n :return: data, label\n \"\"\"\n num_images = len(roidb)\n imgs, roidb = get_image(roidb, cfg)\n im_array = tensor_vstack(imgs)\n\n assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, \\\n 'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)\n\n if cfg.TRAIN.BATCH_ROIS == -1:\n rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])\n fg_rois_per_image = rois_per_image\n else:\n rois_per_image = cfg.TRAIN.BATCH_ROIS / cfg.TRAIN.BATCH_IMAGES\n fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)\n\n if cfg.network.ROIDispatch:\n rois_array_0 = list()\n rois_array_1 = list()\n rois_array_2 = list()\n rois_array_3 = list()\n else:\n rois_array = list()\n\n gt_labels_array = list()\n labels_array = list()\n bbox_targets_array = list()\n bbox_weights_array = list()\n\n for im_i in 
range(num_images):\n roi_rec = roidb[im_i]\n\n # infer num_classes from gt_overlaps\n num_classes = roi_rec['gt_overlaps'].shape[1]\n\n # label = class RoI has max overlap with\n rois = roi_rec['boxes']\n labels = roi_rec['max_classes']\n overlaps = roi_rec['max_overlaps']\n bbox_targets = roi_rec['bbox_targets']\n gt_lables = roi_rec['is_gt']\n\n if cfg.TRAIN.BATCH_ROIS == -1:\n im_rois, labels_t, bbox_targets, bbox_weights = \\\n sample_rois_v2(rois, num_classes, cfg, labels=labels, overlaps=overlaps, bbox_targets=bbox_targets,\n gt_boxes=None)\n\n assert np.abs(im_rois - rois).max() < 1e-3\n assert np.abs(labels_t - labels).max() < 1e-3\n else:\n im_rois, labels, bbox_targets, bbox_weights, gt_lables = \\\n sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,\n labels, overlaps, bbox_targets, gt_lables=gt_lables)\n\n # project im_rois\n # do not round roi\n if cfg.network.ROIDispatch:\n w = im_rois[:, 2] - im_rois[:, 0] + 1\n h = im_rois[:, 3] - im_rois[:, 1] + 1\n feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)\n\n rois_0_idx = np.where(feat_id == 0)[0]\n rois_0 = im_rois[rois_0_idx]\n if len(rois_0) == 0:\n rois_0 = np.zeros((1, 4))\n label_0 = -np.ones((1,))\n gt_label_0 = -np.ones((1,))\n bbox_targets_0 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_0 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_0 = labels[rois_0_idx]\n gt_label_0 = gt_lables[rois_0_idx]\n bbox_targets_0 = bbox_targets[rois_0_idx]\n bbox_weights_0 = bbox_weights[rois_0_idx]\n\n rois_1_idx = np.where(feat_id == 1)[0]\n rois_1 = im_rois[rois_1_idx]\n if len(rois_1) == 0:\n rois_1 = np.zeros((1, 4))\n label_1 = -np.ones((1,))\n gt_label_1 = -np.ones((1,))\n bbox_targets_1 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_1 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_1 = labels[rois_1_idx]\n gt_label_1 = gt_lables[rois_1_idx]\n bbox_targets_1 = bbox_targets[rois_1_idx]\n bbox_weights_1 = bbox_weights[rois_1_idx]\n\n rois_2_idx = np.where(feat_id == 2)\n rois_2 = im_rois[rois_2_idx]\n if len(rois_2) == 0:\n rois_2 = np.zeros((1, 4))\n label_2 = -np.ones((1,))\n gt_label_2 = -np.ones((1,))\n bbox_targets_2 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_2 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_2 = labels[rois_2_idx]\n gt_label_2 = gt_lables[rois_2_idx]\n bbox_targets_2 = bbox_targets[rois_2_idx]\n bbox_weights_2 = bbox_weights[rois_2_idx]\n\n rois_3_idx = np.where(feat_id == 3)\n rois_3 = im_rois[rois_3_idx]\n if len(rois_3) == 0:\n rois_3 = np.zeros((1, 4))\n label_3 = -np.ones((1,))\n gt_label_3 = -np.ones((1,))\n bbox_targets_3 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_3 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_3 = labels[rois_3_idx]\n gt_label_3 = gt_lables[rois_3_idx]\n bbox_targets_3 = bbox_targets[rois_3_idx]\n bbox_weights_3 = bbox_weights[rois_3_idx]\n\n # stack batch index\n rois_array_0.append(np.hstack((im_i * np.ones((rois_0.shape[0], 1)), rois_0)))\n rois_array_1.append(np.hstack((im_i * np.ones((rois_1.shape[0], 1)), rois_1)))\n rois_array_2.append(np.hstack((im_i * np.ones((rois_2.shape[0], 1)), rois_2)))\n rois_array_3.append(np.hstack((im_i * np.ones((rois_3.shape[0], 1)), rois_3)))\n\n labels = np.concatenate([label_0, label_1, label_2, label_3], axis=0)\n gt_lables = np.concatenate([gt_label_0, gt_label_1, gt_label_2, gt_label_3], axis=0)\n bbox_targets = np.concatenate([bbox_targets_0, bbox_targets_1, bbox_targets_2, bbox_targets_3], axis=0)\n bbox_weights = 
np.concatenate([bbox_weights_0, bbox_weights_1, bbox_weights_2, bbox_weights_3], axis=0)\n else:\n rois = im_rois\n batch_index = im_i * np.ones((rois.shape[0], 1))\n rois_array_this_image = np.hstack((batch_index, rois))\n rois_array.append(rois_array_this_image)\n\n # add labels\n gt_labels_array.append(gt_lables)\n labels_array.append(labels)\n bbox_targets_array.append(bbox_targets)\n bbox_weights_array.append(bbox_weights)\n\n gt_labels_array = np.array(gt_labels_array)\n nongt_index_array = np.where(gt_labels_array == 0)[1]\n labels_array = np.array(labels_array)\n bbox_targets_array = np.array(bbox_targets_array)\n bbox_weights_array = np.array(bbox_weights_array)\n\n if cfg.network.USE_NONGT_INDEX:\n\n label = {'label': labels_array,\n 'nongt_index': nongt_index_array,\n 'bbox_target': bbox_targets_array,\n 'bbox_weight': bbox_weights_array}\n\n else:\n label = {'label': labels_array,\n 'bbox_target': bbox_targets_array,\n 'bbox_weight': bbox_weights_array}\n\n if cfg.network.ROIDispatch:\n rois_array_0 = np.array(rois_array_0)\n rois_array_1 = np.array(rois_array_1)\n rois_array_2 = np.array(rois_array_2)\n rois_array_3 = np.array(rois_array_3)\n # rois_concate = np.concatenate((rois_array_0, rois_array_1, rois_array_2, rois_array_3), axis=1)\n # gt_rois_t = rois_concate[:, gt_labels_array[0,:] > 0]\n data = {'data': im_array,\n 'rois_0': rois_array_0,\n 'rois_1': rois_array_1,\n 'rois_2': rois_array_2,\n 'rois_3': rois_array_3}\n else:\n rois_array = np.array(rois_array)\n data = {'data': im_array,\n 'rois': rois_array}\n\n if cfg.TRAIN.LEARN_NMS:\n # im info\n im_info = np.array([roidb[0]['im_info']], dtype=np.float32)\n # gt_boxes\n if roidb[0]['gt_classes'].size > 0:\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)\n gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]\n gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]\n else:\n gt_boxes = np.empty((0, 5), dtype=np.float32)\n data['im_info'] = im_info\n data['gt_boxes'] = gt_boxes\n\n return data, label\n\n\ndef sample_rois_v2(rois, num_classes, cfg,\n labels=None, overlaps=None, bbox_targets=None, gt_boxes=None):\n \"\"\"\n generate random sample of ROIs comprising foreground and background examples\n :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index\n :param fg_rois_per_image: foreground roi number\n :param rois_per_image: total roi number\n :param num_classes: number of classes\n :param labels: maybe precomputed\n :param overlaps: maybe precomputed (max_overlaps)\n :param bbox_targets: maybe precomputed\n :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)\n :return: (labels, rois, bbox_targets, bbox_weights)\n \"\"\"\n if labels is None:\n overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))\n gt_assignment = overlaps.argmax(axis=1)\n overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # set labels of bg_rois to be 0\n bg_ind = np.where(overlaps < cfg.TRAIN.BG_THRESH_HI)[0]\n labels[bg_ind] = 0\n\n # load or compute bbox_target\n if bbox_targets is not None:\n bbox_target_data = bbox_targets\n else:\n targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment, :4])\n if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:\n targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))\n / np.array(cfg.TRAIN.BBOX_STDS))\n bbox_target_data = np.hstack((labels[:, np.newaxis], targets))\n\n bbox_targets, bbox_weights = \\\n expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)\n\n return rois, 
labels, bbox_targets, bbox_weights\n\n\ndef sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,\n labels=None, overlaps=None, bbox_targets=None, gt_boxes=None, gt_lables=None):\n \"\"\"\n generate random sample of ROIs comprising foreground and background examples\n :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index\n :param fg_rois_per_image: foreground roi number\n :param rois_per_image: total roi number\n :param num_classes: number of classes\n :param labels: maybe precomputed\n :param overlaps: maybe precomputed (max_overlaps)\n :param bbox_targets: maybe precomputed\n :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)\n :return: (labels, rois, bbox_targets, bbox_weights)\n \"\"\"\n if labels is None:\n overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))\n gt_assignment = overlaps.argmax(axis=1)\n overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # foreground RoI with FG_THRESH overlap\n fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)\n # Sample foreground regions without replacement\n if len(fg_indexes) > fg_rois_per_this_image:\n fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)\n\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n # Compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\n bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)\n # Sample foreground regions without replacement\n if len(bg_indexes) > bg_rois_per_this_image:\n bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n\n # pad more to ensure a fixed minibatch size\n while keep_indexes.shape[0] < rois_per_image:\n gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])\n gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, gap_indexes)\n\n # select gt_labels\n gt_lables = gt_lables[keep_indexes]\n # select labels\n labels = labels[keep_indexes]\n # set labels of bg_rois to be 0\n bg_ind = np.where(overlaps[keep_indexes] < cfg.TRAIN.BG_THRESH_HI)[0]\n labels[bg_ind] = 0\n rois = rois[keep_indexes]\n\n # load or compute bbox_target\n if bbox_targets is not None:\n bbox_target_data = bbox_targets[keep_indexes, :]\n else:\n targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])\n if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:\n targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))\n / np.array(cfg.TRAIN.BBOX_STDS))\n bbox_target_data = np.hstack((labels[:, np.newaxis], targets))\n\n bbox_targets, bbox_weights = \\\n expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)\n\n return rois, labels, bbox_targets, bbox_weights, gt_lables\n", "\"\"\"\nProposal Target Operator selects foreground and background roi and assigns label, bbox_transform to them.\n\"\"\"\n\nimport mxnet as mx\nimport numpy as np\n\n\nclass BoxAnnotatorOHEMOperator(mx.operator.CustomOp):\n def __init__(self, num_classes, num_reg_classes, roi_per_img):\n super(BoxAnnotatorOHEMOperator, 
self).__init__()\n self._num_classes = num_classes\n self._num_reg_classes = num_reg_classes\n self._roi_per_img = roi_per_img\n\n def forward(self, is_train, req, in_data, out_data, aux):\n\n cls_score = in_data[0]\n bbox_pred = in_data[1]\n labels = in_data[2].asnumpy()\n bbox_targets = in_data[3]\n bbox_weights = in_data[4]\n\n per_roi_loss_cls = mx.nd.SoftmaxActivation(cls_score) + 1e-14\n per_roi_loss_cls = per_roi_loss_cls.asnumpy()\n per_roi_loss_cls = per_roi_loss_cls[np.arange(per_roi_loss_cls.shape[0], dtype='int'), labels.astype('int')]\n per_roi_loss_cls = -1 * np.log(per_roi_loss_cls)\n per_roi_loss_cls = np.reshape(per_roi_loss_cls, newshape=(-1,))\n\n per_roi_loss_bbox = bbox_weights * mx.nd.smooth_l1((bbox_pred - bbox_targets), scalar=1.0)\n per_roi_loss_bbox = mx.nd.sum(per_roi_loss_bbox, axis=1).asnumpy()\n\n top_k_per_roi_loss = np.argsort(per_roi_loss_cls + per_roi_loss_bbox)\n labels_ohem = labels\n labels_ohem[top_k_per_roi_loss[::-1][self._roi_per_img:]] = -1\n bbox_weights_ohem = bbox_weights.asnumpy()\n bbox_weights_ohem[top_k_per_roi_loss[::-1][self._roi_per_img:]] = 0\n\n labels_ohem = mx.nd.array(labels_ohem)\n bbox_weights_ohem = mx.nd.array(bbox_weights_ohem)\n\n for ind, val in enumerate([labels_ohem, bbox_weights_ohem]):\n self.assign(out_data[ind], req[ind], val)\n\n def backward(self, req, out_grad, in_data, out_data, in_grad, aux):\n for i in range(len(in_grad)):\n self.assign(in_grad[i], req[i], 0)\n\n\[email protected]('BoxAnnotatorOHEM')\nclass BoxAnnotatorOHEMProp(mx.operator.CustomOpProp):\n def __init__(self, num_classes, num_reg_classes, roi_per_img):\n super(BoxAnnotatorOHEMProp, self).__init__(need_top_grad=False)\n self._num_classes = int(num_classes)\n self._num_reg_classes = int(num_reg_classes)\n self._roi_per_img = int(roi_per_img)\n\n def list_arguments(self):\n return ['cls_score', 'bbox_pred', 'labels', 'bbox_targets', 'bbox_weights']\n\n def list_outputs(self):\n return ['labels_ohem', 'bbox_weights_ohem']\n\n def infer_shape(self, in_shape):\n labels_shape = in_shape[2]\n bbox_weights_shape = in_shape[4]\n\n return in_shape, \\\n [labels_shape, bbox_weights_shape]\n\n def create_operator(self, ctx, shapes, dtypes):\n return BoxAnnotatorOHEMOperator(self._num_classes, self._num_reg_classes, self._roi_per_img)\n\n def declare_backward_dependency(self, out_grad, in_data, out_data):\n return []\n" ]
[ [ "numpy.hstack", "numpy.minimum", "numpy.abs", "numpy.sqrt", "numpy.random.choice", "numpy.ones", "numpy.concatenate", "numpy.round", "numpy.append", "numpy.zeros", "numpy.array", "numpy.where", "numpy.sum", "numpy.empty" ], [ "numpy.reshape", "numpy.argsort", "numpy.log", "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ezg/PanoramicDataWin8
[ "229e9ab64cda30a0bd1c6d39a70754ba4651ad43" ]
[ "backend/binrange.py" ]
[ "#!/usr/bin/python\nimport json\nimport numpy as np\nimport pandas as pd\nimport math\n\nclass BinRange():\n def __init__(self, dataMinValue, dataMaxValue, targetBinNumber):\n self.dataMinValue = float(dataMinValue)\n self.dataMaxValue = float(dataMaxValue)\n self.targetBinNumber = float(targetBinNumber)\n self.maxValue = 0\n self.minValue = 0\n \n def getIndex(self, value):\n raise NotImplementedError()\n \n def addStep(self, value):\n raise NotImplementedError()\n \n def getLabel(self, value):\n return str(value)\n \n def getBins(self):\n raise NotImplementedError()\n \n def getUpdatedBinRange(self, dataMin, dataMax, df, dimension):\n raise NotImplementedError()\n\n def getLabels(self):\n labels = []\n for b in self.getBins():\n labels.append((bin, bin, self.addStep(bin), self.getLabel(bin)))\n return labels\n\nclass AggregatedBinRange(BinRange): \n \n def __init__(self):\n BinRange.__init__(self, 0, 0, 0)\n self.type = 'AggregatedBinRange'\n \n @staticmethod\n def initialize():\n scale = AggregatedBinRange()\n return scale\n \n def getIndex(self, value):\n return 0 \n \n def addStep(self, value):\n return value + 1\n \n def getBins(self):\n scale = [0]\n return scale\n \n def getUpdatedBinRange(self, dataMin, dataMax, df, dimension):\n return AggregatedBinRange()\n\nclass NominalBinRange(BinRange): \n def __init__(self):\n BinRange.__init__(self, 0, 0, 0)\n self.labelsValue = {} #string, index\n self.valuesLabel = {} #index, string\n self.type = 'NominalBinRange'\n \n @staticmethod\n def initialize(df, val):\n uniqueValues = df[val].unique()\n \n scale = NominalBinRange()\n for u in uniqueValues:\n if not u in scale.labelsValue:\n index = len(scale.labelsValue.keys())\n scale.labelsValue[u] = index\n scale.valuesLabel[index] = u\n return scale\n \n def getIndexFromValue(self, value):\n return self.labelsValue[value] \n \n def getIndex(self, value):\n return value\n \n def addStep(self, value):\n return value\n \n def getLabel(self, value):\n return self.valuesLabel[value] \n \n def getBins(self):\n scale = []\n for idx, label in enumerate(self.labelsValue):\n scale.append(idx)\n return scale\n \n def getUpdatedBinRange(self, dataMin, dataMax, df, val):\n newRange = NominalBinRange()\n newRange.labelsValue = self.labelsValue\n newRange.valuesLabel = self.valuesLabel\n \n uniqueValues = df[val].unique()\n \n for u in uniqueValues:\n if not u in newRange.labelsValue:\n index = len(newRange.labelsValue.keys())\n newRange.labelsValue[u] = index\n newRange.valuesLabel[index] = u\n return newRange\n \nclass QuantitativeBinRange(BinRange): \n def __init__(self, dataMinValue, dataMaxValue, targetBinNumber, isIntegerRange):\n BinRange.__init__(self, dataMinValue, dataMaxValue, targetBinNumber)\n self.isIntegerRange = isIntegerRange\n self.step = 0\n self.type = 'QuantitativeBinRange'\n \n @staticmethod\n def initialize(dataMinValue, dataMaxValue, targetBinNumber, isIntegerRange):\n scale = QuantitativeBinRange(dataMinValue, dataMaxValue, targetBinNumber, isIntegerRange)\n extent = scale.__getExtent(scale.dataMinValue, scale.dataMaxValue, scale.targetBinNumber)\n scale.minValue = extent[0]\n scale.maxValue = extent[1]\n scale.step = extent[2]\n return scale\n \n def getIndex(self, value):\n return int(math.floor(round((value - self.minValue) / self.step, 8))) \n \n def addStep(self, value):\n return value + self.step\n \n def getBins(self):\n scale = []\n idx = 0\n for v in np.arange(self.minValue, self.maxValue, self.step):\n scale.append(v)\n idx += 1\n return scale\n \n def 
getUpdatedBinRange(self, dataMin, dataMax, df, dimension):\n newMin = self.minValue\n newMax = self.maxValue\n\n if dataMin < self.minValue:\n while dataMin < newMin:\n newMin -= self.step\n \n if dataMax >= self.maxValue:\n while dataMax >= newMax:\n newMax += self.step\n\n multiplier = int(len(self.getBins()) / self.targetBinNumber);\n newStep = self.step\n if multiplier > 1:\n pass\n #newStep = Step * (double)multiplier\n\n newRange = QuantitativeBinRange(dataMin, dataMax, self.targetBinNumber, self.isIntegerRange)\n \n newRange.minValue = newMin\n newRange.maxValue = newMax\n newRange.dataMinValue = min(dataMin, self.dataMinValue)\n newRange.dataMaxValue = min(dataMax, self.dataMaxValue)\n newRange.step = self.step\n return newRange\n \n def __getExtent(self, dataMin, dataMax, m):\n if (dataMin == dataMax):\n dataMax += 0.1\n \n span = dataMax - dataMin\n\n step = math.pow(10, math.floor(math.log10(span / m)))\n err = m / span * step\n\n if (err <= .15):\n step *= 10\n elif (err <= .35):\n step *= 5\n elif (err <= .75):\n step *= 2\n\n if (self.isIntegerRange):\n step = math.ceil(step)\n \n ret = [0,0,0]\n ret[0] = (math.floor(round(dataMin, 8) / step) * step)\n ret[1] = (math.floor(round(dataMax, 8) / step) * step + step)\n ret[2] = step\n\n return ret\n \n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
banjtheman/odsc_nlp_workshop
[ "6562938fff0e9e50d4db8feed5552eaaa7a7f1f6" ]
[ "module_5/helpful_flow.py" ]
[ "# Python imports\nimport logging\nimport os\n\n# Project imports\nimport utils as helpful_funcs\n\n# 3rd party imports\nfrom metaflow import FlowSpec, Parameter, step, card\nimport numpy as np\n\n# How to run\n# python helpful_flow.py run --output_dir test_run\n\n\nclass HelpfulFlow(FlowSpec):\n \"\"\"\n This flow will run the Helpful pipeline\n \"\"\"\n\n output_dir = Parameter(\n \"output_dir\",\n default=\"test_run\",\n help=\"Location of output files\",\n required=True,\n )\n\n # The helpful training data\n train_data = \"https://helpful-sentences-from-reviews.s3.amazonaws.com/train.json\"\n test_data = \"https://helpful-sentences-from-reviews.s3.amazonaws.com/test.json\"\n\n @card\n @step\n def start(self):\n \"\"\"\n This is the 'start' step. All flows must have a step named 'start' that\n is the first step in the flow. We will download the data\n \"\"\"\n\n # Make output dir\n cmd = f\"mkdir -p {self.output_dir}\"\n os.system(cmd)\n\n # Get raw data\n self.raw_data_train = helpful_funcs.get_data(self.train_data)\n self.raw_data_test = helpful_funcs.get_data(self.test_data)\n self.next(self.prepare_data)\n\n @card\n @step\n def prepare_data(self):\n\n \"\"\"\n prepare data\n \"\"\"\n # Transfrom raw data to a dataframe\n self.df_train = helpful_funcs.data_to_df(self.raw_data_train)\n self.df_test = helpful_funcs.data_to_df(self.raw_data_test)\n\n # save df to output folder\n self.df_train.to_csv(\n f\"{self.output_dir}/helpful_sentences_train.csv\", index=False\n )\n self.df_test.to_csv(\n f\"{self.output_dir}/helpful_sentences_test.csv\", index=False\n )\n\n # We can call N functions to run in parallel\n self.next(self.vader_run, self.fasttext_start, self.huggingface_split)\n\n @card\n @step\n def vader_run(self):\n\n \"\"\"\n Run vader on data\n \"\"\"\n # Transfrom raw data to a dataframe\n self.results = helpful_funcs.test_vader(self.df_test)\n self.run_name = \"vader\"\n\n self.next(self.join)\n\n @card\n @step\n def fasttext_start(self):\n\n \"\"\"\n Convert data to fasttext format\n \"\"\"\n\n helpful_funcs.convert_csv_to_fast_text_doc(self.df_train, self.output_dir)\n self.next(self.fasttext_train)\n\n @card\n @step\n def fasttext_train(self):\n\n \"\"\"\n Train fasttext model\n \"\"\"\n\n # Note the fasttext_model cant be saved by metaflow, so we just eval here\n fasttext_model = helpful_funcs.train_fasttext_model(self.output_dir)\n self.results = helpful_funcs.test_fasttext(self.df_test, fasttext_model)\n self.run_name = \"fasttext\"\n\n self.next(self.join)\n\n @card\n @step\n def huggingface_split(self):\n\n \"\"\"\n Split data into 5\n \"\"\"\n # TODO we can prob split based on max workers\n self.helpful_list = np.array_split(self.df_test, 5)\n\n self.next(self.huggingface_predict, foreach=\"helpful_list\")\n\n @card\n @step\n def huggingface_predict(self):\n\n \"\"\"\n Predict with huggingface model\n \"\"\"\n\n self.run_name = \"huggingface_\"\n self.results = helpful_funcs.run_hugging_face(self.input)\n\n self.next(self.huggingface_join)\n\n @card\n @step\n def huggingface_join(self, inputs):\n \"\"\"\n Combine huggingface scores\n \"\"\"\n\n self.results = [input.results for input in inputs]\n self.run_names = [input.run_name for input in inputs]\n\n print(\"Huggingface Results\")\n print(self.results)\n print(self.run_names)\n\n sent_scores = {}\n sent_scores[\"pos_match\"] = 0\n sent_scores[\"neg_match\"] = 0\n sent_scores[\"miss\"] = 0\n sent_scores[\"model\"] = \"huggingface\"\n\n for index, result in enumerate(self.results):\n\n sent_scores[\"pos_match\"] += 
result[\"pos_match\"]\n sent_scores[\"neg_match\"] = result[\"neg_match\"]\n sent_scores[\"miss\"] = result[\"miss\"]\n\n num_sents = (\n sent_scores[\"pos_match\"] + sent_scores[\"neg_match\"] + sent_scores[\"miss\"]\n )\n missed_percent = sent_scores[\"miss\"] / num_sents\n correct_percent = 1 - missed_percent\n sent_scores[\"missed_percent\"] = missed_percent\n sent_scores[\"correct_percent\"] = correct_percent\n\n self.run_name = \"huggingface\"\n self.results = sent_scores\n\n self.next(self.join)\n\n @card\n @step\n def join(self, inputs):\n \"\"\"\n Save data artifacts from the runs\n \"\"\"\n\n self.results = [input.results for input in inputs]\n self.run_names = [input.run_name for input in inputs]\n\n print(\"Final Results\")\n print(self.results)\n print(self.run_names)\n\n for index, result in enumerate(self.results):\n\n curr_name = self.run_names[index]\n\n # save outputs\n helpful_funcs.save_json(\n f\"{self.output_dir}/{curr_name}_results.json\", result\n )\n\n self.next(self.end)\n\n @card\n @step\n def end(self):\n \"\"\"\n This is the 'end' step. All flows must have an 'end' step, which is the\n last step in the flow. It will print a \"Done and Done\"\n \"\"\"\n\n logging.info(\"Done and Done\")\n\n\nif __name__ == \"__main__\":\n loglevel = logging.INFO\n logging.basicConfig(\n format=\"%(asctime)s |%(levelname)s: %(message)s\", level=loglevel\n )\n HelpfulFlow()\n" ]
[ [ "numpy.array_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kingjuno/devolearn
[ "555c8c55441a4f0b9ed8801c37d07c45b03ec774" ]
[ "devolearn/cell_membrane_segmentor/cell_membrane_segmentor.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision.transforms import ToTensor\nfrom torchvision.transforms import ToPILImage\n\nimport os\nimport cv2\nimport wget\nimport imutils\nfrom tqdm import tqdm, tqdm_notebook\nfrom PIL import Image\nimport numpy as np\nfrom collections import deque\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport segmentation_models_pytorch as smp\nimport warnings\nwarnings.filterwarnings(\"ignore\") \n\nfrom ..base_inference_engine import InferenceEngine\n\n\"\"\"\n3d segmentation model for C elegans embryo\n\"\"\"\n\ndef generate_centroid_image(thresh):\n \"\"\"Used when centroid_mode is set to True\n \n Args:\n thresh (np.array): 2d numpy array that is returned from the segmentation model\n\n Returns:\n np.array : image containing the contours and their respective centroids \n list : list of all centroids for the given image as [(x1,y1), (x2,y2)...]\n \"\"\"\n\n thresh = cv2.blur(thresh, (5,5))\n thresh = thresh.astype(np.uint8)\n centroid_image = np.zeros(thresh.shape)\n cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n centroids = []\n for c in cnts:\n try:\n # compute the center of the contour\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n # draw the contour and center of the shape on the image\n cv2.drawContours(centroid_image, [c], -1, (255, 255, 255), 2)\n cv2.circle(centroid_image, (cX, cY), 2, (255, 255, 255), -1)\n centroids.append((cX, cY))\n except:\n pass\n\n return centroid_image, centroids\n\nclass cell_membrane_segmentor(InferenceEngine):\n def __init__(self, device = \"cpu\"):\n \"\"\"Segments the c. elegans embryo from images/videos, \n depends on segmentation-models-pytorch for the model backbone\n\n Args:\n device (str, optional): set to \"cuda\", runs operations on gpu and set to \"cpu\", runs operations on cpu. 
Defaults to \"cpu\".\n \"\"\"\n \n self.device = device\n self.ENCODER = 'resnet18'\n self.ENCODER_WEIGHTS = 'imagenet'\n self.CLASSES = [\"nucleus\"]\n self.ACTIVATION = 'sigmoid'\n self.in_channels = 1\n self.model_url = \"https://github.com/DevoLearn/devolearn/raw/master/devolearn/cell_membrane_segmentor/cell_membrane_segmentation_model.pth\"\n self.model_name = \"cell_membrane_segmentation_model.pth\"\n self.model_dir = os.path.dirname(__file__)\n # print(\"at : \", os.path.dirname(__file__))\n\n self.model = smp.FPN(\n encoder_name= self.ENCODER, \n encoder_weights= self.ENCODER_WEIGHTS, \n classes=len(self.CLASSES), \n activation= self.ACTIVATION,\n in_channels = self.in_channels \n )\n\n\n self.download_checkpoint()\n self.model.to(self.device)\n self.model.eval()\n\n self.mini_transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((256,256), interpolation = Image.NEAREST),\n transforms.ToTensor(),\n ])\n\n\n def download_checkpoint(self):\n try:\n # print(\"model already downloaded, loading model...\")\n self.model = torch.load(self.model_dir + \"/\" + self.model_name, map_location= self.device) \n except:\n print(\"model not found, downloading from:\", self.model_url)\n if os.path.isdir(self.model_dir) == False:\n os.mkdir(self.model_dir)\n filename = wget.download(self.model_url, out= self.model_dir)\n # print(filename)\n self.model = torch.load(self.model_dir + \"/\" + self.model_name, map_location= self.device) \n\n def preprocess(self, image_grayscale_numpy):\n\n tensor = self.mini_transform(image_grayscale_numpy).unsqueeze(0).to(self.device)\n return tensor\n\n def predict(self, image_path, pred_size = (350,250), centroid_mode = False):\n \"\"\"Loads an image from image_path and converts it to grayscale, \n then passes it through the model and returns centroids of the segmented features.\n reference{\n https://github.com/DevoLearn/devolearn#segmenting-the-c-elegans-embryo\n }\n\n Args:\n image_path (str): path to image\n pred_size (tuple, optional): size of output image,(width,height). Defaults to (350,250).\n centroid_mode (bool, optional): set to true to return both the segmented image and the list of centroids. Defaults to False.\n\n Returns:\n centroid_mode set to False:\n np.array : 1 channel image.\n centroid_mode set to True:\n np.array : 1 channel image,\n list : list of centroids.\n \"\"\"\n\n im = cv2.imread(image_path,0)\n\n tensor = self.preprocess(im)\n res = self.model(tensor).detach().cpu().numpy()[0][0]\n \n res = cv2.resize(res,pred_size)\n if centroid_mode == False:\n return res\n else:\n centroid_image, centroids = generate_centroid_image(res)\n return centroid_image, centroids\n \n\n def predict_from_video(self, video_path, pred_size = (350,250), save_folder = \"preds\", centroid_mode = False, notebook_mode = False):\n \"\"\"Splits a video from video_path into frames and passes the \n frames through the model for predictions. Saves predicted images in save_folder.\n And optionally saves all the centroid predictions into a pandas.DataFrame. \n\n Args:\n video_path (str): path to the video file.\n pred_size (tuple, optional): size of output image,(width,height). Defaults to (350,250).\n save_folder (str, optional): path to folder to be saved in. Defaults to \"preds\".\n centroid_mode (bool, optional): set to true to return both the segmented image and the list of centroids. Defaults to False.\n notebook_mode (bool, optional): toogle between script(False) and notebook(True), for better user interface. 
Defaults to False.\n\n Returns:\n centroid_mode set to True:\n pd.DataFrame : containing file name and their centriods\n centroid_mode set to False:\n list : list containing the names of the entries in the save_folder directory \n \"\"\"\n \n vidObj = cv2.VideoCapture(video_path) \n success = 1\n images = deque()\n count = 0\n\n if centroid_mode == True:\n filenames_centroids = []\n\n while success: \n success, image = vidObj.read() \n \n try:\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n images.append(image)\n \n except:\n # print(\"skipped possible corrupt frame number : \", count)\n pass\n count += 1 \n \n if os.path.isdir(save_folder) == False:\n os.mkdir(save_folder)\n\n if notebook_mode == True:\n for i in tqdm_notebook(range(len(images)), desc = \"saving predictions: \"): \n save_name = save_folder + \"/\" + str(i) + \".jpg\"\n tensor = self.mini_transform(images[i]).unsqueeze(0).to(self.device)\n res = self.model(tensor).detach().cpu().numpy()[0][0]\n\n if centroid_mode == True:\n res, centroids = generate_centroid_image(res)\n filenames_centroids.append([save_name, centroids])\n\n res = cv2.resize(res,pred_size)\n cv2.imwrite(save_name, res*255)\n else :\n for i in tqdm(range(len(images)), desc = \"saving predictions: \"):\n save_name = save_folder + \"/\" + str(i) + \".jpg\"\n tensor = self.mini_transform(images[i]).unsqueeze(0).to(self.device)\n res = self.model(tensor).detach().cpu().numpy()[0][0]\n\n if centroid_mode == True:\n res, centroids = generate_centroid_image(res)\n filenames_centroids.append([save_name, centroids])\n\n res = cv2.resize(res,pred_size)\n cv2.imwrite(save_name, res*255)\n\n if centroid_mode == True:\n df = pd.DataFrame(filenames_centroids, columns = [\"filenames\", \"centroids\"])\n return df\n else:\n return os.listdir(save_folder)\n" ]
[ [ "numpy.zeros", "pandas.DataFrame", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
avi2412/nlp-dl-prework
[ "902d77344c351954e370a4aacf5a427db68cfad9" ]
[ "Lego-Collector's-Dilemma/code.py" ]
[ "# --------------\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\n# code starts here\n\ndata = pd.read_csv(path)\ndf = pd.DataFrame(data)\n#print(df.iloc[0:5])\n\nX = df.drop(['list_price'], axis = 1)\ny = df.iloc[:, 1]\n\nX_train, X_test, y_train, y_test = train_test_split (X, y, test_size = 0.3, random_state = 6)\n\n# code ends here\n\n\n\n# --------------\nimport matplotlib.pyplot as plt\n\n# code starts here \ncols = X_train.columns\nfig, axes = plt.subplots(3,3)\n\nfor i in range(0,3):\n for j in range(0,3):\n col = cols[i*3 + j]\n axes[i,j].scatter(X_train[col],y_train)\n axes[i,j].set_title(col)\n\nplt.show()\n# code ends here\n\n\n\n# --------------\n# Code starts here\ncorr =X_train.corr()\n\nX_train.drop(columns = ['play_star_rating','val_star_rating'], inplace = True)\nX_test.drop(columns = ['play_star_rating','val_star_rating'], inplace =True)\n\n# Code ends here\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# Code starts here\n\nregressor = LinearRegression()\nregressor.fit(X_train,y_train)\ny_pred = regressor.predict(X_test)\n\nmse = mean_squared_error(y_test,y_pred)\nr2 = r2_score(y_test,y_pred)\nprint(r2)\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\nresidual = y_test-y_pred\nresidual.hist()\n\n\n\n# Code ends here\n\n\n" ]
[ [ "sklearn.cross_validation.train_test_split", "pandas.read_csv", "sklearn.metrics.r2_score", "matplotlib.pyplot.subplots", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
ibrahim-sheriff/Deploying-a-ML-Model-on-Heroku-with-FastAPI
[ "483c563d0e3838580f5cd643c70db6a47e1c1219" ]
[ "src/tests/conftest.py" ]
[ "\"\"\"\r\nAuthor: Ibrahim Sherif\r\nDate: October, 2021\r\nThis script holds the conftest data used with pytest module\r\n\"\"\"\r\nimport os\r\nimport pytest\r\nimport pandas as pd\r\nimport great_expectations as ge\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport config\r\nfrom pipeline.data import get_clean_data\r\n\r\n\r\[email protected](scope='session')\r\ndef data():\r\n \"\"\"\r\n Data loaded from csv file used for tests\r\n\r\n Returns:\r\n df (ge.DataFrame): Data loaded from csv file\r\n \"\"\"\r\n if not os.path.exists(config.DATA_DIR):\r\n pytest.fail(f\"Data not found at path: {config.DATA_DIR}\")\r\n\r\n X_df, y_df = get_clean_data(config.DATA_DIR)\r\n X_df['salary'] = y_df\r\n X_df['salary'] = X_df['salary'].map({1: '>50k', 0: '<=50k'})\r\n\r\n df = ge.from_pandas(X_df)\r\n\r\n return df\r\n\r\n\r\[email protected](scope='session')\r\ndef sample_data():\r\n \"\"\"\r\n Sampled data from csv file used for tests\r\n\r\n Returns:\r\n X_train: Features train data\r\n X_test: Features test data\r\n y_train: Labels train data\r\n y_test: Labels test data\r\n \"\"\"\r\n if not os.path.exists(config.DATA_DIR):\r\n pytest.fail(f\"Data not found at path: {config.DATA_DIR}\")\r\n\r\n data_df = pd.read_csv(config.DATA_DIR, nrows=10)\r\n\r\n # chaning column names to use _ instead of -\r\n columns = data_df.columns\r\n columns = [col.replace('-', '_') for col in columns]\r\n data_df.columns = columns\r\n\r\n # make all characters to be lowercase in string columns\r\n data_df = data_df.applymap(\r\n lambda s: s.lower() if isinstance(s, str) else s)\r\n\r\n data_df['salary'] = data_df['salary'].map({'>50k': 1, '<=50k': 0})\r\n\r\n y_df = data_df.pop('salary')\r\n X_df = data_df\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(\r\n X_df, y_df, test_size=0.3, random_state=config.RANDOM_STATE, stratify=y_df)\r\n\r\n return X_train, X_test, y_train, y_test\r\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Simmons-Wang/easy-backtrack-tweets
[ "d5c7912e06376f63800e76af658a79b87129dc92" ]
[ "tweetBacktrack.py" ]
[ "import datetime\nimport pickle\nimport tweepy as tp\nimport pandas as pd\nimport time\n\n\ndef lookUpDetail(ids):\n \"\"\"\n :param ids: the list of tweets ids, the maximum length is 100 at a time.\n :return: dataframe which include 'tweet_id', 'favorite_count', 'retweet_count', 'lang',\n 'hashtags', 'url', 'user_id'\n \"\"\"\n statuses = api.lookup_statuses(ids)\n details = [[i.id, i.favorite_count, i.retweet_count, i.lang,\n i.entities['hashtags'], i.entities['urls'],\n i.author.id] for i in statuses]\n df = pd.DataFrame(details, columns=['tweet_id', 'favorite_count', 'retweet_count', 'lang',\n 'hashtags', 'urls', 'user_id'])\n\n df.hashtags = df.hashtags.apply(lambda x: [i['text'] for i in x] if x else [])\n df.urls = df.urls.apply(lambda x: x[0]['url'] if x else None)\n return df\n\n\ndef get_following(my_name):\n user1 = api.get_friends(screen_name=my_name, cursor=-1, count=200) # 200 is the limit\n user = user1[0]\n while user1[0]:\n user1 = api.get_friends(screen_name=my_name, cursor=user1[1][1], count=200)\n user = user + user1[0]\n time.sleep(2)\n friendsScreenName = [i.screen_name for i in user] # change this line to collect other attribute of friends\n return friendsScreenName\n\n\ndef get_history(f, start_time):\n tws = api.user_timeline(screen_name=f, count=200) # one crawl limit is 200\n userTws = tws.copy()\n while tws and (tws[-1].created_at > start_time):\n tws = api.user_timeline(screen_name=f, count=200, max_id=tws.max_id)\n userTws = userTws + tws\n details = [[i.created_at, i.id, i.text, i.favorite_count, i.retweet_count, i.lang,\n i.entities['hashtags'], i.entities['urls'],\n i.author.id, f] for i in userTws]\n return details\n\n\nif __name__ == '__main__':\n # replace the following attributions with yours\n CONSUMER_KEY = \"\"\n CONSUMER_SECRET = \"\"\n ACCESS_TOKEN = \"\"\n ACCESS_TOKEN_SECRET = \"\"\n\n auth = tp.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n api = tp.API(auth)\n\n test_ids = [\n 1481194503650549761,\n 1481194425170956292,\n 1480951940389371914,\n 1480942056365252610,\n 1480888363011903491,\n 1480886828072718337,\n 1480848873627086849,\n 1480844751880351745,\n 1480823233267920897]\n\n test_result1 = lookUpDetail(test_ids)\n test_result2 = get_following('') # replace it with your name\n test_result3 = get_history('') # replace it with your name\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jezsadler/summit
[ "982de7f6424bb94da2084d4d84396b4b2673eeca" ]
[ "summit/benchmarks/experiment_emulator/bnn_emulator.py" ]
[ "import os\nimport os.path as osp\n\nimport numpy as np\n\nfrom summit.benchmarks.experiment_emulator.emulator import Emulator\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom blitz.modules import BayesianLinear\nfrom blitz.utils import variational_estimator\n\nfrom sklearn.metrics import r2_score\n\n# =======================================================================\n\n\nclass BNNEmulator(Emulator):\n \"\"\"BNN Emulator\n\n A Bayesian Neural Network (BNN) emulator.\n\n Parameters\n ---------\n domain: summit.domain.Domain\n The domain of the experiment\n dataset: class:~summit.utils.dataset.DataSet, optional\n A DataSet with data for training where the data columns correspond to the domain and the data rows correspond to the training points.\n By default: None\n model_name: string, optional\n Name of the model that is used for saving model parameters. Should be unique.\n By default: \"dataset_emulator_model_name\"\n \"\"\"\n\n # =======================================================================\n\n def __init__(self, domain, dataset, model_name, kwargs={}):\n super().__init__(domain, dataset, model_name, kwargs)\n self._model = self._setup_model()\n\n # Set model name for saving\n self.save_path = kwargs.get(\n \"save_path\",\n osp.join(osp.dirname(osp.realpath(__file__)), \"trained_models/BNN\"),\n )\n\n # Set up training hyperparameters\n self.set_training_hyperparameters()\n\n # =======================================================================\n\n def _setup_model(self, **kwargs):\n \"\"\" Setup the BNN model \"\"\"\n\n @variational_estimator\n class BayesianRegressor(nn.Module):\n def __init__(self, input_dim):\n super().__init__()\n\n self.blinear1 = BayesianLinear(input_dim, 24)\n self.blinear2 = BayesianLinear(24, 24)\n self.blinear3 = BayesianLinear(24, 24)\n self.blinear4 = BayesianLinear(24, 1)\n\n def forward(self, x):\n x = F.leaky_relu(self.blinear1(x))\n x = F.leaky_relu(self.blinear2(x))\n x = F.dropout(x, p=0.1, training=self.training)\n x = F.leaky_relu(self.blinear3(x))\n x = F.dropout(x, p=0.1, training=self.training)\n x = F.relu(self.blinear4(x))\n y = x\n return y.view(-1)\n\n # Training of model on given dataloader\n def _train(self, regressor, device, optimizer, criterion, X_train, loader):\n regressor.train()\n\n for i, (datapoints, labels) in enumerate(loader):\n optimizer.zero_grad()\n loss = regressor.sample_elbo(\n inputs=datapoints.to(device),\n labels=labels.to(device),\n criterion=criterion,\n sample_nbr=3,\n complexity_cost_weight=1 / X_train.shape[0],\n )\n loss.backward()\n optimizer.step()\n\n # Evaluate model for given dataloader\n def _evaluate_regression(\n self,\n regressor,\n device,\n loader,\n fun_untransform_data,\n out_transform,\n get_predictions=False,\n ):\n regressor.eval()\n regressor.freeze_()\n\n mae = 0\n pred_data = []\n real_data = []\n for i, (datapoints, labels) in enumerate(loader):\n data = datapoints.to(device)\n pred = regressor(data)\n tmp_pred_data = fun_untransform_data(\n data=pred, reduce=out_transform[0], divide=out_transform[1]\n )\n tmp_real_data = fun_untransform_data(\n data=labels, reduce=out_transform[0], divide=out_transform[1]\n )\n mae += (tmp_pred_data - tmp_real_data).abs().sum(0).item()\n\n if get_predictions:\n pred_data.extend(tmp_pred_data.tolist())\n real_data.extend(tmp_real_data.tolist())\n\n if get_predictions:\n return pred_data, real_data\n\n regressor.unfreeze_()\n\n return mae / len(loader.dataset)\n\n regression_model = 
BayesianRegressor(self.input_dim)\n return regression_model\n\n # =======================================================================\n\n def set_training_hyperparameters(self, kwargs={}):\n # Setter method for hyperparameters of training\n self.epochs = kwargs.get(\n \"epochs\", 300\n ) # number of max epochs the model is trained\n self.initial_lr = kwargs.get(\"initial_lr\", 0.001) # initial learning rate\n self.min_lr = kwargs.get(\"min_lr\", 0.00001)\n self.lr_decay = kwargs.get(\"lr_decay\", 0.7) # learning rate decay\n self.lr_decay_patience = kwargs.get(\n \"lr_decay_patience\", 3\n ) # number of epochs before learning rate is reduced by lr_decay\n self.early_stopping_epochs = kwargs.get(\n \"early_stopping_epochs\", 30\n ) # number of epochs before early stopping\n self.batch_size_train = kwargs.get(\"batch_size_train\", 4)\n self.transform_input = kwargs.get(\"transform_input\", \"standardize\")\n self.transform_output = kwargs.get(\"transform_output\", \"standardize\")\n self.test_size = kwargs.get(\"test_size\", 0.1)\n self.shuffle = kwargs.get(\"shuffle\", False)\n\n # =======================================================================\n\n def train_model(self, dataset=None, verbose=True, kwargs={}):\n # Manual call of training -> overwrite dataset with new dataset for training\n if dataset is not None:\n self._dataset = dataset\n\n # #<cv_fold>-fold cross-validation\n cv_fold = kwargs.get(\"cv_fold\", 10)\n\n # Data preprocess\n train_dataset, test_dataset = self._data_preprocess(\n transform_input=self.transform_input,\n transform_output=self.transform_output,\n test_size=self.test_size,\n shuffle=self.shuffle,\n )\n\n X_train_init, y_train_init = (\n torch.tensor(train_dataset[0]).float(),\n torch.tensor(train_dataset[1]).float(),\n )\n X_test, y_test = (\n torch.tensor(test_dataset[0]).float(),\n torch.tensor(test_dataset[1]).float(),\n )\n\n shuffle_train = kwargs.get(\"shuffle_train\", False)\n if shuffle_train:\n perm = torch.randperm(len(y_train_init))\n train_data = torch.cat([X_train_init, y_train_init], axis=1)[perm]\n X_train, y_train = (\n train_data[:, : -self.output_dim],\n train_data[:, -self.output_dim :],\n )\n else:\n X_train, y_train = X_train_init, y_train_init\n\n if verbose:\n print(\"\\n<---- Start training of BNN model ---->\")\n print(\" --- Length of train dataset: {} ---\".format(X_train.shape[0]))\n print(\" --- Length of test dataset: {} ---\".format(X_test.shape[0]))\n for i, k in enumerate(self.output_models):\n if verbose:\n print(\n \"\\n <-- Start {}-fold cross-validation training of BNN regressor on objective: {} -->\\n\".format(\n cv_fold, k\n )\n )\n\n train_acc, val_acc, test_acc = [], [], []\n y_train_pred_l, y_train_real_l, y_test_pred_l, y_test_real_l = (\n [],\n [],\n [],\n [],\n )\n for j in range(cv_fold):\n if verbose:\n print(\" ---------------- Split {} ----------------\".format(j + 1))\n\n # Set training details\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n regressor = self._setup_model().to(device)\n optimizer = optim.Adam(regressor.parameters(), lr=self.initial_lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n factor=self.lr_decay,\n patience=self.lr_decay_patience,\n min_lr=self.min_lr,\n )\n criterion = torch.nn.MSELoss()\n model_save_name = (\n self.model_name + \"_\" + str(k) + \"_\" + str(j + 1) + \"_BNN_model.pt\"\n )\n model_save_dir = osp.join(self.save_path, model_save_name)\n storable = self._check_file_path(model_save_dir)\n if not storable:\n 
self.output_models[k] = self._load_model(self.model_name)[k]\n continue\n\n # Setup train and val dataset for cross-validation\n if cv_fold <= 1:\n raise ValueError(\n \"{}-fold Cross-Validation not possible. Increase cv_fold.\".format(\n cv_fold\n )\n )\n if len(X_train) < cv_fold:\n raise ValueError(\n \"Too few data points ({}) for training provided. Decrease cv_fold.\".format(\n len(X_train)\n )\n )\n n = len(X_train) // cv_fold\n r = len(X_train) % cv_fold\n val_mask = torch.zeros(len(X_train), dtype=torch.uint8)\n # make sure every data point is included in the validation set once\n if j < r:\n val_mask[j * (n + 1) : (j + 1) * (n + 1)] = 1\n else:\n val_mask[j * n + r : (j + 1) * n + r] = 1\n X_val_cv, y_val_cv = X_train[val_mask], y_train[val_mask]\n X_train_cv, y_train_cv = X_train[1 - val_mask], y_train[1 - val_mask]\n\n out_transform = self.data_transformation_dict[k]\n y_train_obj, y_val_obj, y_test_obj = (\n y_train_cv[:, i],\n y_val_cv[:, i],\n y_test[:, i],\n )\n ds_train = torch.utils.data.TensorDataset(X_train_cv, y_train_obj)\n dataloader_train = torch.utils.data.DataLoader(\n ds_train, batch_size=self.batch_size_train, shuffle=True\n )\n ds_val = torch.utils.data.TensorDataset(X_val_cv, y_val_obj)\n dataloader_val = torch.utils.data.DataLoader(\n ds_val, batch_size=16, shuffle=False\n )\n ds_test = torch.utils.data.TensorDataset(X_test, y_test_obj)\n dataloader_test = torch.utils.data.DataLoader(\n ds_test, batch_size=16, shuffle=False\n )\n\n max_iter_stop = (\n self.early_stopping_epochs\n ) # maximum number of consecutive iteration w/o improvement after which training is stopped\n tmp_iter_stop = 0\n best_train_mae, best_val_mae, best_test_mae = (\n float(\"inf\"),\n float(\"inf\"),\n float(\"inf\"),\n )\n for epoch in range(self.epochs):\n\n lr = scheduler.optimizer.param_groups[0][\"lr\"]\n\n # train model\n self._model._train(\n regressor,\n device,\n optimizer,\n criterion,\n X_train_cv,\n dataloader_train,\n )\n\n train_mae = self._model._evaluate_regression(\n regressor,\n device,\n dataloader_train,\n self._untransform_data,\n out_transform,\n )\n val_mae = self._model._evaluate_regression(\n regressor,\n device,\n dataloader_val,\n self._untransform_data,\n out_transform,\n )\n scheduler.step(val_mae)\n\n if verbose:\n print(\n \" -- Epoch: {:03d}, LR: {:7f}, Train MAE: {:4f}, Val MAE: {:4f}\".format(\n epoch, lr, train_mae, val_mae\n )\n )\n\n # if prediction accuracy was improved in current epoch, reset <tmp_iter_stop> and save model\n if best_val_mae > val_mae:\n best_val_mae = val_mae\n tmp_iter_stop = 0\n torch.save(regressor.state_dict(), model_save_dir)\n test_mae = self._model._evaluate_regression(\n regressor,\n device,\n dataloader_test,\n self._untransform_data,\n out_transform,\n )\n best_train_mae, best_test_mae = train_mae, test_mae\n if verbose:\n print(\n \" -> Val MAE improved, current Test MAE: {:4f}\".format(\n test_mae\n )\n )\n # if prediction accuracy was not imporved in current epoch, increase <tmp_iter_stop> and stop training if <max_iter_stop> is reached\n else:\n tmp_iter_stop += 1\n if tmp_iter_stop >= max_iter_stop:\n break\n\n train_acc.append(best_train_mae)\n val_acc.append(best_val_mae)\n test_acc.append(best_test_mae)\n\n y_train_obj = y_train_init[:, i]\n ds_train_all = torch.utils.data.TensorDataset(X_train_init, y_train_obj)\n\n # load final model from epoch with lowest prediction accuracy\n regressor.load_state_dict(torch.load(model_save_dir))\n\n # get final model predictions for training and test data\n y_train_pred, 
y_train_real = self._model._evaluate_regression(\n regressor=regressor,\n device=device,\n loader=torch.utils.data.DataLoader(ds_train_all, shuffle=False),\n fun_untransform_data=self._untransform_data,\n out_transform=out_transform,\n get_predictions=True,\n )\n y_test_pred, y_test_real = self._model._evaluate_regression(\n regressor=regressor,\n device=device,\n loader=torch.utils.data.DataLoader(ds_test, shuffle=False),\n fun_untransform_data=self._untransform_data,\n out_transform=out_transform,\n get_predictions=True,\n )\n y_train_pred_l.append(y_train_pred), y_train_real_l.append(y_train_real)\n y_test_pred_l.append(y_test_pred), y_test_real_l.append(y_test_real)\n\n train_acc, val_acc, test_acc = (\n torch.tensor(train_acc),\n torch.tensor(val_acc),\n torch.tensor(test_acc),\n )\n y_train_pred_l, y_train_real_l, y_test_pred_l, y_test_real_l = (\n torch.tensor(y_train_pred_l),\n torch.tensor(y_train_real_l),\n torch.tensor(y_test_pred_l),\n torch.tensor(y_test_real_l),\n )\n\n X_train_final = np.asarray(X_train_init.tolist())\n X_test_final = np.asarray(X_test.tolist())\n for ind, inp_var in enumerate(self.input_names_transformable):\n tmp_inp_transform = self.data_transformation_dict[inp_var]\n X_train_final[:, ind] = self._untransform_data(\n data=X_train_final[:, ind],\n reduce=tmp_inp_transform[0],\n divide=tmp_inp_transform[1],\n )\n X_test_final[:, ind] = self._untransform_data(\n data=X_test_final[:, ind],\n reduce=tmp_inp_transform[0],\n divide=tmp_inp_transform[1],\n )\n\n self.output_models[k] = {\n \"model_save_dirs\": [\n self.model_name + \"_\" + str(k) + \"_\" + str(j + 1)\n for j in range(cv_fold)\n ],\n \"Final train MAE\": train_acc.mean().tolist(),\n \"Final validation MAE\": val_acc.mean().tolist(),\n \"Final test MAE\": test_acc.mean().tolist(),\n \"data_transformation_dict\": self.data_transformation_dict,\n \"X variable names\": self.input_names,\n \"X_train\": X_train_final.tolist(),\n \"y_train_real\": y_train_real_l.mean(axis=0).tolist(),\n \"y_train_pred_average\": y_train_pred_l.mean(axis=0).tolist(),\n \"X_test\": X_test_final.tolist(),\n \"y_test_real\": y_test_real_l.mean(axis=0).tolist(),\n \"y_test_pred_average\": y_test_pred_l.mean(axis=0).tolist(),\n }\n\n if verbose:\n print(\n \"\\n <-- Finished training of BNN model on objective: {} -->\\n\"\n \" -- Final Train MAE: {:4f}, Final Val MAE: {:4f}, Final Test MAE: {:4f} --\\n\"\n \" -- Model saved at: {} --\\n\".format(\n k,\n train_acc.mean(),\n val_acc.mean(),\n test_acc.mean(),\n model_save_dir,\n )\n )\n\n self._save_model()\n\n if verbose:\n print(\"<---- End training of BNN regressor ---->\\n\")\n\n # =======================================================================\n\n def validate_model(\n self, dataset=None, parity_plots=False, get_pred=False, kwargs={}\n ):\n self.output_models = self._load_model(self.model_name)\n\n self._model.freeze_() # freeze the model, in order to predict using only their weight distribution means\n self._model.eval() # set to evaluation mode (may be redundant)\n\n val_dict = {}\n lst_parity_plots = None\n if parity_plots:\n lst_parity_plots = []\n\n if dataset is not None:\n for i, (k, v) in enumerate(self.output_models.items()):\n model_load_dirs = v[\"model_save_dirs\"]\n self.data_transformation_dict = v[\"data_transformation_dict\"]\n out_transform = self.data_transformation_dict[k]\n\n X_val = self._data_preprocess(\n inference=True, infer_dataset=dataset, validate=True\n )\n X_val = torch.tensor(X_val).float()\n y_val = torch.tensor(dataset[(k, 
\"DATA\")].to_numpy()).float()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n prediction_l = []\n for m in model_load_dirs:\n model_load_dir = osp.join(self.save_path, m + \"_BNN_model.pt\")\n self._model.load_state_dict(\n torch.load(model_load_dir, map_location=torch.device(device))\n )\n data = X_val.to(device)\n predictions = self._model(data).detach()\n predictions = self._untransform_data(\n data=predictions,\n reduce=out_transform[0],\n divide=out_transform[1],\n )\n prediction_l.append(predictions)\n prediction_l = torch.tensor(prediction_l)\n predictions = prediction_l.mean(axis=0)\n val_dict[k] = {\n \"MAE\": (predictions - y_val).abs().mean().item(),\n \"RMSE\": ((((predictions - y_val) ** 2).mean()) ** (1 / 2)).item(),\n \"r2\": r2_score(y_val, predictions)\n if y_val.shape[0] > 1\n else \"Too few data points to calculate r2.\",\n }\n\n if parity_plots:\n parity_plot = self.create_parity_plot(\n datasets_pred=[predictions],\n datasets_real=[y_val],\n kwargs=kwargs,\n )\n lst_parity_plots.append(parity_plot)\n else:\n for i, (k, v) in enumerate(self.output_models.items()):\n y_train_real, y_train_pred, y_test_real, y_test_pred = (\n torch.tensor(v[\"y_train_real\"]).float(),\n torch.tensor(v[\"y_train_pred_average\"]).float(),\n torch.tensor(v[\"y_test_real\"]).float(),\n torch.tensor(v[\"y_test_pred_average\"]).float(),\n )\n val_dict[k] = {\n \"Train\": {\n \"MAE\": (y_train_real - y_train_pred).abs().mean().item(),\n \"RMSE\": (\n (((y_train_real - y_train_pred) ** 2).mean()) ** (1 / 2)\n ).item(),\n \"r2\": r2_score(y_train_real, y_train_pred)\n if y_train_pred.shape[0] > 1\n else \"Too few data points to calculate r2.\",\n },\n \"Test\": {\n \"MAE\": (y_test_real - y_test_pred).abs().mean().item(),\n \"RMSE\": (\n (((y_test_real - y_test_pred) ** 2).mean()) ** (1 / 2)\n ).item(),\n \"r2\": r2_score(y_test_real, y_test_pred)\n if y_test_pred.shape[0] > 1\n else \"Too few data points to calculate r2.\",\n },\n }\n if parity_plots:\n parity_plot = self.create_parity_plot(\n datasets_pred=[y_train_pred, y_test_pred],\n datasets_real=[y_train_real, y_test_real],\n kwargs=kwargs,\n )\n lst_parity_plots.append(parity_plot)\n if get_pred:\n return predictions\n return val_dict, lst_parity_plots\n\n # =======================================================================\n\n def infer_model(self, dataset):\n\n self.output_models = self._load_model(self.model_name)\n\n self._model.eval() # set to evaluation mode (may be redundant)\n self._model.freeze_() # freeze the model, in order to predict using only their weight distribution means\n\n infer_dict = {}\n for i, (k, v) in enumerate(self.output_models.items()):\n model_load_dirs = v[\"model_save_dirs\"]\n self.data_transformation_dict = v[\"data_transformation_dict\"]\n out_transform = self.data_transformation_dict[k]\n\n X_infer = self._data_preprocess(inference=True, infer_dataset=dataset)\n X_infer = torch.tensor(X_infer).float()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n prediction_l = []\n for m in model_load_dirs:\n model_load_dir = osp.join(self.save_path, m + \"_BNN_model.pt\")\n self._model.load_state_dict(\n torch.load(model_load_dir, map_location=torch.device(device))\n )\n data = X_infer.to(device)\n predictions = self._model(data).item()\n predictions = self._untransform_data(\n data=predictions, reduce=out_transform[0], divide=out_transform[1]\n )\n prediction_l.append(predictions)\n prediction_l = torch.tensor(prediction_l)\n predictions = 
prediction_l.mean(axis=0).item()\n infer_dict[k] = predictions\n\n return infer_dict\n" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "sklearn.metrics.r2_score", "torch.nn.functional.dropout", "torch.cat", "torch.load", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.tensor", "torch.cuda.is_available", "torch.device", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
johnnytheboii/TensorFuzz_2.0
[ "d1d7ae7de26067c2a1c223dbef6d897752aa8f71" ]
[ "examples/quantize/quantized_fuzzer.py" ]
[ "# Copyright 2018 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fuzz a neural network to find disagreements between normal and quantized.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom lib import fuzz_utils\nfrom lib.corpus import InputCorpus\nfrom lib.corpus import seed_corpus_from_numpy_arrays\nfrom lib.coverage_functions import raw_logit_coverage_function\nfrom lib.coverage_functions import neuron_coverage_function\nfrom lib.coverage_functions import neuron_boundary_coverage_function\nfrom lib.coverage_functions import top_k_neuron_coverage_function\nfrom lib.fuzzer import Fuzzer\nfrom lib.mutation_functions import do_basic_mutations\nfrom lib.sample_functions import recent_sample_function\nfrom lib.sample_functions import uniform_sample_function\nimport time\n\n\ntf.flags.DEFINE_string(\n \"checkpoint_dir\", None, \"Dir containing checkpoints of model to fuzz.\"\n)\ntf.flags.DEFINE_string(\n \"output_path\", None, \"Where to write the satisfying output.\"\n)\ntf.flags.DEFINE_integer(\n \"total_inputs_to_fuzz\", 100, \"Loops over the whole corpus.\"\n)\ntf.flags.DEFINE_integer(\n \"mutations_per_corpus_item\", 100, \"Number of times to mutate corpus item.\"\n)\ntf.flags.DEFINE_float(\n \"perturbation_constraint\", None, \"Constraint on norm of perturbations.\"\n)\ntf.flags.DEFINE_float(\n \"ann_threshold\",\n 1.0,\n \"Distance below which we consider something new coverage.\",\n)\ntf.flags.DEFINE_boolean(\n \"random_seed_corpus\", False, \"Whether to choose a random seed corpus.\"\n)\nFLAGS = tf.flags.FLAGS\n\n\ndef metadata_function(metadata_batches):\n \"\"\"Gets the metadata.\"\"\"\n logit_32_batch = metadata_batches[0]\n logit_16_batch = metadata_batches[1]\n metadata_list = []\n for idx in range(logit_16_batch.shape[0]):\n metadata_list.append((logit_32_batch[idx], logit_16_batch[idx]))\n return metadata_list\n\n\ndef objective_function(corpus_element):\n \"\"\"Checks if the element is misclassified.\"\"\"\n logits_32 = corpus_element.metadata[0]\n logits_16 = corpus_element.metadata[1]\n prediction_16 = np.argmax(logits_16)\n prediction_32 = np.argmax(logits_32)\n if prediction_16 == prediction_32:\n return False\n return True\n\n\n# pylint: disable=too-many-locals\ndef main(_):\n \"\"\"Constructs the fuzzer and fuzzes.\"\"\"\n\n # Log more\n tf.logging.set_verbosity(tf.logging.INFO)\n\n coverage_function = top_k_neuron_coverage_function\n image, label = fuzz_utils.basic_mnist_input_corpus(\n choose_randomly=FLAGS.random_seed_corpus\n )\n numpy_arrays = [[image, label]]\n image_copy = image[:]\n\n with tf.Session() as sess:\n\n overall_start_time = time.time()\n tensor_map = fuzz_utils.get_tensors_from_checkpoint(\n sess, FLAGS.checkpoint_dir\n )\n\n fetch_function = fuzz_utils.build_fetch_function(sess, tensor_map)\n\n size = FLAGS.mutations_per_corpus_item\n\n def mutation_function(elt):\n \"\"\"Mutates the element in question.\"\"\"\n 
return do_basic_mutations(elt, size, FLAGS.perturbation_constraint)\n\n seed_corpus = seed_corpus_from_numpy_arrays(\n numpy_arrays, coverage_function, metadata_function, fetch_function\n )\n corpus = InputCorpus(\n seed_corpus, uniform_sample_function, FLAGS.ann_threshold, \"kdtree\"\n )\n fuzzer = Fuzzer(\n corpus,\n coverage_function,\n metadata_function,\n objective_function,\n mutation_function,\n fetch_function,\n )\n result, fetch_time = fuzzer.loop(FLAGS.total_inputs_to_fuzz)\n \n overall_end_time = time.time()\n print(\"Overall time is \" + str(overall_end_time-overall_start_time))\n print(\"Fetch time is \" + str(fetch_time))\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.flags.DEFINE_boolean", "tensorflow.flags.DEFINE_string", "numpy.argmax", "tensorflow.logging.set_verbosity", "tensorflow.Session", "tensorflow.flags.DEFINE_float", "tensorflow.flags.DEFINE_integer", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
C-bowman/inference-tools
[ "499c3c23f1b3817b4cabde21ba45f2e2c6b95f77" ]
[ "inference/priors.py" ]
[ "\"\"\"\n.. moduleauthor:: Chris Bowman <[email protected]>\n\"\"\"\nfrom typing import Union, Iterable\n\nfrom numpy import array, log, pi, zeros, concatenate, float64, where\nfrom numpy.random import normal, exponential, uniform\nfrom itertools import chain\n\n\nclass JointPrior(object):\n \"\"\"\n A class which combines multiple prior distribution objects into a single\n joint-prior distribution object.\n\n :param components: \\\n A list of prior distribution objects (e.g. GaussianPrior, ExponentialPrior)\n which will be combined into a single joint-prior object.\n\n :param int n_variables: \\\n The total number of model variables.\n \"\"\"\n\n def __init__(self, components, n_variables):\n if not all(isinstance(c, BasePrior) for c in components):\n raise TypeError(\n \"\"\"\n All objects contained in the 'components' argument must be instances\n of a subclass of BasePrior (e.g. GaussianPrior, UniformPrior)\n \"\"\"\n )\n\n # Combine any prior components which are of the same type\n self.components = []\n for cls in [GaussianPrior, ExponentialPrior, UniformPrior]:\n L = [c for c in components if isinstance(c, cls)]\n if len(L) == 1:\n self.components.extend(L)\n elif len(L) > 1:\n self.components.append(cls.combine(L))\n\n # check that no variable appears more than once across all prior components\n self.prior_variables = []\n for var in chain(*[c.variables for c in self.components]):\n if var in self.prior_variables:\n raise ValueError(\n f\"Variable index '{var}' appears more than once in prior components\"\n )\n self.prior_variables.append(var)\n\n if len(self.prior_variables) != n_variables:\n raise ValueError(\n f\"\"\"\n The total number of variables specified across the various prior\n components ({len(self.prior_variables)}) does not match the number specified in\n the 'n_variables' argument ({n_variables}).\n \"\"\"\n )\n\n if not all(0 <= i < n_variables for i in self.prior_variables):\n raise ValueError(\n \"\"\"\n All variable indices given to the prior components must have values\n in the range [0, n_variables-1].\n \"\"\"\n )\n\n self.n_variables = n_variables\n\n all_bounds = chain(*[c.bounds for c in self.components])\n all_inds = chain(*[c.variables for c in self.components])\n both = sorted(\n [(b, i) for b, i in zip(all_bounds, all_inds)], key=lambda x: x[1]\n )\n self.bounds = [v[0] for v in both]\n\n def __call__(self, theta):\n \"\"\"\n Returns the joint-prior log-probability value, calculated as the sum\n of the log-probabilities from each prior component for the provided\n set of model parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The prior log-probability value.\n \"\"\"\n return sum(c(theta) for c in self.components)\n\n def gradient(self, theta):\n \"\"\"\n Returns the gradient of the prior log-probability with respect to the model\n parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The gradient of the prior log-probability with respect to the model parameters.\n \"\"\"\n grad = zeros(self.n_variables)\n for c in self.components:\n grad[c.variables] = c.gradient(theta)\n return grad\n\n def sample(self):\n \"\"\"\n Draws a sample from the prior.\n\n :returns: \\\n A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n \"\"\"\n sample = zeros(self.n_variables)\n for c in self.components:\n sample[c.variables] = c.sample()\n return sample\n\n\nclass BasePrior(object):\n @staticmethod\n def check_variables(variable_inds: Union[int, 
Iterable[int]], n_vars: int):\n if not isinstance(variable_inds, (int, Iterable)):\n raise TypeError(\"'variable_inds' must be an integer or list of integers\")\n\n if isinstance(variable_inds, int):\n variable_inds = [variable_inds]\n\n if not all(isinstance(p, int) for p in variable_inds):\n raise TypeError(\"'variable_inds' must be an integer or list of integers\")\n\n if n_vars != len(variable_inds):\n raise ValueError(\n \"\"\"\n The total number of variables specified via the 'variable_indices' argument is\n inconsistent with the number specified by the other arguments.\n \"\"\"\n )\n\n if len(variable_inds) != len(set(variable_inds)):\n raise ValueError(\n \"\"\"\n All integers given via the 'variable_indices' must be unique.\n Two or more of the given integers are duplicates.\n \"\"\"\n )\n\n return variable_inds\n\n\nclass GaussianPrior(BasePrior):\n \"\"\"\n A class for generating a Gaussian prior for one or more of the model variables.\n\n :param mean: \\\n A list specifying the means of the Gaussian priors on each of the variables specified\n in the ``variable_indices`` argument.\n\n :param sigma: \\\n A list specifying the standard deviations of the Gaussian priors on each of the\n variables specified in the ``variable_indices`` argument.\n\n :param variable_indices: \\\n A list of integers specifying the indices of the variables to which the prior will apply.\n \"\"\"\n\n def __init__(self, mean, sigma, variable_indices):\n\n self.mean = array(mean, dtype=float64).squeeze()\n self.sigma = array(sigma, dtype=float64).squeeze()\n\n # if parameters were passed as floats, convert from 0D to 1D arrays\n if self.mean.ndim == 0:\n self.mean = self.mean.reshape([1])\n if self.sigma.ndim == 0:\n self.sigma = self.sigma.reshape([1])\n\n self.n_params = self.mean.size\n\n if self.mean.size != self.sigma.size:\n raise ValueError(\n \"mean and sigma arguments must have the same number of elements\"\n )\n\n if self.mean.ndim > 1 or self.sigma.ndim > 1:\n raise ValueError(\"mean and sigma arguments must be 1D arrays\")\n\n if not (self.sigma > 0.0).all():\n raise ValueError('All values of \"sigma\" must be greater than zero')\n\n self.variables = self.check_variables(variable_indices, self.n_params)\n\n # pre-calculate some quantities as an optimisation\n self.inv_sigma = 1.0 / self.sigma\n self.inv_sigma_sqr = self.inv_sigma ** 2\n self.normalisation = -log(self.sigma).sum() - 0.5 * log(2 * pi) * self.n_params\n self.bounds = [(None, None)] * self.n_params\n\n def __call__(self, theta):\n \"\"\"\n Returns the prior log-probability value for the provided set of model parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The prior log-probability value.\n \"\"\"\n z = (self.mean - theta[self.variables]) * self.inv_sigma\n return -0.5 * (z ** 2).sum() + self.normalisation\n\n def gradient(self, theta):\n \"\"\"\n Returns the gradient of the prior log-probability with respect to the model\n parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The gradient of the prior log-probability with respect to the model parameters.\n \"\"\"\n return (self.mean - theta[self.variables]) * self.inv_sigma_sqr\n\n def sample(self):\n \"\"\"\n Draws a sample from the prior.\n\n :returns: \\\n A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n \"\"\"\n return normal(loc=self.mean, scale=self.sigma)\n\n @classmethod\n def combine(cls, priors):\n if not all(isinstance(p, cls) for p in priors):\n 
raise ValueError(f\"All prior objects being combined must be of type {cls}\")\n\n variables = []\n for p in priors:\n variables.extend(p.variables)\n\n means = concatenate([p.mean for p in priors])\n sigmas = concatenate([p.sigma for p in priors])\n\n return cls(mean=means, sigma=sigmas, variable_indices=variables)\n\n\nclass ExponentialPrior(BasePrior):\n \"\"\"\n A class for generating an exponential prior for one or more of the model variables.\n\n :param beta: \\\n A list specifying the 'beta' parameter value of the exponential priors on each of the\n variables specified in the ``variable_indices`` argument.\n\n :param variable_indices: \\\n A list of integers specifying the indices of the variables to which the prior will apply.\n \"\"\"\n\n def __init__(self, beta, variable_indices):\n\n self.beta = array(beta, dtype=float64).squeeze()\n if self.beta.ndim == 0:\n self.beta = self.beta.reshape([1])\n self.n_params = self.beta.size\n\n if self.beta.ndim > 1:\n raise ValueError(\"beta argument must be a 1D array\")\n\n if not (self.beta > 0.0).all():\n raise ValueError('All values of \"beta\" must be greater than zero')\n\n self.variables = self.check_variables(variable_indices, self.n_params)\n\n # pre-calculate some quantities as an optimisation\n self.lam = 1.0 / self.beta\n self.normalisation = log(self.lam).sum()\n self.zeros = zeros(self.n_params)\n self.bounds = [(0.0, None)] * self.n_params\n\n def __call__(self, theta):\n \"\"\"\n Returns the prior log-probability value for the provided set of model parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The prior log-probability value.\n \"\"\"\n if (theta < 0.0).any():\n return -1e100\n return -(self.lam * theta[self.variables]).sum() + self.normalisation\n\n def gradient(self, theta):\n \"\"\"\n Returns the gradient of the prior log-probability with respect to the model\n parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The gradient of the prior log-probability with respect to the model parameters.\n \"\"\"\n return where(theta[self.variables] >= 0.0, -self.lam, self.zeros)\n\n def sample(self):\n \"\"\"\n Draws a sample from the prior.\n\n :returns: \\\n A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n \"\"\"\n return exponential(scale=self.beta)\n\n @classmethod\n def combine(cls, priors):\n if not all(isinstance(p, cls) for p in priors):\n raise ValueError(f\"All prior objects being combined must be of type {cls}\")\n\n variables = []\n for p in priors:\n variables.extend(p.variables)\n\n betas = concatenate([p.beta for p in priors])\n return cls(beta=betas, variable_indices=variables)\n\n\nclass UniformPrior(BasePrior):\n \"\"\"\n A class for generating a uniform prior for one or more of the model variables.\n\n :param lower: \\\n A list specifying the lower bound of the uniform priors on each of the variables\n specified in the ``variable_indices`` argument.\n\n :param upper: \\\n A list specifying the upper bound of the uniform priors on each of the variables\n specified in the ``variable_indices`` argument.\n\n :param variable_indices: \\\n A list of integers specifying the indices of the variables to which the prior will apply.\n \"\"\"\n\n def __init__(self, lower, upper, variable_indices):\n self.lower = array(lower).squeeze()\n self.upper = array(upper).squeeze()\n\n # if parameters were passed as floats, convert from 0D to 1D arrays\n self.lower = self.lower.reshape([1]) if self.lower.ndim == 
0 else self.lower\n self.upper = self.upper.reshape([1]) if self.upper.ndim == 0 else self.upper\n\n self.n_params = self.lower.size\n self.grad = zeros(self.n_params)\n\n if self.lower.size != self.upper.size:\n raise ValueError(\n \"\"\"'lower' and 'upper' arguments must have the same number of elements\"\"\"\n )\n\n if self.lower.ndim > 1 or self.upper.ndim > 1:\n raise ValueError(\"'lower' and 'upper' arguments must be 1D arrays\")\n\n if (self.upper <= self.lower).any():\n raise ValueError(\n \"All values in 'lower' must be less than the corresponding values in 'upper'\"\n )\n\n self.variables = self.check_variables(variable_indices, self.n_params)\n\n # pre-calculate some quantities as an optimisation\n self.normalisation = -log(self.upper - self.lower).sum()\n self.bounds = [(lo, up) for lo, up in zip(self.lower, self.upper)]\n\n def __call__(self, theta):\n \"\"\"\n Returns the prior log-probability value for the provided set of model parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The prior log-probability value.\n \"\"\"\n t = theta[self.variables]\n inside = (self.lower <= t) & (t <= self.upper)\n if inside.all():\n return self.normalisation\n return -1e100\n\n def gradient(self, theta):\n \"\"\"\n Returns the gradient of the prior log-probability with respect to the model\n parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The gradient of the prior log-probability with respect to the model parameters.\n \"\"\"\n return self.grad\n\n def sample(self):\n \"\"\"\n Draws a sample from the prior.\n\n :returns: \\\n A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n \"\"\"\n return uniform(low=self.lower, high=self.upper)\n\n @classmethod\n def combine(cls, priors):\n if not all(isinstance(p, cls) for p in priors):\n raise ValueError(f\"All prior objects being combined must be of type {cls}\")\n\n variables = []\n for p in priors:\n variables.extend(p.variables)\n\n lower = concatenate([p.lower for p in priors])\n upper = concatenate([p.upper for p in priors])\n\n return cls(lower=lower, upper=upper, variable_indices=variables)\n" ]
[ [ "numpy.log", "numpy.random.exponential", "numpy.concatenate", "numpy.random.normal", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mschwoer/alphapept
[ "446b3c8b2a20619a74ff872c24a01fed8b99a20a" ]
[ "alphapept/ext/bruker/timsdata.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Python wrapper for timsdata.dll\"\"\"\n\nimport numpy as np\nimport sqlite3\nimport os, sys\nfrom ctypes import *\n\nif sys.platform[:5] == \"win32\":\n libname = \"timsdata.dll\"\nelif sys.platform[:5] == \"linux\":\n libname = \"libtimsdata.so\"\nelse:\n raise Exception(\"Unsupported platform.\")\n\n\npath = os.path.dirname(os.path.abspath(__file__))\nlibname = os.path.join(path, libname)\n\ndll = cdll.LoadLibrary(libname)\ndll.tims_open.argtypes = [ c_char_p, c_uint32 ]\ndll.tims_open.restype = c_uint64\ndll.tims_close.argtypes = [ c_uint64 ]\ndll.tims_close.restype = None\ndll.tims_get_last_error_string.argtypes = [ c_char_p, c_uint32 ]\ndll.tims_get_last_error_string.restype = c_uint32\ndll.tims_has_recalibrated_state.argtypes = [ c_uint64 ]\ndll.tims_has_recalibrated_state.restype = c_uint32\ndll.tims_read_scans_v2.argtypes = [ c_uint64, c_int64, c_uint32, c_uint32, c_void_p, c_uint32 ]\ndll.tims_read_scans_v2.restype = c_uint32\nMSMS_SPECTRUM_FUNCTOR = CFUNCTYPE(None, c_int64, c_uint32, POINTER(c_double), POINTER(c_float))\ndll.tims_read_pasef_msms.argtypes = [ c_uint64, POINTER(c_int64), c_uint32, MSMS_SPECTRUM_FUNCTOR ]\ndll.tims_read_pasef_msms.restype = c_uint32\ndll.tims_read_pasef_msms_for_frame.argtypes = [ c_uint64, c_int64, MSMS_SPECTRUM_FUNCTOR ]\ndll.tims_read_pasef_msms_for_frame.restype = c_uint32\nMSMS_PROFILE_SPECTRUM_FUNCTOR = CFUNCTYPE(None, c_int64, c_uint32, POINTER(c_int32))\nif sys.platform[:5] == \"win32\":\n dll.tims_read_pasef_profile_msms.argtypes = [ c_uint64, POINTER(c_int64), c_uint32, MSMS_PROFILE_SPECTRUM_FUNCTOR ]\n dll.tims_read_pasef_profile_msms.restype = c_uint32\n dll.tims_read_pasef_profile_msms_for_frame.argtypes = [ c_uint64, c_int64, MSMS_PROFILE_SPECTRUM_FUNCTOR ]\n dll.tims_read_pasef_profile_msms_for_frame.restype = c_uint32\n\nconvfunc_argtypes = [ c_uint64, c_int64, POINTER(c_double), POINTER(c_double), c_uint32 ]\n\ndll.tims_index_to_mz.argtypes = convfunc_argtypes\ndll.tims_index_to_mz.restype = c_uint32\ndll.tims_mz_to_index.argtypes = convfunc_argtypes\ndll.tims_mz_to_index.restype = c_uint32\n\ndll.tims_scannum_to_oneoverk0.argtypes = convfunc_argtypes\ndll.tims_scannum_to_oneoverk0.restype = c_uint32\ndll.tims_oneoverk0_to_scannum.argtypes = convfunc_argtypes\ndll.tims_oneoverk0_to_scannum.restype = c_uint32\n\ndll.tims_scannum_to_voltage.argtypes = convfunc_argtypes\ndll.tims_scannum_to_voltage.restype = c_uint32\ndll.tims_voltage_to_scannum.argtypes = convfunc_argtypes\ndll.tims_voltage_to_scannum.restype = c_uint32\n\nif sys.platform[:5] == \"win32\":\n dll.tims_oneoverk0_to_ccs_for_mz.argtypes = [c_double, c_int32, c_double]\n dll.tims_oneoverk0_to_ccs_for_mz.restype = c_double\n\n dll.tims_ccs_to_oneoverk0_for_mz.argtypes = [c_double, c_int32, c_double]\n dll.tims_ccs_to_oneoverk0_for_mz.restype = c_double\n\ndef throwLastTimsDataError (dll_handle):\n \"\"\"Throw last TimsData error string as an exception.\"\"\"\n\n len = dll_handle.tims_get_last_error_string(None, 0)\n buf = create_string_buffer(len)\n dll_handle.tims_get_last_error_string(buf, len)\n raise RuntimeError(buf.value)\n\n# Decodes a properties BLOB of type 12 (array of strings = concatenation of\n# zero-terminated UTF-8 strings). (The BLOB object returned by an SQLite query can be\n# directly put into this function.) 
\\returns a list of unicode strings.\ndef decodeArrayOfStrings (blob):\n if blob is None:\n return None # property not set\n\n if len(blob) == 0:\n return [] # empty list\n\n blob = bytearray(blob)\n if blob[-1] != 0:\n raise ValueError(\"Illegal BLOB contents.\") # trailing nonsense\n\n if sys.version_info.major == 2:\n return unicode(str(blob), 'utf-8').split('\\0')[:-1]\n if sys.version_info.major == 3:\n return str(blob, 'utf-8').split('\\0')[:-1]\n\n\n# Convert 1/K0 to CCS for a given charge and mz\ndef oneOverK0ToCCSforMz(ook0, charge, mz):\n return dll.tims_oneoverk0_to_ccs_for_mz(ook0, charge, mz)\n\n# Convert CCS to 1/K0 for a given charge and mz\ndef ccsToOneOverK0ToCCSforMz(ccs, charge, mz):\n return dll.tims_ccs_to_oneoverk0_for_mz(ccs, charge, mz)\n\n\nclass TimsData:\n\n def __init__ (self, analysis_directory, use_recalibrated_state=False):\n\n if sys.version_info.major == 2:\n if not isinstance(analysis_directory, unicode):\n raise ValueError(\"analysis_directory must be a Unicode string.\")\n if sys.version_info.major == 3:\n if not isinstance(analysis_directory, str):\n raise ValueError(\"analysis_directory must be a string.\")\n\n self.dll = dll\n\n self.handle = self.dll.tims_open(\n analysis_directory.encode('utf-8'),\n 1 if use_recalibrated_state else 0 )\n if self.handle == 0:\n throwLastTimsDataError(self.dll)\n\n self.conn = sqlite3.connect(os.path.join(analysis_directory, \"analysis.tdf\"))\n\n self.initial_frame_buffer_size = 128 # may grow in readScans()\n\n def __del__ (self):\n if hasattr(self, 'handle'):\n self.dll.tims_close(self.handle)\n\n def __callConversionFunc (self, frame_id, input_data, func):\n\n if type(input_data) is np.ndarray and input_data.dtype == np.float64:\n # already \"native\" format understood by DLL -> avoid extra copy\n in_array = input_data\n else:\n # convert data to format understood by DLL:\n in_array = np.array(input_data, dtype=np.float64)\n\n cnt = len(in_array)\n out = np.empty(shape=cnt, dtype=np.float64)\n success = func(self.handle, frame_id,\n in_array.ctypes.data_as(POINTER(c_double)),\n out.ctypes.data_as(POINTER(c_double)),\n cnt)\n\n if success == 0:\n throwLastTimsDataError(self.dll)\n\n return out\n\n def indexToMz (self, frame_id, indices):\n return self.__callConversionFunc(frame_id, indices, self.dll.tims_index_to_mz)\n\n def mzToIndex (self, frame_id, mzs):\n return self.__callConversionFunc(frame_id, mzs, self.dll.tims_mz_to_index)\n\n def scanNumToOneOverK0 (self, frame_id, scan_nums):\n return self.__callConversionFunc(frame_id, scan_nums, self.dll.tims_scannum_to_oneoverk0)\n\n def oneOverK0ToScanNum (self, frame_id, mobilities):\n return self.__callConversionFunc(frame_id, mobilities, self.dll.tims_oneoverk0_to_scannum)\n\n def scanNumToVoltage (self, frame_id, scan_nums):\n return self.__callConversionFunc(frame_id, scan_nums, self.dll.tims_scannum_to_voltage)\n\n def voltageToScanNum (self, frame_id, voltages):\n return self.__callConversionFunc(frame_id, voltages, self.dll.tims_voltage_to_scannum)\n\n\n # Output: list of tuples (indices, intensities)\n def readScans (self, frame_id, scan_begin, scan_end):\n\n # buffer-growing loop\n while True:\n cnt = int(self.initial_frame_buffer_size) # necessary cast to run with python 3.5\n buf = np.empty(shape=cnt, dtype=np.uint32)\n len = 4 * cnt\n\n required_len = self.dll.tims_read_scans_v2(self.handle, frame_id, scan_begin, scan_end,\n buf.ctypes.data_as(POINTER(c_uint32)),\n len)\n if required_len == 0:\n throwLastTimsDataError(self.dll)\n\n if required_len > 
len:\n if required_len > 16777216:\n # arbitrary limit for now...\n raise RuntimeError(\"Maximum expected frame size exceeded.\")\n self.initial_frame_buffer_size = required_len / 4 + 1 # grow buffer\n else:\n break\n\n result = []\n d = scan_end - scan_begin\n for i in range(scan_begin, scan_end):\n npeaks = buf[i-scan_begin]\n indices = buf[d : d+npeaks]\n d += npeaks\n intensities = buf[d : d+npeaks]\n d += npeaks\n result.append((indices,intensities))\n\n return result\n\n # read some peak-picked MS/MS spectra for a given list of precursors; returns a dict mapping\n # 'precursor_id' to a pair of arrays (mz_values, area_values).\n def readPasefMsMs (self, precursor_list):\n precursors_for_dll = np.array(precursor_list, dtype=np.int64)\n\n result = {}\n\n @MSMS_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_peaks, mz_values, area_values):\n result[precursor_id] = (mz_values[0:num_peaks], area_values[0:num_peaks])\n\n rc = self.dll.tims_read_pasef_msms(self.handle,\n precursors_for_dll.ctypes.data_as(POINTER(c_int64)),\n len(precursor_list),\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n\n\t\t# read peak-picked MS/MS spectra for a given frame; returns a dict mapping\n # 'precursor_id' to a pair of arrays (mz_values, area_values).\n def readPasefMsMsForFrame (self, frame_id):\n result = {}\n\n @MSMS_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_peaks, mz_values, area_values):\n result[precursor_id] = (mz_values[0:num_peaks], area_values[0:num_peaks])\n\n rc = self.dll.tims_read_pasef_msms_for_frame(self.handle,\n frame_id,\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n\n\t\t# read some \"quasi profile\" MS/MS spectra for a given list of precursors; returns a dict mapping\n # 'precursor_id' to the profil arrays (intensity_values).\n def readPasefProfileMsMs (self, precursor_list):\n precursors_for_dll = np.array(precursor_list, dtype=np.int64)\n\n result = {}\n\n @MSMS_PROFILE_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_points, intensity_values):\n result[precursor_id] = intensity_values[0:num_points]\n\n rc = self.dll.tims_read_pasef_profile_msms(self.handle,\n precursors_for_dll.ctypes.data_as(POINTER(c_int64)),\n len(precursor_list),\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n\n # read \"quasi profile\" MS/MS spectra for a given frame; returns a dict mapping\n # 'precursor_id' to the profil arrays (intensity_values).\n def readPasefProfileMsMsForFrame (self, frame_id):\n result = {}\n\n @MSMS_PROFILE_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_points, intensity_values):\n result[precursor_id] = intensity_values[0:num_points]\n\n rc = self.dll.tims_read_pasef_profile_msms_for_frame(self.handle,\n frame_id,\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n" ]
[ [ "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mahanswaray/simpletransformers
[ "44a97d689b6bd19495e698ae918e67c80828559e", "44a97d689b6bd19495e698ae918e67c80828559e" ]
[ "simpletransformers/classification/multi_label_classification_model.py", "simpletransformers/seq2seq/seq2seq_model.py" ]
[ "import logging\nimport random\nimport warnings\nfrom multiprocessing import cpu_count\n\nimport numpy as np\nimport torch\nfrom transformers import (\n WEIGHTS_NAME,\n AlbertConfig,\n AlbertTokenizer,\n BertConfig,\n BertTokenizer,\n DistilBertConfig,\n DistilBertTokenizer,\n ElectraConfig,\n ElectraTokenizer,\n FlaubertConfig,\n FlaubertTokenizer,\n RobertaConfig,\n RobertaTokenizer,\n XLMConfig,\n XLMRobertaConfig,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetTokenizer,\n)\n\nfrom simpletransformers.classification import ClassificationModel\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.config.model_args import MultiLabelClassificationArgs\nfrom simpletransformers.custom_models.models import (\n AlbertForMultiLabelSequenceClassification,\n BertForMultiLabelSequenceClassification,\n DistilBertForMultiLabelSequenceClassification,\n ElectraForMultiLabelSequenceClassification,\n FlaubertForMultiLabelSequenceClassification,\n RobertaForMultiLabelSequenceClassification,\n XLMForMultiLabelSequenceClassification,\n XLMRobertaForMultiLabelSequenceClassification,\n XLNetForMultiLabelSequenceClassification,\n)\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiLabelClassificationModel(ClassificationModel):\n def __init__(\n self,\n model_type,\n model_name,\n num_labels=None,\n pos_weight=None,\n args=None,\n use_cuda=True,\n cuda_device=-1,\n **kwargs,\n ):\n\n \"\"\"\n Initializes a MultiLabelClassification model.\n\n Args:\n model_type: The type of model (bert, roberta)\n model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).\n num_labels (optional): The number of labels or classes in the dataset.\n pos_weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. 
Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\"\n\n MODEL_CLASSES = {\n \"bert\": (BertConfig, BertForMultiLabelSequenceClassification, BertTokenizer,),\n \"roberta\": (RobertaConfig, RobertaForMultiLabelSequenceClassification, RobertaTokenizer,),\n \"xlnet\": (XLNetConfig, XLNetForMultiLabelSequenceClassification, XLNetTokenizer,),\n \"xlm\": (XLMConfig, XLMForMultiLabelSequenceClassification, XLMTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForMultiLabelSequenceClassification, DistilBertTokenizer,),\n \"albert\": (AlbertConfig, AlbertForMultiLabelSequenceClassification, AlbertTokenizer,),\n \"flaubert\": (FlaubertConfig, FlaubertForMultiLabelSequenceClassification, FlaubertTokenizer,),\n \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForMultiLabelSequenceClassification, XLMRobertaTokenizer,),\n \"electra\": (ElectraConfig, ElectraForMultiLabelSequenceClassification, ElectraTokenizer),\n }\n\n self.args = self._load_model_args(model_name)\n\n if isinstance(args, dict):\n self.args.update_from_dict(args)\n elif isinstance(args, MultiLabelClassificationArgs):\n self.args = args\n\n if \"sweep_config\" in kwargs:\n sweep_config = kwargs.pop(\"sweep_config\")\n sweep_values = {key: value[\"value\"] for key, value in sweep_config.as_dict().items() if key != \"_wandb\"}\n self.args.update_from_dict(sweep_values)\n\n if self.args.manual_seed:\n random.seed(self.args.manual_seed)\n np.random.seed(self.args.manual_seed)\n torch.manual_seed(self.args.manual_seed)\n if self.args.n_gpu > 0:\n torch.cuda.manual_seed_all(self.args.manual_seed)\n\n if not use_cuda:\n self.args.fp16 = False\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n if num_labels:\n self.config = config_class.from_pretrained(model_name, num_labels=num_labels, **self.args.config)\n self.num_labels = num_labels\n else:\n self.config = config_class.from_pretrained(model_name, **self.args.config)\n self.num_labels = self.config.num_labels\n self.pos_weight = pos_weight\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \" Make sure CUDA is available or set use_cuda=False.\"\n )\n else:\n self.device = \"cpu\"\n\n if self.pos_weight:\n self.model = model_class.from_pretrained(\n model_name, config=self.config, pos_weight=torch.Tensor(self.pos_weight).to(self.device), **kwargs\n )\n else:\n self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)\n\n self.results = {}\n\n self.tokenizer = tokenizer_class.from_pretrained(model_name, do_lower_case=self.args.do_lower_case, **kwargs)\n\n self.args.model_name = model_name\n self.args.model_type = model_type\n\n if self.args.wandb_project and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. 
Wandb disabled.\")\n self.args.wandb_project = None\n\n def _load_model_args(self, input_dir):\n args = MultiLabelClassificationArgs()\n args.load(input_dir)\n return args\n\n def train_model(\n self,\n train_df,\n multi_label=True,\n eval_df=None,\n output_dir=None,\n show_running_loss=True,\n args=None,\n verbose=True,\n **kwargs,\n ):\n return super().train_model(\n train_df,\n multi_label=multi_label,\n eval_df=eval_df,\n output_dir=output_dir,\n show_running_loss=show_running_loss,\n verbose=True,\n args=args,\n **kwargs,\n )\n\n def eval_model(self, eval_df, multi_label=True, output_dir=None, verbose=False, silent=False, **kwargs):\n return super().eval_model(\n eval_df, output_dir=output_dir, multi_label=multi_label, verbose=verbose, silent=silent, **kwargs\n )\n\n def evaluate(self, eval_df, output_dir, multi_label=True, prefix=\"\", verbose=True, silent=False, **kwargs):\n return super().evaluate(\n eval_df, output_dir, multi_label=multi_label, prefix=prefix, verbose=verbose, silent=silent, **kwargs\n )\n\n def load_and_cache_examples(\n self, examples, evaluate=False, no_cache=False, multi_label=True, verbose=True, silent=False\n ):\n return super().load_and_cache_examples(\n examples, evaluate=evaluate, no_cache=no_cache, multi_label=multi_label, verbose=verbose, silent=silent\n )\n\n def compute_metrics(self, preds, labels, eval_examples, multi_label=True, **kwargs):\n return super().compute_metrics(preds, labels, eval_examples, multi_label=multi_label, **kwargs)\n\n def predict(self, to_predict, multi_label=True):\n return super().predict(to_predict, multi_label=multi_label)\n", "import json\nimport logging\nimport math\nimport os\nimport random\nimport warnings\nfrom dataclasses import asdict\nfrom multiprocessing import Pool, cpu_count\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm.auto import tqdm, trange\nfrom transformers import (\n AdamW,\n AutoConfig,\n AutoModel,\n AutoTokenizer,\n BartConfig,\n BartForConditionalGeneration,\n BartTokenizer,\n BertConfig,\n BertForMaskedLM,\n BertModel,\n BertTokenizer,\n CamembertConfig,\n CamembertModel,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertModel,\n DistilBertTokenizer,\n ElectraConfig,\n ElectraModel,\n ElectraTokenizer,\n EncoderDecoderConfig,\n EncoderDecoderModel,\n LongformerConfig,\n LongformerModel,\n LongformerTokenizer,\n MarianConfig,\n MarianMTModel,\n MarianTokenizer,\n MobileBertConfig,\n MobileBertModel,\n MobileBertTokenizer,\n PreTrainedModel,\n PreTrainedTokenizer,\n RobertaConfig,\n RobertaModel,\n RobertaTokenizer,\n get_linear_schedule_with_warmup,\n)\n\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.config.model_args import Seq2SeqArgs\nfrom simpletransformers.seq2seq.seq2seq_utils import Seq2SeqDataset, SimpleSummarizationDataset\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CLASSES = {\n \"auto\": (AutoConfig, AutoModel, AutoTokenizer),\n \"bart\": (BartConfig, BartForConditionalGeneration, BartTokenizer),\n \"bert\": (BertConfig, BertModel, BertTokenizer),\n \"camembert\": (CamembertConfig, CamembertModel, CamembertTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertModel, 
DistilBertTokenizer),\n \"electra\": (ElectraConfig, ElectraModel, ElectraTokenizer),\n \"longformer\": (LongformerConfig, LongformerModel, LongformerTokenizer),\n \"mobilebert\": (MobileBertConfig, MobileBertModel, MobileBertTokenizer),\n \"marian\": (MarianConfig, MarianMTModel, MarianTokenizer),\n \"roberta\": (RobertaConfig, RobertaModel, RobertaTokenizer),\n}\n\n\nclass Seq2SeqModel:\n def __init__(\n self,\n encoder_type=None,\n encoder_name=None,\n decoder_name=None,\n encoder_decoder_type=None,\n encoder_decoder_name=None,\n config=None,\n args=None,\n use_cuda=True,\n cuda_device=-1,\n **kwargs,\n ):\n\n \"\"\"\n Initializes a Seq2SeqModel.\n\n Args:\n encoder_type (optional): The type of model to use as the encoder.\n encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n Must be the same \"size\" as the encoder model (base/base, large/large, etc.)\n encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)\n encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. \"outputs/\") OR a valid BART or MarianMT model.\n config (optional): A configuration file to build an EncoderDecoderModel.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. 
Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\"\n\n if not config:\n # if not ((encoder_name and decoder_name) or encoder_decoder_name) and not encoder_type:\n if not ((encoder_name and decoder_name) or encoder_decoder_name):\n raise ValueError(\n \"You must specify a Seq2Seq config \\t OR \\t\"\n \"encoder_type, encoder_name, and decoder_name OR \\t \\t\"\n \"encoder_type and encoder_decoder_name\"\n )\n elif not (encoder_type or encoder_decoder_type):\n raise ValueError(\n \"You must specify a Seq2Seq config \\t OR \\t\"\n \"encoder_type, encoder_name, and decoder_name \\t OR \\t\"\n \"encoder_type and encoder_decoder_name\"\n )\n\n self.args = self._load_model_args(encoder_decoder_name)\n\n if isinstance(args, dict):\n self.args.update_from_dict(args)\n elif isinstance(args, Seq2SeqArgs):\n self.args = args\n\n if \"sweep_config\" in kwargs:\n sweep_config = kwargs.pop(\"sweep_config\")\n sweep_values = {key: value[\"value\"] for key, value in sweep_config.as_dict().items() if key != \"_wandb\"}\n self.args.update_from_dict(sweep_values)\n\n if self.args.manual_seed:\n random.seed(self.args.manual_seed)\n np.random.seed(self.args.manual_seed)\n torch.manual_seed(self.args.manual_seed)\n if self.args.n_gpu > 0:\n torch.cuda.manual_seed_all(self.args.manual_seed)\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \"Make sure CUDA is available or set `use_cuda=False`.\"\n )\n else:\n self.device = \"cpu\"\n\n self.results = {}\n\n if not use_cuda:\n self.args.fp16 = False\n\n # config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config)\n if encoder_decoder_type:\n config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_decoder_type]\n else:\n config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_type]\n\n if encoder_decoder_type in [\"bart\", \"marian\"]:\n self.model = model_class.from_pretrained(encoder_decoder_name)\n if encoder_decoder_type == \"bart\":\n self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)\n elif encoder_decoder_type == \"marian\":\n if self.args.base_marian_model_name:\n self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args.base_marian_model_name)\n else:\n self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)\n self.decoder_tokenizer = self.encoder_tokenizer\n self.config = self.model.config\n else:\n if encoder_decoder_name:\n # self.model = EncoderDecoderModel.from_pretrained(encoder_decoder_name)\n self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(\n os.path.join(encoder_decoder_name, \"encoder\"), os.path.join(encoder_decoder_name, \"decoder\")\n )\n self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, \"encoder\"))\n self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, \"decoder\"))\n self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, \"encoder\"))\n self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, \"decoder\"))\n else:\n self.model = 
EncoderDecoderModel.from_encoder_decoder_pretrained(\n encoder_name, decoder_name, config=config\n )\n self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)\n self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)\n self.encoder_config = self.model.config.encoder\n self.decoder_config = self.model.config.decoder\n\n if self.args.wandb_project and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. Wandb disabled.\")\n self.args.wandb_project = None\n\n if encoder_decoder_name:\n self.args.model_name = encoder_decoder_name\n\n # # Checking if we are loading from a saved model or using a pre-trained model\n # if not saved_model_args and encoder_decoder_type == \"marian\":\n # Need to store base pre-trained model name to get the tokenizer when loading a saved model\n self.args.base_marian_model_name = encoder_decoder_name\n\n elif encoder_name and decoder_name:\n self.args.model_name = encoder_name + \"-\" + decoder_name\n else:\n self.args.model_name = \"encoder-decoder\"\n\n if encoder_decoder_type:\n self.args.model_type = encoder_decoder_type\n elif encoder_type:\n self.args.model_type = encoder_type + \"-bert\"\n else:\n self.args.model_type = \"encoder-decoder\"\n\n def train_model(\n self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs,\n ):\n \"\"\"\n Trains the model using 'train_data'\n\n Args:\n train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.\n - `input_text`: The input text sequence.\n - `target_text`: The target text sequence\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. 
Note that this will slow down training significantly as the predicted sequences need to be generated.\n\n Returns:\n None\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update_from_dict(args)\n\n # if self.args.silent:\n # show_running_loss = False\n\n if self.args.evaluate_during_training and eval_data is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_data is not specified.\"\n \" Pass eval_data to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \" Set args.overwrite_output_dir = True to overcome.\".format(output_dir)\n )\n\n self._move_model_to_device()\n\n train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, tr_loss = self.train(\n train_dataset,\n output_dir,\n show_running_loss=show_running_loss,\n eval_data=eval_data,\n verbose=verbose,\n **kwargs,\n )\n\n self._save_model(self.args.output_dir, model=self.model)\n\n # model_to_save = self.model.module if hasattr(self.model, \"module\") else self.model\n # model_to_save.save_pretrained(output_dir)\n # self.encoder_tokenizer.save_pretrained(output_dir)\n # self.decoder_tokenizer.save_pretrained(output_dir)\n # torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n if verbose:\n logger.info(\" Training of {} model complete. Saved to {}.\".format(self.args.model_name, output_dir))\n\n def train(\n self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,\n ):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. 
Not intended to be used directly.\n \"\"\"\n\n model = self.model\n args = self.args\n\n tb_writer = SummaryWriter(logdir=args.tensorboard_dir)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset,\n sampler=train_sampler,\n batch_size=args.train_batch_size,\n num_workers=self.args.dataloader_num_workers,\n )\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n\n optimizer_grouped_parameters = []\n custom_parameter_names = set()\n for group in self.args.custom_parameter_groups:\n params = group.pop(\"params\")\n custom_parameter_names.update(params)\n param_group = {**group}\n param_group[\"params\"] = [p for n, p in model.named_parameters() if n in params]\n optimizer_grouped_parameters.append(param_group)\n\n for group in self.args.custom_layer_parameters:\n layer_number = group.pop(\"layer\")\n layer = f\"layer.{layer_number}.\"\n group_d = {**group}\n group_nd = {**group}\n group_nd[\"weight_decay\"] = 0.0\n params_d = []\n params_nd = []\n for n, p in model.named_parameters():\n if n not in custom_parameter_names and layer in n:\n if any(nd in n for nd in no_decay):\n params_nd.append(p)\n else:\n params_d.append(p)\n custom_parameter_names.add(n)\n group_d[\"params\"] = params_d\n group_nd[\"params\"] = params_nd\n\n optimizer_grouped_parameters.append(group_d)\n optimizer_grouped_parameters.append(group_nd)\n\n if not self.args.train_custom_parameters_only:\n optimizer_grouped_parameters.extend(\n [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names and not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names and any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n )\n\n warmup_steps = math.ceil(t_total * args.warmup_ratio)\n args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps\n\n # TODO: Use custom optimizer like with BertSum?\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n if (\n args.model_name\n and os.path.isfile(os.path.join(args.model_name, \"optimizer.pt\"))\n and os.path.isfile(os.path.join(args.model_name, \"scheduler.pt\"))\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name, \"scheduler.pt\")))\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n logger.info(\" Training started\")\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.silent, mininterval=0)\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n steps_trained_in_current_epoch = 0\n epochs_trained = 0\n\n if args.model_name and os.path.exists(args.model_name):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name.split(\"/\")[-1].split(\"-\")\n if 
len(checkpoint_suffix) > 2:\n checkpoint_suffix = checkpoint_suffix[1]\n else:\n checkpoint_suffix = checkpoint_suffix[-1]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // args.gradient_accumulation_steps\n )\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the current epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n if args.evaluate_during_training:\n training_progress_scores = self._create_training_progress_scores(**kwargs)\n\n if args.wandb_project:\n wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)\n wandb.watch(self.model)\n\n if args.fp16:\n from torch.cuda import amp\n\n scaler = amp.GradScaler()\n\n model.train()\n for current_epoch in train_iterator:\n if epochs_trained > 0:\n epochs_trained -= 1\n continue\n train_iterator.set_description(f\"Epoch {epoch_number + 1} of {args.num_train_epochs}\")\n batch_iterator = tqdm(\n train_dataloader,\n desc=f\"Running Epoch {epoch_number} of {args.num_train_epochs}\",\n disable=args.silent,\n mininterval=0,\n )\n for step, batch in enumerate(batch_iterator):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n # batch = tuple(t.to(device) for t in batch)\n\n inputs = self._get_inputs_dict(batch)\n if args.fp16:\n with amp.autocast():\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n else:\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n current_loss = loss.item()\n\n if show_running_loss:\n batch_iterator.set_description(\n f\"Epochs {epoch_number}/{args.num_train_epochs}. 
Running Loss: {current_loss:9.4f}\"\n )\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n if args.fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n if args.wandb_project:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n self._save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args.evaluate_during_training and (\n args.evaluate_during_training_steps > 0\n and global_step % args.evaluate_during_training_steps == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results = self.eval_model(\n eval_data,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n **kwargs,\n )\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n if args.save_eval_checkpoints:\n self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False,\n )\n\n if args.wandb_project:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self._save_model(\n args.best_model_dir, optimizer, scheduler, model=model, results=results\n )\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self._save_model(\n args.best_model_dir, optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / 
global_step\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self._save_model(\n args.best_model_dir, optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n epoch_number += 1\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number))\n\n if args.save_model_every_epoch or args.evaluate_during_training:\n os.makedirs(output_dir_current, exist_ok=True)\n\n if args.save_model_every_epoch:\n self._save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args.evaluate_during_training:\n results = self.eval_model(\n eval_data,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n **kwargs,\n )\n\n if args.save_eval_checkpoints:\n self._save_model(output_dir_current, optimizer, scheduler, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False)\n\n if args.wandb_project:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if 
early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n return global_step, tr_loss / global_step\n\n def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_data. Saves results to output_dir.\n\n Args:\n eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.\n - `input_text`: The input text sequence.\n - `target_text`: The target text sequence.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.\n Returns:\n results: Dictionary containing evaluation results.\n \"\"\" # noqa: ignore flake8\"\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n self._move_model_to_device()\n\n eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)\n os.makedirs(output_dir, exist_ok=True)\n\n result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)\n self.results.update(result)\n\n if self.args.evaluate_generated_text:\n to_predict = eval_data[\"input_text\"].tolist()\n preds = self.predict(to_predict)\n\n result = self.compute_metrics(eval_data[\"target_text\"].tolist(), preds, **kwargs)\n self.results.update(result)\n\n if verbose:\n logger.info(self.results)\n\n return self.results\n\n def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_dataset.\n\n Utility function to be used by the eval_model() method. 
Not intended to be used directly.\n \"\"\"\n\n model = self.model\n args = self.args\n eval_output_dir = output_dir\n\n results = {}\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n for batch in tqdm(eval_dataloader, disable=args.silent or silent, desc=\"Running Evaluation\"):\n # batch = tuple(t.to(device) for t in batch)\n\n inputs = self._get_inputs_dict(batch)\n with torch.no_grad():\n outputs = model(**inputs)\n loss = outputs[0]\n eval_loss += loss.mean().item()\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n\n results[\"eval_loss\"] = eval_loss\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n return results\n\n def predict(self, to_predict):\n \"\"\"\n Performs predictions on a list of text.\n\n Args:\n to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.\n\n Returns:\n preds: A python list of the generated sequences.\n \"\"\" # noqa: ignore flake8\"\n\n self._move_model_to_device()\n\n all_outputs = []\n # Batching\n for batch in [\n to_predict[i : i + self.args.eval_batch_size] for i in range(0, len(to_predict), self.args.eval_batch_size)\n ]:\n if self.args.model_type == \"marian\":\n input_ids = self.encoder_tokenizer.prepare_translation_batch(\n batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors=\"pt\",\n )[\"input_ids\"]\n else:\n input_ids = self.encoder_tokenizer.batch_encode_plus(\n batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors=\"pt\",\n )[\"input_ids\"]\n input_ids = input_ids.to(self.device)\n\n if self.args.model_type in [\"bart\", \"marian\"]:\n outputs = self.model.generate(\n input_ids=input_ids,\n num_beams=self.args.num_beams,\n max_length=self.args.max_length,\n length_penalty=self.args.length_penalty,\n early_stopping=self.args.early_stopping,\n repetition_penalty=self.args.repetition_penalty,\n do_sample=self.args.do_sample,\n top_k=self.args.top_k,\n top_p=self.args.top_p,\n num_return_sequences=self.args.num_return_sequences,\n )\n else:\n outputs = self.model.generate(\n input_ids=input_ids,\n decoder_start_token_id=self.model.config.decoder.pad_token_id,\n num_beams=self.args.num_beams,\n max_length=self.args.max_length,\n length_penalty=self.args.length_penalty,\n early_stopping=self.args.early_stopping,\n repetition_penalty=self.args.repetition_penalty,\n do_sample=self.args.do_sample,\n top_k=self.args.top_k,\n top_p=self.args.top_p,\n num_return_sequences=self.args.num_return_sequences,\n )\n\n all_outputs.extend(outputs.cpu().numpy())\n\n if self.args.use_multiprocessed_decoding:\n self.model.to(\"cpu\")\n with Pool(self.args.process_count) as p:\n outputs = list(\n tqdm(\n p.imap(self._decode, all_outputs, chunksize=self.args.multiprocessing_chunksize),\n total=len(all_outputs),\n desc=\"Decoding outputs\",\n disable=self.args.silent,\n )\n )\n self._move_model_to_device()\n else:\n outputs = [\n self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for output_id in all_outputs\n ]\n\n if self.args.num_return_sequences > 1:\n return [\n outputs[i : 
i + self.args.num_return_sequences]\n for i in range(0, len(outputs), self.args.num_return_sequences)\n ]\n else:\n return outputs\n\n def _decode(self, output_id):\n return self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n\n def compute_metrics(self, labels, preds, **kwargs):\n \"\"\"\n Computes the evaluation metrics for the model predictions.\n\n Args:\n labels: List of target sequences\n preds: List of model generated outputs\n **kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.\n\n Returns:\n result: Dictionary containing evaluation results.\n \"\"\" # noqa: ignore flake8\"\n # assert len(labels) == len(preds)\n\n results = {}\n for metric, func in kwargs.items():\n results[metric] = func(labels, preds)\n\n return results\n\n def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):\n \"\"\"\n Creates a T5Dataset from data.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n \"\"\"\n\n encoder_tokenizer = self.encoder_tokenizer\n decoder_tokenizer = self.decoder_tokenizer\n args = self.args\n\n if not no_cache:\n no_cache = args.no_cache\n\n if not no_cache:\n os.makedirs(self.args.cache_dir, exist_ok=True)\n\n mode = \"dev\" if evaluate else \"train\"\n\n if args.dataset_class:\n CustomDataset = args.dataset_class\n return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)\n else:\n if args.model_type in [\"bart\", \"marian\"]:\n return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)\n else:\n return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode,)\n\n def _create_training_progress_scores(self, **kwargs):\n extra_metrics = {key: [] for key in kwargs}\n training_progress_scores = {\n \"global_step\": [],\n \"eval_loss\": [],\n \"train_loss\": [],\n **extra_metrics,\n }\n\n return training_progress_scores\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):\n if not output_dir:\n output_dir = self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n\n logger.info(f\"Saving model into {output_dir}\")\n\n if model and not self.args.no_save:\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n self._save_model_args(output_dir)\n\n if self.args.model_type in [\"bart\", \"marian\"]:\n os.makedirs(os.path.join(output_dir), exist_ok=True)\n model_to_save.save_pretrained(output_dir)\n self.config.save_pretrained(output_dir)\n if self.args.model_type == \"bart\":\n self.encoder_tokenizer.save_pretrained(output_dir)\n else:\n os.makedirs(os.path.join(output_dir, \"encoder\"), exist_ok=True)\n os.makedirs(os.path.join(output_dir, \"decoder\"), exist_ok=True)\n self.encoder_config.save_pretrained(os.path.join(output_dir, \"encoder\"))\n self.decoder_config.save_pretrained(os.path.join(output_dir, \"decoder\"))\n\n model_to_save = (\n self.model.encoder.module if hasattr(self.model.encoder, \"module\") else 
self.model.encoder\n )\n model_to_save.save_pretrained(os.path.join(output_dir, \"encoder\"))\n\n model_to_save = (\n self.model.decoder.module if hasattr(self.model.decoder, \"module\") else self.model.decoder\n )\n\n model_to_save.save_pretrained(os.path.join(output_dir, \"decoder\"))\n\n self.encoder_tokenizer.save_pretrained(os.path.join(output_dir, \"encoder\"))\n self.decoder_tokenizer.save_pretrained(os.path.join(output_dir, \"decoder\"))\n\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n if optimizer and scheduler and self.args.save_optimizer_and_scheduler:\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n\n if results:\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def _move_model_to_device(self):\n self.model.to(self.device)\n\n def _get_inputs_dict(self, batch):\n device = self.device\n if self.args.model_type in [\"bart\", \"marian\"]:\n pad_token_id = self.encoder_tokenizer.pad_token_id\n source_ids, source_mask, y = batch[\"source_ids\"], batch[\"source_mask\"], batch[\"target_ids\"]\n y_ids = y[:, :-1].contiguous()\n lm_labels = y[:, 1:].clone()\n lm_labels[y[:, 1:] == pad_token_id] = -100\n\n inputs = {\n \"input_ids\": source_ids.to(device),\n \"attention_mask\": source_mask.to(device),\n \"decoder_input_ids\": y_ids.to(device),\n \"lm_labels\": lm_labels.to(device),\n }\n else:\n lm_labels = batch[1]\n lm_labels_masked = lm_labels.clone()\n lm_labels_masked[lm_labels_masked == self.decoder_tokenizer.pad_token_id] = -100\n\n inputs = {\n \"input_ids\": batch[0].to(device),\n \"decoder_input_ids\": lm_labels.to(device),\n \"labels\": lm_labels_masked.to(device),\n }\n\n return inputs\n\n def _save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n self.args.save(output_dir)\n\n def _load_model_args(self, input_dir):\n args = Seq2SeqArgs()\n args.load(input_dir)\n return args\n\n def get_named_parameters(self):\n return [n for n, p in self.model.named_parameters()]\n" ]
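The code field of this record ends above; the two rows that follow list the APIs detected in it and the library versions they imply. One reusable pattern in that training code is how parameters are partitioned before AdamW is constructed: any parameter whose name contains "bias" or "LayerNorm.weight" is placed in a group with weight decay 0.0. A minimal sketch of that partitioning, assuming only a generic torch.nn.Module; the function name and default decay below are illustrative, not part of the record:

import torch

def build_param_groups(model: torch.nn.Module, weight_decay: float = 0.01):
    # Same convention as the record: biases and LayerNorm weights are
    # exempt from weight decay, everything else gets the configured value.
    no_decay = ("bias", "LayerNorm.weight")
    decay_params, no_decay_params = [], []
    for name, param in model.named_parameters():
        if any(nd in name for nd in no_decay):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    return [
        {"params": decay_params, "weight_decay": weight_decay},
        {"params": no_decay_params, "weight_decay": 0.0},
    ]

# Usage with a hypothetical model:
# optimizer = torch.optim.AdamW(build_param_groups(model), lr=4e-5)

The record layers its custom_parameter_groups and per-layer overrides on top of this same two-group structure.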
[ [ "numpy.random.seed", "torch.Tensor", "torch.manual_seed", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.device" ], [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "pandas.DataFrame", "torch.cuda.amp.autocast", "torch.cuda.amp.GradScaler", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.device", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Tudor67/Neural-Networks-Assignments
[ "7376e9d3b0059df2f2b21d56787c47d3c1ba6746", "7376e9d3b0059df2f2b21d56787c47d3c1ba6746" ]
[ "2018-2019/project/utils/evaluation.py", "2018-2019/project/utils/preprocessing.py" ]
[ "import config\nimport numpy as np\nimport tensorflow as tf\n\ndef get_tp_fp_fn(a, b):\n a = np.equal(a, 1)\n not_a = np.logical_not(a)\n b = np.equal(b, 1)\n not_b = np.logical_not(b)\n \n tp = np.logical_and(a, b).sum().astype(np.float64)\n fp = np.logical_and(a, not_b).sum().astype(np.float64)\n fn = np.logical_and(not_a, b).sum().astype(np.float64)\n \n return tp, fp, fn\n\ndef jaccard(a, b):\n tp, fp, fn = get_tp_fp_fn(a, b)\n \n jaccard_coef = None\n if tp + fp + fn == 0:\n jaccard_coef = 1.\n else:\n jaccard_coef = tp / (tp + fp + fn)\n \n return jaccard_coef\n\ndef dice(a, b):\n tp, fp, fn = get_tp_fp_fn(a, b)\n \n dice_coef = None\n if tp + fp + fn == 0:\n dice_coef = 1.\n else:\n dice_coef = (2 * tp) / (2 * tp + fp + fn)\n \n return dice_coef\n\ndef jaccard_and_dice(preds, gts, thr):\n jaccard_and_dice_res = np.zeros((len(preds), 2))\n \n for idx, (pred, gt) in enumerate(zip(preds, gts)):\n pred = (pred >= thr)\n \n jaccard_coef = jaccard(pred, gt)\n dice_coef = dice(pred, gt)\n \n jaccard_and_dice_res[idx] = (jaccard_coef, dice_coef)\n \n return jaccard_and_dice_res\n\n# tensorflow implementation (with thr)\ndef tf_get_tp_fp_fn(a_in, b_in):\n a = tf.greater_equal(a_in, config.PRED_THR)\n not_a = tf.logical_not(a)\n b = tf.greater_equal(b_in, config.PRED_THR)\n not_b = tf.logical_not(b)\n \n tp_and = tf.logical_and(a, b)\n tp_count = tf.count_nonzero(tp_and)\n tp = tf.cast(tp_count, tf.float64)\n \n fp_and = tf.logical_and(a, not_b)\n fp_count = tf.count_nonzero(fp_and)\n fp = tf.cast(fp_count, tf.float64)\n \n fn_and = tf.logical_and(not_a, b)\n fn_count = tf.count_nonzero(fn_and)\n fn = tf.cast(fn_count, tf.float64)\n \n return tp, fp, fn\n\ndef tf_jaccard(a, b):\n tp, fp, fn = tf_get_tp_fp_fn(a, b)\n jaccard_coef = tf.cond(tf.equal(tp + fp + fn, 0),\n lambda: tf.constant(1, tf.float64),\n lambda: tp / (tp + fp + fn))\n return jaccard_coef\n\ndef tf_dice(a, b):\n tp, fp, fn = tf_get_tp_fp_fn(a, b)\n dice_coef = tf.cond(tf.equal(tp + fp + fn, 0),\n lambda: tf.constant(1, tf.float64),\n lambda: (2 * tp) / (2 * tp + fp + fn))\n return dice_coef", "import numpy as np\nimport os\nimport skimage\nimport sys\n\ndef resize_images(images, new_h, new_w, ch):\n resized_images = np.zeros([len(images), new_h, new_w, ch])\n for idx, img in enumerate(images):\n resized_images[idx] = skimage.transform.resize(img,\n [new_h, new_w, ch],\n mode='constant',\n anti_aliasing=False)\n return resized_images\n\ndef crop_image(img, patch_h=256, patch_w=256):\n patch_shape = (patch_h, patch_w, 3)\n if img.ndim == 2:\n img = img[:,:,np.newaxis]\n patch_shape = (patch_h, patch_w, 1)\n \n row_pad = (patch_shape[0] - (img.shape[0] % patch_shape[0])) % patch_shape[0]\n col_pad = (patch_shape[1] - (img.shape[1] % patch_shape[1])) % patch_shape[1]\n \n img_pad = np.pad(img, [(0, row_pad), (0, col_pad), (0, 0)], 'constant')\n \n rows_start = range(0, img_pad.shape[0], patch_shape[0])\n cols_start = range(0, img_pad.shape[1], patch_shape[1])\n \n patches = np.zeros([len(rows_start), len(cols_start), *patch_shape],\n dtype=np.uint8)\n \n for i, row in enumerate(rows_start):\n for j, col in enumerate(cols_start):\n patches[i][j] = img_pad[row:row + patch_shape[0],\n col:col + patch_shape[1],\n :]\n if patches.shape[4] == 1:\n patches = patches.squeeze(axis=4)\n \n return patches\n\ndef merge_patches(patches, img_h, img_w):\n img_pad_h = patches.shape[0] * patches.shape[2]\n img_pad_w = patches.shape[1] * patches.shape[3]\n \n # combine patches\n patches = np.moveaxis(patches, 2, 1)\n img_pad = 
patches.reshape([img_pad_h, img_pad_w, -1])\n \n # remove padding\n img = img_pad[:img_h, :img_w, :].squeeze()\n \n return img\n\ndef crop_images_and_save(images, img_names,\n save_path, img_format,\n patch_h, patch_w):\n \n if not os.path.isdir(save_path):\n os.makedirs(save_path)\n \n for img, img_name in zip(images, img_names):\n img_patches = crop_image(img, patch_h, patch_w)\n \n for i in range(img_patches.shape[0]):\n for j in range(img_patches.shape[1]):\n filename = f'{save_path}/{img_name}_{i}_{j}.{img_format}'\n skimage.io.imsave(filename, img_patches[i][j])\n \ndef crop_images_from_dir_and_save_all(images_path, save_path, patch_h, patch_w,\n img_format, append_h_w=True):\n img_names = os.listdir(images_path)\n for img_name in img_names:\n img = skimage.io.imread(f'{images_path}/{img_name}')\n \n img_name_with_shape = None\n if append_h_w:\n img_name_with_shape = append_img_name_with_h_w(remove_img_formats([img_name]),\n get_img_shapes([img]))[0]\n else:\n img_name_with_shape = remove_img_formats([img_name])[0]\n \n crop_images_and_save([img], [img_name_with_shape],\n save_path=save_path,\n img_format=img_format,\n patch_h=patch_h,\n patch_w=patch_w)\n \ndef load_patches(img_name, patches_path):\n patches_names_all = os.listdir(patches_path)\n patches = []\n max_row = 0\n \n for patch_name in sorted(patches_names_all):\n if patch_name.startswith(img_name):\n patch = skimage.io.imread(f'{patches_path}/{patch_name}')\n patches.append(patch)\n \n # useful for patches.reshape\n patch_shape = patch.shape\n row = 1 + int(patch_name.split('_')[-2])\n max_row = max(max_row, row)\n \n patches = np.array(patches).astype(np.uint8).reshape(max_row, -1, *patch_shape)\n return patches \n\ndef get_img_shapes(images):\n return [img.shape for img in images]\n\ndef remove_img_formats(img_names):\n return ['.'.join(img_name.split('.')[:-1]) for img_name in img_names]\n\ndef remove_grid_indices(img_names):\n return ['_'.join(img_name.split('_')[:-2]) for img_name in img_names]\n\ndef append_img_name_with_h_w(img_names, img_shapes):\n return [f'{img_name}_{img_shape[0]}_{img_shape[1]}'\n for img_name, img_shape in zip(img_names, img_shapes)]\n\ndef get_img_shapes_from_strings(img_names):\n img_shapes = []\n for img_name in img_names:\n h = int(img_name.split('_')[-4])\n w = int(img_name.split('_')[-3])\n img_shapes.append((h, w))\n return img_shapes\n\ndef merge_patches_and_save(img_shapes, img_names, patches_path,\n save_path, img_format):\n \n if not os.path.isdir(save_path):\n os.makedirs(save_path)\n \n for img_shape, img_name in zip(img_shapes, img_names):\n img_h, img_w = img_shape[:2]\n patches = load_patches(img_name, patches_path)\n img_from_patches = merge_patches(patches, img_h=img_h, img_w=img_w)\n \n filename = f'{save_path}/{img_name}.{img_format}'\n skimage.io.imsave(filename, img_from_patches)\n\ndef crop_images_and_save_all(dataset_with_img_names, dataset_path,\n img_format='png', patch_h=256, patch_w=256, \n append_img_h_w=False):\n \n # dataset splits\n train, train_img_names, val, val_img_names, test, test_img_names = dataset_with_img_names\n train_images, train_masks = train\n val_images, val_masks = val\n test_images, test_masks = test\n \n if append_img_h_w:\n train_img_names = append_img_name_with_h_w(train_img_names, get_img_shapes(train_images))\n val_img_names = append_img_name_with_h_w(val_img_names, get_img_shapes(val_images))\n test_img_names = append_img_name_with_h_w(test_img_names, get_img_shapes(test_images))\n \n d_splits = [(train_images, train_img_names, 
'train_img'),\n (train_masks, train_img_names, 'train_mask'),\n (val_images, val_img_names, 'val_img'),\n (val_masks, val_img_names, 'val_mask'),\n (test_images, test_img_names, 'test_img'),\n (test_masks, test_img_names, 'test_mask')]\n \n for images, img_names, split_name in d_splits:\n save_path=f'{dataset_path}/{split_name.split(\"_\")[0]}/{split_name}_patches'\n \n crop_images_and_save(images, img_names,\n save_path=save_path,\n img_format=img_format,\n patch_h=patch_h,\n patch_w=patch_w)\n \ndef merge_patches_and_save_all(dataset_with_img_names,\n dataset_path,\n img_format='png'):\n \n # dataset splits\n train, train_img_names, val, val_img_names, test, test_img_names = dataset_with_img_names\n train_images, train_masks = train\n val_images, val_masks = val\n test_images, test_masks = test\n \n d_splits = [(train_images, train_img_names, 'train_img'),\n (train_masks, train_img_names, 'train_mask'),\n (val_images, val_img_names, 'val_img'),\n (val_masks, val_img_names, 'val_mask'),\n (test_images, test_img_names, 'test_img'),\n (test_masks, test_img_names, 'test_mask')]\n \n for images, img_names, split_name in d_splits:\n patches_path = f'{dataset_path}/{split_name.split(\"_\")[0]}/{split_name}_patches'\n save_path = f'{dataset_path}/{split_name.split(\"_\")[0]}/{split_name}_from_patches'\n \n img_shapes = get_img_shapes(images)\n \n merge_patches_and_save(img_shapes, img_names,\n patches_path=patches_path,\n save_path=save_path,\n img_format=img_format)\n \ndef merge_patches_directly_and_save_all(results_path,\n split_types=['pred'],\n img_format='png'):\n \n for split_name in ['train', 'val', 'test']:\n for split_type in split_types:\n patches_path = f'{results_path}/{split_name}/{split_name}_{split_type}_patches'\n save_path = f'{results_path}/{split_name}/{split_name}_{split_type}_from_patches'\n\n img_names_full = os.listdir(patches_path)\n img_names = remove_grid_indices(img_names_full) \n img_shapes = get_img_shapes_from_strings(img_names_full)\n \n # remove the same img_names (duplicates)\n unique_img_names = []\n unique_img_shapes = []\n for img_name, img_shape in zip(img_names, img_shapes):\n if img_name not in unique_img_names:\n unique_img_names.append(img_name)\n unique_img_shapes.append(img_shape)\n\n merge_patches_and_save(unique_img_shapes,\n unique_img_names,\n patches_path=patches_path,\n save_path=save_path,\n img_format=img_format)\n \ndef merge_patches_from_dir_and_save_all(patches_path,\n save_path,\n img_format='png'):\n \n img_names_full = os.listdir(patches_path)\n img_names = remove_grid_indices(img_names_full)\n img_shapes = get_img_shapes_from_strings(img_names_full)\n \n # remove the same img_names (duplicates)\n unique_img_names = []\n unique_img_shapes = []\n for img_name, img_shape in zip(img_names, img_shapes):\n if img_name not in unique_img_names:\n unique_img_names.append(img_name)\n unique_img_shapes.append(img_shape)\n\n merge_patches_and_save(unique_img_shapes,\n unique_img_names,\n patches_path=patches_path,\n save_path=save_path,\n img_format=img_format)" ]
[ [ "numpy.logical_not", "tensorflow.constant", "tensorflow.count_nonzero", "numpy.logical_and", "tensorflow.cast", "tensorflow.equal", "numpy.equal", "tensorflow.greater_equal", "tensorflow.logical_not", "tensorflow.logical_and" ], [ "numpy.array", "numpy.pad", "numpy.moveaxis" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
belkhir-nacim/generative_model_toolbox
[ "573e69979a77030004afe2df216893f556225454" ]
[ "generative_models_toolbox/vqvae2/sample.py" ]
[ "import argparse\nimport os\n\nimport torch\nimport torchvision.utils\nfrom tqdm import tqdm\n\nfrom .vqvae import VQVAE\nfrom .pixelsnail import PixelSNAIL\n\n\[email protected]_grad()\ndef sample_model(model, device, batch, size, temperature, condition=None):\n row = torch.zeros(batch, *size, dtype=torch.int64).to(device)\n cache = {}\n\n for i in tqdm(range(size[0])):\n for j in range(size[1]):\n out, cache = model(row[:, : i + 1, :], condition=condition, cache=cache)\n prob = torch.softmax(out[:, :, i, j] / temperature, 1)\n sample = torch.multinomial(prob, 1).squeeze(-1)\n row[:, i, j] = sample\n return row\n\n\ndef load_model(model: str, checkpoint: str, device):\n ckpt = torch.load(checkpoint)\n\n if 'args' in ckpt:\n args = ckpt['args']\n\n if model == 'vqvae':\n model = VQVAE()\n\n elif model == 'pixelsnail_top':\n model = PixelSNAIL(\n [32, 32],\n 512,\n args.channel,\n 5,\n 4,\n args.n_res_block,\n args.n_res_channel,\n dropout=args.dropout,\n n_out_res_block=args.n_out_res_block,\n )\n\n elif model == 'pixelsnail_bottom':\n model = PixelSNAIL(\n [64, 64],\n 512,\n args.channel,\n 5,\n 4,\n args.n_res_block,\n args.n_res_channel,\n attention=False,\n dropout=args.dropout,\n n_cond_res_block=args.n_cond_res_block,\n cond_res_channel=args.n_res_channel,\n )\n\n if 'model' in ckpt:\n ckpt = ckpt['model']\n\n model.load_state_dict(ckpt)\n model = model.to(device)\n model.eval()\n\n return model\n\n\ndef main():\n device = 'cuda'\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch', type=int, default=8)\n parser.add_argument('--vqvae', type=str)\n parser.add_argument('--top', type=str)\n parser.add_argument('--bottom', type=str)\n parser.add_argument('--temp', type=float, default=1.0)\n parser.add_argument('filename', type=str)\n\n args = parser.parse_args()\n\n model_vqvae = load_model('vqvae', args.vqvae, device)\n model_top = load_model('pixelsnail_top', args.top, device)\n model_bottom = load_model('pixelsnail_bottom', args.bottom, device)\n\n top_sample = sample_model(model_top, device, args.batch, [32, 32], args.temp)\n bottom_sample = sample_model(\n model_bottom, device, args.batch, [64, 64], args.temp, condition=top_sample\n )\n\n decoded_sample = model_vqvae.decode_code(top_sample, bottom_sample)\n decoded_sample = decoded_sample.clamp(-1, 1)\n\n torchvision.utils.save_image(decoded_sample, args.filename, normalize=True, range=(-1, 1))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.softmax", "torch.zeros", "torch.load", "torch.multinomial", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tatsushi-ikeda/pyheom
[ "d069fcf791959942b7a0357cda349d9976e06313" ]
[ "pyheom/pyheom.py" ]
[ "# \n# LibHEOM: Copyright (c) Tatsushi Ikeda\n# This library is distributed under BSD 3-Clause License.\n# See LINCENSE.txt for licence.\n# ------------------------------------------------------------------------\n\nimport enum\nimport sys\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse\nimport importlib\n\npylibheom = importlib.import_module(\"pylibheom\")\nfrom pyheom.noise_decomposition import *\n\nversion = getattr(pylibheom, 'version')()\n__version__ = version\n\nunit = enum.Enum('unit',\n '''dimensionless\n femtosecond\n picosecond\n wavenumber\n electronvolt''')\n\nhbar__J_s = 1.05457180013e-34\nUNIT_ENERGY_VALUE__J = {\n unit.wavenumber: 1.98644582441459e-23, # (299792458*100*6.62607004081e-34)\n unit.electronvolt: 1.602176620898e-19,\n};\nUNIT_TIME_VALUE__S = {\n unit.femtosecond: 1.0e-15,\n unit.picosecond: 1.0e-12,\n}\n\nunits = {'energy':unit.dimensionless,\n 'time': unit.dimensionless}\n\ndef calc_unit():\n if (units['energy'] == unit.dimensionless or units['time'] == unit.dimensionless):\n if (units['energy'] == unit.dimensionless and units['time'] == unit.dimensionless):\n result = 1.0\n else:\n print('[Error] Unit mismatch error: Both unit_energy and unit_time should be dimensionless.', file=sys.stderr)\n sys.exit(1)\n else:\n result = (UNIT_ENERGY_VALUE__J[units['energy']]\n *UNIT_TIME_VALUE__S[units['time']]\n /hbar__J_s)\n return result\n\n\ndef get_coo_matrix(matrix):\n impl_class_name = \"coo_matrix\"\n if matrix.dtype == np.complex64:\n ipml_class_name += \"_c\"\n elif matrix.dtype == np.complex128:\n impl_class_name += \"_z\"\n else:\n print('[Error] Unsupported matrix type: {}.'.format(matrix.dtype),\n file=sys.stderr)\n sys.exit(1)\n coo = sp.sparse.coo_matrix(matrix)\n impl_class = getattr(pylibheom, impl_class_name)\n return impl_class(\n coo.shape[0],\n coo.shape[1],\n coo.nnz,\n coo.row,\n coo.col,\n coo.data)\n \nclass heom():\n def __init__(self,\n H,\n noises,\n max_tier,\n matrix_type='sparse',\n hrchy_connection='loop',\n hrchy_filter=None,\n gpu_device=None,\n callback=lambda lidx, est: None,\n callback_interval=1024,\n unrolling=False):\n self.n_state = H.shape[0]\n \n impl_class_name = 'heom_z'\n\n if matrix_type == 'dense':\n impl_class_name += 'd'\n elif matrix_type == 'sparse':\n impl_class_name += 's'\n else:\n print('[Error] Unknown internal matrix type: {}.'.format(\n matrix_type))\n sys.exit(1)\n \n impl_class_name += 'l'\n\n if hrchy_connection == 'loop':\n impl_class_name += 'l'\n elif hrchy_connection == 'hierarchical-Liouville':\n impl_class_name += 'h'\n else:\n print('[Error] Unknown hrchy_connection: {}.'.format(\n hrchy_connection))\n sys.exit(1)\n\n if unrolling and self.n_state in [2, 3]:\n impl_class_name += '_{}'.format(self.n_state)\n \n if (not gpu_device is None):\n if getattr(pylibheom, 'support_gpu_parallelization'):\n impl_class_name += '_gpu'\n else:\n print('[Error] gpu parallelization is not supported.')\n print(' specified gpu device: {}.'.format(gpu_device))\n sys.exit(1)\n\n self.impl = getattr(pylibheom, impl_class_name)()\n \n if (not gpu_device is None):\n self.impl.set_device_number(gpu_device)\n \n self.impl.set_hamiltonian(get_coo_matrix(H.astype(np.complex128)))\n\n n_noise = len(noises)\n self.impl.alloc_noises(n_noise)\n \n self.noises = []\n \n for u in range(n_noise):\n gamma = noises[u][\"C\"][\"gamma\"].astype(np.complex128)\n phi_0 = noises[u][\"C\"][\"phi_0\"].astype(np.complex128)\n sigma = noises[u][\"C\"][\"sigma\"].astype(np.complex128)\n s = noises[u][\"C\"][\"s\"].astype(np.complex128)\n 
a = noises[u][\"C\"][\"a\"].astype(np.complex128)\n S_delta = complex(noises[u][\"C\"][\"S_delta\"])\n self.noises.append(type(\"noise\", (object,),\n dict(gamma=gamma,\n phi_0=phi_0,\n sigma_s=s.T@sigma,\n sigma_a=a.T@sigma,\n S_delta=S_delta)))\n self.impl.set_noise(u,\n get_coo_matrix(noises[u][\"V\"].astype(np.complex128)),\n get_coo_matrix(gamma),\n phi_0,\n sigma,\n get_coo_matrix(s),\n S_delta,\n get_coo_matrix(a))\n\n if hrchy_filter:\n self.hrchy_filter = lambda index, depth, lk: hrchy_filter(index, depth, lk, self.noises)\n else:\n self.hrchy_filter = lambda index, depth, lk, noises: True\n\n self.impl.linearize()\n self.n_hrchy \\\n = self.impl.alloc_hrchy(max_tier,\n callback,\n callback_interval,\n self.hrchy_filter,\n False if hrchy_filter is None else True)\n self.rho_h = np.zeros((self.n_state, self.n_state, self.n_hrchy),\n dtype=np.complex128, order='F')\n \n self.impl.init_aux_vars()\n \n def construct_commutator(self,\n x, coef_l, coef_r,\n callback=lambda lidx, est: None,\n callback_interval=1024):\n x_coo = sp.sparse.coo_matrix(x)\n self.impl.construct_commutator(x_coo.shape[0],\n x_coo.shape[1],\n x_coo.nnz,\n x_coo.row,\n x_coo.col,\n x_coo.data.astype(np.complex128),\n coef_l,\n coef_r,\n callback,\n callback_interval)\n\n def apply_commutator(self):\n self.impl.apply_commutator(self.rho_h.ravel(order='F'))\n\n def set_rho(self, rho):\n self.rho_h[:,:,0] = rho[:,:]\n\n def get_rho(self):\n return np.copy(self.rho_h[:,:,0])\n\n def set_rho_h(self, rho_h):\n self.rho_h[:,:,:] = rho_h[:,:,:]\n\n def get_rho_h(self):\n return np.copy(self.rho_h[:,:,:])\n\n def calc_diff(self, rho_h):\n drho_h_dt = np.zeros_like(rho_h)\n self.impl.calc_diff(drho_h_dt.ravel(order='F'),\n rho_h.ravel(order='F'),\n 1, 0)\n return drho_h_dt\n\n def get_diff_func(self):\n return lambda t, rho_h: self.calc_diff(rho_h)\n\n def solve(self, dt__unit, count,\n callback=lambda t, rho: None,\n callback_interval=1):\n self.impl.solve(self.rho_h.ravel(order='F'),\n dt__unit, dt__unit*calc_unit(),\n callback_interval, count//callback_interval,\n lambda t: callback(t, self.rho_h[:,:,0]))\n\n\nclass redfield():\n def __init__(self,\n H,\n noises,\n matrix_type='sparse',\n operator_space='Liouville',\n gpu_device=None,\n callback=lambda lidx: None,\n callback_interval=1024,\n unrolling=False,\n secular=False,\n H_c=None):\n self.n_state = H.shape[0]\n \n impl_class_name = 'redfield_z'\n\n if matrix_type == 'dense':\n impl_class_name += 'd'\n elif matrix_type == 'sparse':\n impl_class_name += 's'\n else:\n print('[Error] Unknown internal matrix type: {}.'.format(\n matrix_type))\n sys.exit(1)\n\n if operator_space == 'Hilbert':\n impl_class_name += 'h'\n elif operator_space == 'Liouville':\n impl_class_name += 'l'\n else:\n print('[Error] Unknown internal operator space: {}.'.format(\n operator_space))\n sys.exit(1)\n \n if unrolling and self.n_state in [2, 3]:\n impl_class_name += '_{}'.format(self.n_state)\n \n if (not gpu_device is None):\n if support_gpu_parallelization:\n impl_class_name += '_gpu'\n else:\n print('[Error] gpu parallelization is not supported.')\n print(' specified gpu device: {}.'.format(gpu_device))\n sys.exit(1)\n \n self.impl = getattr(pylibheom, impl_class_name)()\n \n if (not gpu_device is None):\n self.impl.set_device_number(gpu_device)\n \n E, self.Z = np.linalg.eig(H)\n self.impl.set_hamiltonian(get_coo_matrix(np.diag(E).astype(np.complex128)))\n if H_c is None:\n H_c = np.zeros_like(H)\n \n 
self.impl.set_redfield_options(get_coo_matrix(self.Z.T.conj()@H_c@(self.Z).astype(np.complex128)),\n secular)\n\n n_noise = len(noises)\n self.impl.alloc_noises(n_noise)\n for u in range(n_noise):\n V = get_coo_matrix((self.Z.T.conj())@noises[u][\"V\"]@(self.Z).astype(np.complex128))\n if \"func\" in noises[u][\"C\"]:\n self.impl.set_noise_func(u, V, noises[u][\"C\"][\"func\"])\n else: \n gamma = noises[u][\"C\"][\"gamma\"]\n phi_0 = noises[u][\"C\"][\"phi_0\"]\n sigma = noises[u][\"C\"][\"sigma\"]\n s = noises[u][\"C\"][\"s\"]\n a = noises[u][\"C\"][\"a\"]\n S_delta = noises[u][\"C\"][\"S_delta\"]\n self.impl.set_noise(u,\n V,\n get_coo_matrix(gamma.astype(np.complex128)),\n phi_0.astype(np.complex128),\n sigma.astype(np.complex128),\n get_coo_matrix(s.astype(np.complex128)),\n complex(S_delta),\n get_coo_matrix(a.astype(np.complex128)))\n \n \n self.rho = np.zeros((self.n_state, self.n_state),\n dtype=np.complex128,\n order='F')\n \n self.impl.init_aux_vars()\n \n def construct_commutator(self,\n x, coef_l, coef_r,\n callback=lambda lidx: None,\n callback_interval=1024):\n x_coo = sp.sparse.coo_matrix((self.Z.T.conj())@x@(self.Z))\n self.impl.construct_commutator(x_coo.shape[0],\n x_coo.shape[1],\n x_coo.nnz,\n x_coo.row,\n x_coo.col,\n x_coo.data.astype(np.complex128),\n coef_l,\n coef_r,\n callback,\n callback_interval)\n\n def apply_commutator(self):\n self.impl.apply_commutator(self.rho.ravel(order='F'))\n \n def set_rho(self, rho):\n self.rho[:,:] = (self.Z.T.conj())@rho[:,:]@(self.Z)\n\n def get_rho(self):\n return np.copy((self.Z)@self.rho[:,:]@(self.Z.T.conj()))\n\n def calc_diff(self, rho):\n drho_dt = np.zeros_like(rho)\n self.impl.calc_diff(drho_dt.ravel(order='F'),\n ((self.Z.T.conj())@rho.reshape((self.n_state, self.n_state), order='F')@(self.Z)).ravel(order='F'),\n 1, 0)\n return ((self.Z)@drho_dt.reshape((self.n_state, self.n_state), order='F')@(self.Z.T.conj())).ravel(order='F')\n \n def get_diff_func(self):\n return lambda t, rho: self.calc_diff(rho)\n\n def solve(self, dt__unit, count,\n callback=lambda t, rho: None,\n callback_interval=1):\n self.impl.solve(self.rho.ravel(order='F'),\n dt__unit, dt__unit*calc_unit(),\n callback_interval, count//callback_interval,\n lambda t: callback(t, (self.Z)@self.rho[:,:]@(self.Z.T.conj())))\n" ]
[ [ "numpy.diag", "scipy.sparse.coo_matrix", "numpy.linalg.eig", "numpy.copy", "numpy.zeros_like", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
marcusinthesky/Word2Risk
[ "0212718369f04607a1b06c009df9e6cee29fe103" ]
[ "scraper/News/spiders/biznews.py" ]
[ "import scrapy\nimport pandas as pd\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass biznewsSpider(scrapy.Spider):\n name = \"biznews\"\n \n def __init__(self, *a, **kw):\n super(biznewsSpider, self).__init__(*a, **kw)\n path = os.path.join(os.path.expanduser(\"~\"),\"Documents\",\"NMRQL\",\"Scraper\",\"News\",\"companies.csv\")\n self.companies = pd.read_csv(path).date.tolist()\n self.next_tag = 'html body#gsr.srp.tbo.vasq div#main div#cnt.big div.mw div#rcnt div.col div#center_col div div#foot span#xjs div#navcnt table#nav tbody tr td.b.navend a#pnnext.pn span::text'\n self.site = \"www.biznews.com\"\n\n def start_requests(self):\n for company in self.companies:\n self.pages = 1\n \n while True: \n l = f'https://www.bing.com/search?q=site%3a+{self.site}+\"{company.replace(\" \", \"+\")}\"&rf=1&qpvt=site%3a+{self.site}+\"+{company.replace(\" \", \"+\")}+\"&lf=&first={self.pages}0'\n r = requests.get(l)\n soup = BeautifulSoup(r.text, 'html.parser')\n pages_list = [int(i.text) for i in soup.find_all('a', attrs='sb_bp') if str.isnumeric(i.text)]\n \n if self.pages in pages_list:\n self.pages += 1\n yield scrapy.Request(l, callback=self.get_links_parse, meta={'company':company})\n else:\n break\n \n def get_links_parse(self, response):\n company = response.meta['company']\n for url in response.css(f'a[href^=\"https://{self.site}\"]::attr(href)').extract(): \n yield scrapy.Request(url, callback=self.yield_text_parse, meta={'company':company, 'url': url})\n\n def yield_text_parse(self, response):\n company = response.meta['company']\n url = response.meta['url']\n #title = response.css('div.article_header h2::text').extract_first()\n date = response.css('meta[property$=\"time\"]::attr(content)').extract_first()\n text = ' '.join(response.css('div.entry-content p::text').extract())\n \n yield {\n 'source': url,\n 'company': company,\n #'title': title,\n 'date':date,\n 'text': text\n }" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
esgomezm/deepcell-tf
[ "6693c9ed7e76793561e6c2281437acaf3e4fa441", "6693c9ed7e76793561e6c2281437acaf3e4fa441", "6693c9ed7e76793561e6c2281437acaf3e4fa441" ]
[ "deepcell/layers/location_test.py", "deepcell/model_zoo/fpn.py", "deepcell/own_training.py" ]
[ "# Copyright 2016-2019 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the location layers\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.platform import test\n\nfrom deepcell.utils import testing_utils\nfrom deepcell import layers\n\n\n@keras_parameterized.run_all_keras_modes\nclass LocationTest(keras_parameterized.TestCase):\n\n def test_location_2d(self):\n testing_utils.layer_test(\n layers.Location2D,\n kwargs={'in_shape': (5, 6, 4),\n 'data_format': 'channels_last'},\n custom_objects={'Location2D': layers.Location2D},\n input_shape=(3, 5, 6, 4))\n testing_utils.layer_test(\n layers.Location2D,\n kwargs={'in_shape': (4, 5, 6),\n 'data_format': 'channels_first'},\n custom_objects={'Location2D': layers.Location2D},\n input_shape=(3, 4, 5, 6))\n\n def test_location_3d(self):\n testing_utils.layer_test(\n layers.Location3D,\n kwargs={'in_shape': (11, 12, 10, 4),\n 'data_format': 'channels_last'},\n custom_objects={'Location3D': layers.Location3D},\n input_shape=(3, 11, 12, 10, 4))\n testing_utils.layer_test(\n layers.Location3D,\n kwargs={'in_shape': (4, 11, 12, 10),\n 'data_format': 'channels_first'},\n custom_objects={'Location3D': layers.Location3D},\n input_shape=(3, 4, 11, 12, 10))\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016-2019 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in 
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Feature pyramid network utility functions\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport re\n\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.layers import Conv2D, Conv3D, DepthwiseConv2D\nfrom tensorflow.python.keras.layers import Softmax\nfrom tensorflow.python.keras.layers import Input, Add\nfrom tensorflow.python.keras.layers import Activation\nfrom tensorflow.python.keras.layers import UpSampling2D, UpSampling3D\nfrom tensorflow.python.keras.layers import BatchNormalization\n\nfrom deepcell.layers import UpsampleLike\nfrom deepcell.layers import TensorProduct, ImageNormalization2D\nfrom deepcell.utils.backbone_utils import get_backbone\nfrom deepcell.utils.misc_utils import get_sorted_keys\n\n\ndef create_pyramid_level(backbone_input,\n upsamplelike_input=None,\n addition_input=None,\n upsample_type='upsamplelike',\n level=5,\n ndim=2,\n lite=False,\n interpolation='bilinear',\n feature_size=256):\n \"\"\"Create a pyramid layer from a particular backbone input layer.\n\n Args:\n backbone_input (layer): Backbone layer to use to create they pyramid\n layer\n upsamplelike_input (tensor): Optional input to use\n as a template for shape to upsample to\n addition_input (layer): Optional layer to add to\n pyramid layer after convolution and upsampling.\n upsample_type (str, optional): Choice of upsampling methods\n from ['upsamplelike','upsampling2d','upsampling3d'].\n Defaults to 'upsamplelike'.\n level (int): Level to use in layer names, defaults to 5.\n feature_size (int):Number of filters for\n convolutional layer, defaults to 256.\n ndim (int): The spatial dimensions of the input data. Default is 2,\n but it also works with 3\n lite (bool): Whether to use depthwise conv instead of regular conv for\n feature pyramid construction\n interpolation (str): Choice of interpolation mode for upsampling\n layers from ['bilinear', 'nearest']. Defaults to bilinear.\n\n Returns:\n tuple: Pyramid layer after processing, upsampled pyramid layer\n\n Raises:\n ValueError: ndim is not 2 or 3\n ValueError: upsample_type not ['upsamplelike','upsampling2d',\n 'upsampling3d']\n \"\"\"\n # Check input to ndims\n acceptable_ndims = {2, 3}\n if ndim not in acceptable_ndims:\n raise ValueError('Only 2 and 3 dimensional networks are supported')\n\n # Check if inputs to ndim and lite are compatible\n if ndim == 3 and lite:\n raise ValueError('lite == True is not compatible with 3 dimensional '\n 'networks')\n\n # Check input to interpolation\n acceptable_interpolation = {'bilinear', 'nearest'}\n if interpolation not in acceptable_interpolation:\n raise ValueError('Interpolation mode not supported. Choose from '\n '[\"bilinear\", \"nearest\"]')\n\n # Check input to upsample_type\n acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}\n if upsample_type not in acceptable_upsample:\n raise ValueError(\n 'Upsample method not supported. 
Choose from [\"upsamplelike\",'\n '\"upsampling2d\", \"upsampling3d\"]')\n\n reduced_name = 'C{}_reduced'.format(level)\n upsample_name = 'P{}_upsampled'.format(level)\n addition_name = 'P{}_merged'.format(level)\n final_name = 'P{}'.format(level)\n\n # Apply 1x1 conv to backbone layer\n if ndim == 2:\n pyramid = Conv2D(feature_size, (1, 1), strides=(1, 1),\n padding='same', name=reduced_name)(backbone_input)\n else:\n pyramid = Conv3D(feature_size, (1, 1, 1), strides=(1, 1, 1),\n padding='same', name=reduced_name)(backbone_input)\n\n # Add and then 3x3 conv\n if addition_input is not None:\n pyramid = Add(name=addition_name)([pyramid, addition_input])\n\n # Upsample pyramid input\n if upsamplelike_input is not None:\n if upsample_type == 'upsamplelike':\n pyramid_upsample = UpsampleLike(name=upsample_name)(\n [pyramid, upsamplelike_input])\n else:\n upsampling = UpSampling2D if ndim == 2 else UpSampling3D\n size = (2, 2) if ndim == 2 else (1, 2, 2)\n upsampling_kwargs = {\n 'size': size,\n 'name': upsample_name,\n 'interpolation': interpolation\n }\n if ndim > 2:\n del upsampling_kwargs['interpolation']\n pyramid_upsample = upsampling(**upsampling_kwargs)(pyramid)\n else:\n pyramid_upsample = None\n\n if ndim == 2:\n if lite:\n pyramid_final = DepthwiseConv2D((3, 3), strides=(1, 1),\n padding='same',\n name=final_name)(pyramid)\n else:\n pyramid_final = Conv2D(feature_size, (3, 3), strides=(1, 1),\n padding='same', name=final_name)(pyramid)\n else:\n pyramid_final = Conv3D(feature_size, (1, 3, 3), strides=(1, 1, 1),\n padding='same', name=final_name)(pyramid)\n\n return pyramid_final, pyramid_upsample\n\n\ndef __create_pyramid_features(backbone_dict,\n upsample_type='upsamplelike',\n ndim=2,\n feature_size=256,\n include_final_layers=True,\n lite=False,\n interpolation='bilinear'):\n \"\"\"Creates the FPN layers on top of the backbone features.\n\n Args:\n backbone_dict (dictionary): A dictionary of the backbone layers, with\n the names as keys, e.g. {'C0': C0, 'C1': C1, 'C2': C2, ...}\n upsample_type (str, optional): Choice of upsampling methods\n from ['upsamplelike','upsamling2d','upsampling3d'].\n Defaults to 'upsamplelike'.\n feature_size (int): Defaults to 256. The feature size to use\n for the resulting feature levels.\n include_final_layers (bool): Add two coarser pyramid levels\n ndim (int): The spatial dimensions of the input data.\n Default is 2, but it also works with 3\n lite (bool): Whether to use depthwise conv instead of regular conv for\n feature pyramid construction\n interpolation (str): Choice of interpolation mode for upsampling\n layers from ['bilinear', 'nearest']. Defaults to bilinear.\n\n Returns:\n dict: The feature pyramid names and levels,\n e.g. {'P3': P3, 'P4': P4, ...}\n Each backbone layer gets a pyramid level, and two additional levels\n are added, e.g. [C3, C4, C5] --> [P3, P4, P5, P6, P7]\n\n Raises:\n ValueError: ndim is not 2 or 3\n ValueError: upsample_type not ['upsamplelike','upsampling2d','upsampling3d']\n \"\"\"\n\n acceptable_ndims = [2, 3]\n if ndim not in acceptable_ndims:\n raise ValueError('Only 2 and 3 dimensional networks are supported')\n\n acceptable_interpolation = {'bilinear', 'nearest'}\n if interpolation not in acceptable_interpolation:\n raise ValueError('Interpolation mode not supported. Choose from '\n '[\"bilinear\", \"nearest\"]')\n\n acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}\n if upsample_type not in acceptable_upsample:\n raise ValueError(\n 'Upsample method not supported. 
Choose from [\"upsamplelike\",'\n '\"upsampling2d\", \"upsampling3d\"]')\n\n # Get names of the backbone levels and place in ascending order\n backbone_names = get_sorted_keys(backbone_dict)\n backbone_features = [backbone_dict[name] for name in backbone_names]\n\n pyramid_names = []\n pyramid_finals = []\n pyramid_upsamples = []\n\n # Reverse lists\n backbone_names.reverse()\n backbone_features.reverse()\n\n for i in range(len(backbone_names)):\n\n N = backbone_names[i]\n level = int(re.findall(r'\\d+', N)[0])\n p_name = 'P{}'.format(level)\n pyramid_names.append(p_name)\n\n backbone_input = backbone_features[i]\n\n # Don't add for the bottom of the pyramid\n if i == 0:\n if len(backbone_features) > 1:\n upsamplelike_input = backbone_features[i + 1]\n else:\n upsamplelike_input = None\n addition_input = None\n\n # Don't upsample for the top of the pyramid\n elif i == len(backbone_names) - 1:\n upsamplelike_input = None\n addition_input = pyramid_upsamples[-1]\n\n # Otherwise, add and upsample\n else:\n upsamplelike_input = backbone_features[i + 1]\n addition_input = pyramid_upsamples[-1]\n\n pf, pu = create_pyramid_level(backbone_input,\n upsamplelike_input=upsamplelike_input,\n addition_input=addition_input,\n upsample_type=upsample_type,\n level=level,\n ndim=ndim,\n lite=lite,\n interpolation=interpolation)\n pyramid_finals.append(pf)\n pyramid_upsamples.append(pu)\n\n # Add the final two pyramid layers\n if include_final_layers:\n # \"Second to last pyramid layer is obtained via a\n # 3x3 stride-2 conv on the coarsest backbone\"\n N = backbone_names[0]\n F = backbone_features[0]\n level = int(re.findall(r'\\d+', N)[0]) + 1\n P_minus_2_name = 'P{}'.format(level)\n\n if ndim == 2:\n P_minus_2 = Conv2D(feature_size, kernel_size=(3, 3),\n strides=(2, 2), padding='same',\n name=P_minus_2_name)(F)\n else:\n P_minus_2 = Conv3D(feature_size, kernel_size=(1, 3, 3),\n strides=(1, 2, 2), padding='same',\n name=P_minus_2_name)(F)\n\n pyramid_names.insert(0, P_minus_2_name)\n pyramid_finals.insert(0, P_minus_2)\n\n # \"Last pyramid layer is computed by applying ReLU\n # followed by a 3x3 stride-2 conv on second to last layer\"\n level = int(re.findall(r'\\d+', N)[0]) + 2\n P_minus_1_name = 'P{}'.format(level)\n P_minus_1 = Activation('relu', name=N + '_relu')(P_minus_2)\n\n if ndim == 2:\n P_minus_1 = Conv2D(feature_size, kernel_size=(3, 3),\n strides=(2, 2), padding='same',\n name=P_minus_1_name)(P_minus_1)\n else:\n P_minus_1 = Conv3D(feature_size, kernel_size=(1, 3, 3),\n strides=(1, 2, 2), padding='same',\n name=P_minus_1_name)(P_minus_1)\n\n pyramid_names.insert(0, P_minus_1_name)\n pyramid_finals.insert(0, P_minus_1)\n\n pyramid_names.reverse()\n pyramid_finals.reverse()\n\n # Reverse lists\n backbone_names.reverse()\n backbone_features.reverse()\n\n pyramid_dict = {}\n for name, feature in zip(pyramid_names, pyramid_finals):\n pyramid_dict[name] = feature\n\n return pyramid_dict\n\n\ndef semantic_upsample(x, n_upsample, n_filters=64, ndim=2, target=None):\n \"\"\"\n Performs iterative rounds of 2x upsampling and\n convolutions with a 3x3 filter to remove aliasing effects\n\n Args:\n x (tensor): The input tensor to be upsampled\n n_upsample (int): The number of 2x upsamplings\n n_filters (int): Defaults to 256. 
The number of filters for\n the 3x3 convolution\n target (tensor): An optional tensor with the target shape.\n If included, then the final upsampling layer will reshape\n to the target tensor's size\n ndim (int): The spatial dimensions of the input data.\n Default is 2, but it also works with 3\n\n Returns:\n tensor: The upsampled tensor\n\n Raises:\n ValueError: ndim is not in {2, 3}.\n \"\"\"\n acceptable_ndims = [2, 3]\n if ndim not in acceptable_ndims:\n raise ValueError('Only 2 and 3 dimensional networks are supported')\n\n conv = Conv2D if ndim == 2 else Conv3D\n upsampling = UpSampling2D if ndim == 2 else UpSampling3D\n\n for i in range(n_upsample):\n x = conv(n_filters, 3, strides=1,\n padding='same', data_format='channels_last')(x)\n\n if i == n_upsample - 1 and target is not None:\n x = UpsampleLike()([x, target])\n else:\n x = upsampling(size=2)(x)\n\n if n_upsample == 0:\n x = conv(n_filters, 3, strides=1,\n padding='same', data_format='channels_last')(x)\n\n if target is not None:\n x = UpsampleLike()([x, target])\n\n return x\n\n\ndef semantic_prediction(semantic_names,\n semantic_features,\n target_level=0,\n input_target=None,\n n_filters=64,\n n_dense=64,\n ndim=2,\n n_classes=3,\n semantic_id=0):\n \"\"\"Creates the prediction head from a list of semantic features\n\n Args:\n semantic_names (list): A list of the names of the semantic feature layers\n semantic_features (list): A list of semantic features\n NOTE: The semantic_names and semantic features should be in decreasing order\n e.g. [Q6, Q5, Q4, ...]\n target_level (int): (Optional) The level we need to reach.\n Performs 2x upsampling until we're at the target level.\n input_target (tensor): Optional tensor with the input image.\n n_filters (int): The number of filters for the 3x3 convolution.\n n_dense (int): The number of filters for dense layers.\n ndim (int): The spatial dimensions of the input data.\n Default is 2, but it also works with 3.\n n_classes (int): The number of classes to be predicted.\n semantic_id (int): Defaults to 0. 
A number to name the final layer.\n Allows for multiple semantic heads.\n Returns:\n tensor: The softmax prediction for the semantic segmentation head\n\n Raises:\n ValueError: ndim is not 2 or 3\n \"\"\"\n\n acceptable_ndims = [2, 3]\n if ndim not in acceptable_ndims:\n raise ValueError('Only 2 and 3 dimensional networks are supported')\n\n if K.image_data_format() == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n\n # Add all the semantic layers\n semantic_sum = semantic_features[0]\n for semantic_feature in semantic_features[1:]:\n semantic_sum = Add()([semantic_sum, semantic_feature])\n\n # Final upsampling\n min_level = int(re.findall(r'\\d+', semantic_names[-1])[0])\n n_upsample = min_level - target_level\n x = semantic_upsample(semantic_sum, n_upsample,\n target=input_target, ndim=ndim)\n\n # First tensor product\n x = TensorProduct(n_dense)(x)\n x = BatchNormalization(axis=channel_axis)(x)\n x = Activation('relu')(x)\n\n # Apply tensor product and softmax layer\n if n_classes > 1:\n x = TensorProduct(n_classes)(x)\n x = Softmax(axis=channel_axis, name='semantic_{}'.format(semantic_id))(x)\n else: # n_classes == 1\n x = TensorProduct(n_classes)(x)\n x = Activation('relu', name='semantic_{}'.format(semantic_id))(x)\n\n return x\n\n\ndef __create_semantic_head(pyramid_dict,\n input_target=None,\n target_level=2,\n n_classes=3,\n n_filters=128,\n semantic_id=0,\n ndim=2):\n \"\"\"\n Creates a semantic head from a feature pyramid network\n Args:\n pyramid_dict (dict): Pyramid names and features\n input_target (tensor): Optional tensor with the input image.\n target_level (int): Upsampling level.\n Level 1 = 1/2^1 size, Level 2 = 1/2^2 size, Level 3 = 1/2^3 size, etc.\n n_classes (int): The number of classes to be predicted\n n_filters (int): The number of convolutional filters.\n semantic_id (int): A number to name the final layer.\n Allows for multiple semantic heads.\n ndim (int): The spatial dimensions of the input data.\n Default is 2, but it also works with 3.\n Returns:\n keras.layers.Layer: The semantic segmentation head\n \"\"\"\n # Get pyramid names and features into list form\n pyramid_names = get_sorted_keys(pyramid_dict)\n pyramid_features = [pyramid_dict[name] for name in pyramid_names]\n\n # Reverse pyramid names and features\n pyramid_names.reverse()\n pyramid_features.reverse()\n\n semantic_features = []\n semantic_names = []\n\n for N, P in zip(pyramid_names, pyramid_features):\n # Get level and determine how much to upsample\n level = int(re.findall(r'\\d+', N)[0])\n\n n_upsample = level - target_level\n target = semantic_features[-1] if len(semantic_features) > 0 else None\n\n # Use semantic upsample to get semantic map\n semantic_features.append(semantic_upsample(\n P, n_upsample, n_filters=n_filters, target=target, ndim=ndim))\n semantic_names.append('Q{}'.format(level))\n\n # Combine all of the semantic features\n x = semantic_prediction(semantic_names, semantic_features,\n n_classes=n_classes, input_target=input_target,\n semantic_id=semantic_id, ndim=ndim)\n\n return x\n\n\ndef FPNet(backbone,\n input_shape,\n inputs=None,\n norm_method='whole_image',\n use_imagenet=False,\n pooling=None,\n required_channels=3,\n n_classes=3,\n name='fpnet',\n frames_per_batch=1,\n **kwargs):\n \"\"\"Creates a Feature Pyramid Network with a semantic segmentation head\n\n Args:\n backbone (str): A name of a supported backbone from [deepcell, resnet50]\n input_shape (tuple): Shape of the input image.\n inputs (keras.Layer): Optional preexisting layers.\n 
norm_method (str): Normalization method, defaults to 'whole_image'\n use_imagenet (bool): Whether to load imagenet-based pretrained weights.\n pooling (str): Optional pooling mode for feature extraction\n when include_top is False.\n - None means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - 'avg' means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - 'max' means that global max pooling will\n be applied.\n required_channels (int): The required number of channels of the\n backbone. 3 is the default for all current backbones.\n n_classes (int): The number of classes to be predicted\n name (str): Name to use for the model.\n frames_per_batch (int): Size of z axis in generated batches.\n If equal to 1, assumes 2D data.\n\n Returns:\n tensorflow.keras.models.Model: Feature pyramid network with a semantic\n segmentation head as the output\n \"\"\"\n\n if inputs is None:\n inputs = Input(shape=input_shape)\n\n # force the channel size for backbone input to be required_channels\n norm = ImageNormalization2D(norm_method=norm_method)(inputs)\n fixed_inputs = TensorProduct(required_channels)(norm)\n\n # force the input shape\n fixed_input_shape = list(input_shape)\n fixed_input_shape[-1] = required_channels\n fixed_input_shape = tuple(fixed_input_shape)\n\n model_kwargs = {\n 'include_top': False,\n 'weights': None,\n 'input_shape': fixed_input_shape,\n 'pooling': pooling\n }\n\n # Get backbone outputs\n _, backbone_dict = get_backbone(backbone, fixed_inputs,\n use_imagenet=use_imagenet,\n frames_per_batch=frames_per_batch,\n return_dict=True, **model_kwargs)\n\n # Construct feature pyramid network\n pyramid_dict = __create_pyramid_features(backbone_dict)\n\n levels = [int(re.findall(r'\\d+', k)[0]) for k in pyramid_dict]\n target_level = min(levels)\n\n x = __create_semantic_head(pyramid_dict, n_classes=n_classes,\n input_target=inputs, target_level=target_level,\n ndim=len(input_shape) - 1)\n\n return Model(inputs=inputs, outputs=x, name=name)\n", "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport datetime\nimport os\n\nimport numpy as np\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import callbacks\nfrom tensorflow.python.keras.optimizers import SGD\nfrom deepcell import losses\nfrom deepcell import image_generators\nfrom deepcell.utils import train_utils\nfrom deepcell.utils.train_utils import rate_scheduler\nfrom deepcell.utils.train_utils import get_callbacks\n\n\n\n# TODO: make this function compatible with 3D data or images with more than 1 channel.\ndef random_crop(image, label, crop_height, crop_width):\n \"\"\"\"\n Crop a random patch of size (crop_height, crop_width) in a 2D image following a sampling distribution where patches\n where label>0 have higher probability.\n image: 2D numpy array\n label: 2D numpy array with the segmentation, detection or any information about image\n crop height / crop width: determine the size of the patch to crop.\n Returns:\n patch of the image and the corresponding patch of the label.\n \"\"\"\n if (image.shape[0] != label.shape[0]) or (image.shape[1] != label.shape[1]):\n raise Exception('Image and label must have the same dimensions!')\n if (crop_width <= image.shape[1]) and (crop_height <= image.shape[0]):\n pdf_im = np.ones(label.shape) # label is a mask\n pdf_im[label > 0] = 10000 # the 
weight we want to give to positive values in labels.\n pdf_im = pdf_im[:-crop_height,:-crop_width] # limit the coordinates in which a centroid can lay\n prob = np.float32(pdf_im)\n # convert the 2D matrix into a vector and normalize it so you create a distribution of all the possible values\n # between 1 and prod(pdf.shape)(sum=1)\n prob = prob.ravel()/np.sum(prob)\n choices = np.prod(pdf_im.shape)\n # get a random centroid but following a pdf distribution.\n index = np.random.choice(choices, size=1, p=prob)\n coordinates = np.unravel_index(index, shape=pdf_im.shape)\n y = coordinates[0][0]\n x = coordinates[1][0]\n return image[y:y+crop_height, x:x+crop_width], label[y:y+crop_height, x:x+crop_width]\n else:\n raise Exception('Crop shape ({0}, {1}) exceeds image dimensions ({2}, {3})!'.format(crop_height, crop_width, image.shape[0], image.shape[1]))\n\n\n\n\ndef load_data_pairs(DATAPATH, mode = 'train', patch_crop=False, crop_height=256, crop_width=256):\n import cv2\n import sys\n files = os.listdir(os.path.join(DATAPATH, mode))\n # files = os.listdir(os.path.join(DATAPATH, mode, 'inputs'))\n X = None\n sys.stdout.write(\"\\rLoading data...\\n\")\n i = 0\n for fname in files:\n i = i+1\n text = \"\\r{0} {1}%\".format(\"|\" * 20, i/len(files) * 100)\n sys.stdout.write(text)\n sys.stdout.flush()\n input_im = cv2.imread(os.path.join(DATAPATH, mode, fname), cv2.IMREAD_ANYDEPTH)\n # input_im = cv2.imread(os.path.join(DATAPATH, mode, 'inputs', fname), cv2.IMREAD_ANYDEPTH)\n # input_im = input_im[:,:,0]\n mask_im = cv2.imread(os.path.join(DATAPATH, mode + '_labels', 'instance_ids_' + fname[4:]), cv2.IMREAD_ANYDEPTH)\n # mask_im = cv2.imread(os.path.join(DATAPATH, mode+, 'labels', 'instance_ids_' + fname[4:]), cv2.IMREAD_ANYDEPTH)\n # mask_im = mask_im[:,:,0]\n # mask_im[mask_im > 0] = 1\n\n if patch_crop==True:\n input_im, mask_im = random_crop(input_im, mask_im, crop_height, crop_width)\n input_im = input_im.reshape((1, crop_height, crop_width, 1))\n mask_im = mask_im.reshape((1, crop_height, crop_width, 1))\n else:\n input_im = input_im.reshape((1, input_im.shape[0], input_im.shape[1], 1))\n mask_im = mask_im.reshape((1, mask_im.shape[0], mask_im.shape[1], 1))\n\n if X is None:\n X = input_im\n y = mask_im\n else:\n X = np.concatenate((X,input_im), axis=0)\n y = np.concatenate((y,mask_im), axis=0)\n return X, y\n\n\n\n# TODO: modify this for 3D data or images with more than one channel.\ndef get_data_from_path(DATAPATH, patch_crop=False, crop_height=256, crop_width=256):\n \"\"\"\n Read the training, and test 2D data and save them as dictionaries used during the training.\n Args:\n DATAPATH: Main path where the data is stored as train, train_labels, test, test_labels\n patch_crop: Whether we want to crop a small patch of each image\n crop_height: Height size (Y-axis) of the path to crop\n crop_width: Width size (X-axis) of the path to crop\n\n Returns:\n train_dict with the input and output images. The length of the training data is equal to the number of images\n available in the directory.\n test_dict with the input and output images that belong to the test set. The length of the data is equal to the\n number of images available in the directory. 
The size of these images could be the original one instead of\n patches, as long as all have the same size.\n \"\"\"\n X_train, y_train = load_data_pairs(DATAPATH, mode='train', patch_crop=patch_crop,\n crop_height=crop_height, crop_width=crop_width)\n X_test, y_test = load_data_pairs(DATAPATH, mode='test', patch_crop=False)\n train_dict = {\n 'X': X_train,\n 'y': y_train\n }\n test_dict = {\n 'X': X_test,\n 'y': y_test\n }\n return train_dict, test_dict\n\n\ndef train_model_sample_fromdirectory(model,\n dataset,\n expt='',\n test_size=.2,\n n_epoch=10,\n batch_size=32,\n num_gpus=None,\n transform=None,\n window_size=None,\n balance_classes=True,\n max_class_samples=None,\n log_dir='/data/tensorboard_logs',\n model_dir='/data/models',\n model_name=None,\n focal=False,\n gamma=0.5,\n optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),\n lr_sched=rate_scheduler(lr=0.01, decay=0.95),\n rotation_range=0,\n flip=False,\n shear=0,\n zoom_range=0,\n seed=0,\n **kwargs):\n \"\"\"Train a model using sample mode.\n\n Args:\n model (tensorflow.keras.Model): The model to train.\n dataset (str): Path to a dataset to train the model with.\n expt (str): Experiment, substring to include in model name.\n test_size (float): Percent of data to leave as test data.\n n_epoch (int): Number of training epochs.\n batch_size (int): Number of batches per training step.\n num_gpus (int): The number of GPUs to train on.\n transform (str): Defines the transformation of the training data.\n One of 'watershed', 'fgbg', 'pixelwise'.\n window_size (tuple(int, int)): Size of sampling window\n balance_classes (bool): Whether to perform class-balancing on data\n max_class_samples (int): Maximum number of examples per class to sample\n log_dir (str): Filepath to save tensorboard logs. 
If None, disables\n the tensorboard callback.\n model_dir (str): Directory to save the model file.\n model_name (str): Name of the model (and name of output file).\n focal (bool): If true, uses focal loss.\n gamma (float): Parameter for focal loss\n optimizer (object): Pre-initialized optimizer object (SGD, Adam, etc.)\n lr_sched (function): Learning rate schedular function\n rotation_range (int): Maximum rotation range for image augmentation\n flip (bool): Enables horizontal and vertical flipping for augmentation\n shear (int): Maximum rotation range for image augmentation\n zoom_range (tuple): Minimum and maximum zoom values (0.8, 1.2)\n seed (int): Random seed\n kwargs (dict): Other parameters to pass to _transform_masks\n\n Returns:\n tensorflow.keras.Model: The trained model\n \"\"\"\n is_channels_first = K.image_data_format() == 'channels_first'\n\n if model_name is None:\n todays_date = datetime.datetime.now().strftime('%Y-%m-%d')\n data_name = os.path.splitext(os.path.basename(dataset))[0]\n model_name = '{}_{}_{}'.format(todays_date, data_name, expt)\n model_path = os.path.join(model_dir, '{}.h5'.format(model_name))\n loss_path = os.path.join(model_dir, '{}.npz'.format(model_name))\n\n # train_dict, test_dict = get_data(dataset, test_size=test_size, seed=seed)\n\n train_dict, test_dict = get_data_from_path(dataset, patch_crop=False)\n n_classes = model.layers[-1].output_shape[1 if is_channels_first else -1]\n\n # the data, shuffled and split between train and test sets\n print('X_train shape:', train_dict['X'].shape)\n print('y_train shape:', train_dict['y'].shape)\n print('X_test shape:', test_dict['X'].shape)\n print('y_test shape:', test_dict['y'].shape)\n print('Output Shape:', model.layers[-1].output_shape)\n print('Number of Classes:', n_classes)\n\n def loss_function(y_true, y_pred):\n if isinstance(transform, str) and transform.lower() == 'disc':\n return losses.discriminative_instance_loss(y_true, y_pred)\n if focal:\n return losses.weighted_focal_loss(\n y_true, y_pred, gamma=gamma, n_classes=n_classes)\n return losses.weighted_categorical_crossentropy(\n y_true, y_pred, n_classes=n_classes)\n\n if num_gpus is None:\n num_gpus = train_utils.count_gpus()\n\n if num_gpus >= 2:\n batch_size = batch_size * num_gpus\n model = train_utils.MultiGpuModel(model, num_gpus)\n\n print('Training on {} GPUs'.format(num_gpus))\n\n model.compile(loss=loss_function, optimizer=optimizer, metrics=['accuracy'])\n\n if train_dict['X'].ndim == 4:\n DataGenerator = image_generators.SampleDataGenerator\n window_size = window_size if window_size else (30, 30)\n elif train_dict['X'].ndim == 5:\n DataGenerator = image_generators.SampleMovieDataGenerator\n window_size = window_size if window_size else (30, 30, 3)\n else:\n raise ValueError('Expected `X` to have ndim 4 or 5. 
Got',\n train_dict['X'].ndim)\n\n # this will do preprocessing and realtime data augmentation\n datagen = DataGenerator(\n rotation_range=rotation_range,\n shear_range=shear,\n zoom_range=zoom_range,\n horizontal_flip=flip,\n vertical_flip=flip)\n\n # no validation augmentation\n datagen_val = DataGenerator(\n rotation_range=0,\n shear_range=0,\n zoom_range=0,\n horizontal_flip=0,\n vertical_flip=0)\n\n train_data = datagen.flow(\n train_dict,\n seed=seed,\n batch_size=batch_size,\n transform=transform,\n transform_kwargs=kwargs,\n window_size=window_size,\n balance_classes=balance_classes,\n max_class_samples=max_class_samples)\n # save_to_dir= './training_data',\n # save_prefix='t',\n # save_format='tif')\n\n val_data = datagen_val.flow(\n test_dict,\n seed=seed,\n batch_size=batch_size,\n transform=transform,\n transform_kwargs=kwargs,\n window_size=window_size,\n balance_classes=False,\n max_class_samples=max_class_samples)\n\n train_callbacks = get_callbacks(\n model_path, lr_sched=lr_sched,\n tensorboard_log_dir=log_dir,\n save_weights_only=num_gpus >= 2,\n monitor='val_loss', verbose=1)\n\n # fit the model on the batches generated by datagen.flow()\n loss_history = model.fit_generator(\n train_data,\n steps_per_epoch=train_data.y.shape[0] // batch_size,\n epochs=n_epoch,\n validation_data=val_data,\n validation_steps=val_data.y.shape[0] // batch_size,\n callbacks=train_callbacks)\n\n np.savez(loss_path, loss_history=loss_history.history)\n\n return model\n" ]
[ [ "tensorflow.python.platform.test.main" ], [ "tensorflow.python.keras.layers.DepthwiseConv2D", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.Add", "tensorflow.python.keras.layers.Activation", "tensorflow.python.keras.backend.image_data_format", "tensorflow.python.keras.models.Model", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.layers.Conv3D", "tensorflow.python.keras.layers.Input" ], [ "numpy.savez", "tensorflow.python.keras.backend.image_data_format", "numpy.random.choice", "numpy.ones", "numpy.concatenate", "tensorflow.python.keras.optimizers.SGD", "numpy.prod", "numpy.float32", "numpy.unravel_index", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] } ]
gesiscss/wikiwho_demo
[ "1549a63dc9714c1a813a77dcad481a69cd28dfcd" ]
[ "visualization/conflicts_listener.py" ]
[ "import pandas as pd\nimport numpy as np\nimport plotly\nfrom plotly import graph_objs\n\n\nclass ConflictsListener():\n\n def __init__(self, df):\n\n # time diff to seconds\n #df['diff_secs'] = df['time_diff'].dt.total_seconds()\n\n # conflict time diff to seconds \n #df['diff_secs_confl'] = np.nan\n #df['diff_secs_confl'] = df.loc[~df['conflict'].isnull(), 'diff_secs']\n\n self.df = df\n self.df_plotted = None\n\n def listen(self, _range, granularity, black, red):\n df = self.df\n\n df = df[(df.year_month.dt.date >= _range[0]) &\n (df.year_month.dt.date <= _range[1])]\n\n # calculate the aggreated values\n df = df.groupby(pd.Grouper(\n key='year_month', freq=granularity[0])).agg({'conflicts': ['sum'],\n 'elegibles': ['sum'],\n 'revisions': ['sum'],\n 'conflict': ['count', 'sum'],\n 'total': ['sum'],\n 'total_surv_48h': ['sum'],\n 'total_persistent': ['sum'],\n 'total_stopword_count': ['sum']}).reset_index()\n\n df.loc[df[('conflict', 'count')] == 0, ('conflict', 'sum')] = np.nan\n #df.loc[df[('conflicts', 'count')] == 0, ('diff_secs', 'sum')] = np.nan\n\n self.traces = []\n self.is_norm_scale = True\n df = self.__add_trace(df, black, 'rgba(0, 0, 0, 1)')\n df = self.__add_trace(df, red, 'rgba(255, 0, 0, .8)')\n\n #np.all(np.array([len(sc.x) == 1 for sc in self.traces]))\n\n _range = None\n if self.is_norm_scale:\n _range = [0, 1]\n\n # if red != 'None':\n # data.append(graph_objs.Scatter(\n # x=list(df['rev_time']), y=list(df[red]),\n # name=red,\n # marker=dict(color='rgba(255, 0, 0, .8)')))\n\n # if blue != 'None':\n # data.append(graph_objs.Scatter(\n # x=list(df['rev_time']), y=list(df[blue]),\n # name=blue,\n # marker=dict(color='rgba(0, 128, 43, 1)')))\n\n # if green != 'None':\n # data.append(graph_objs.Scatter(\n # x=list(df['rev_time']), y=list(df[green]),\n # name=green,\n # marker=dict(color='rgba(0, 153, 255, .8)')))\n\n layout = graph_objs.Layout(hovermode='closest',\n xaxis=dict(title=granularity, ticklen=5,\n zeroline=True, gridwidth=2),\n yaxis=dict(\n ticklen=5, gridwidth=2, range=_range),\n legend=dict(x=0.5, y=1.2),\n showlegend=True, barmode='group')\n\n self.df_plotted = df\n\n plotly.offline.init_notebook_mode(connected=True)\n plotly.offline.iplot({\"data\": self.traces, \"layout\": layout})\n\n def __add_trace(self, df, metric, color):\n sel = df.index\n if metric == 'None':\n return df\n elif metric == 'Conflict Score':\n df['conflict_score'] = df[\n ('conflict', 'sum')] / df[('elegibles', 'sum')]\n sel = ~df['conflict_score'].isnull()\n y = df.loc[sel, 'conflict_score']\n self.is_norm_scale = False\n\n elif metric == 'Conflict Ratio':\n df['conflict_ratio'] = df[\n ('conflicts', 'sum')] / df[('elegibles', 'sum')]\n sel = ~(df['conflict_ratio'].isnull() | (df[('conflict', 'count')] == 0))\n y = df.loc[sel, 'conflict_ratio']\n\n elif metric == 'Absolute Conflict Score':\n df['absolute_conflict_score'] = df[('conflict', 'sum')]\n sel = ~df['absolute_conflict_score'].isnull() \n y = df.loc[sel, 'absolute_conflict_score']\n self.is_norm_scale = False\n\n elif metric == 'Number of Conflicts':\n df['conflict_n'] = df[('conflicts', 'sum')]\n sel = df['conflict_n'] != 0\n y = df.loc[sel, 'conflict_n']\n self.is_norm_scale = False\n\n elif metric == 'Total':\n df['total_n'] = df[('total', 'sum')]\n sel = df['total_n'] != 0\n y = df.loc[sel, 'total_n']\n self.is_norm_scale = False\n \n elif metric == 'Total_surv_48h':\n df['total_surv_48h_n'] = df[('total_surv_48h', 'sum')]\n sel = df['total_surv_48h_n'] != 0\n y = df.loc[sel, 'total_surv_48h_n']\n self.is_norm_scale = 
False\n\n elif metric == 'Total_persistent':\n df['total_persistent_n'] = df[('total_persistent', 'sum')]\n sel = df['total_persistent_n'] != 0\n y = df.loc[sel, 'total_persistent_n']\n self.is_norm_scale = False\n \n elif metric == 'Total_stopword_count':\n df['total_stopword_count_n'] = df[('total_stopword_count', 'sum')]\n sel = df['total_stopword_count_n'] != 0\n y = df.loc[sel, 'total_stopword_count_n']\n self.is_norm_scale = False\n\n elif metric == 'Total Elegible Actions':\n df['elegibles_n'] = df[('elegibles', 'sum')]\n sel = df['elegibles_n'] != 0\n y = df.loc[sel, 'elegibles_n']\n self.is_norm_scale = False\n\n self.traces.append(\n graph_objs.Scatter(\n x=df.loc[sel,'year_month'], y=y,\n name=metric,\n marker=dict(color=color))\n )\n\n return df\n" ]
[ [ "pandas.Grouper" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
symphony233/gbnns_dim_red
[ "2403411600a60ad4365aba3d78a81da144a456b7" ]
[ "train.py" ]
[ "from __future__ import division\nimport argparse\nimport numpy as np\nimport torch\n\nfrom dim_red.triplet import train_triplet\nfrom dim_red.angular import train_angular\n\nfrom dim_red.support_func import sanitize\nfrom dim_red.data import load_dataset\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n def aa(*args, **kwargs):\n group.add_argument(*args, **kwargs)\n\n group = parser.add_argument_group('dataset options')\n aa(\"--database\", default=\"sift\")\n aa(\"--method\", type=str, default=\"triplet\")\n\n group = parser.add_argument_group('Model hyperparameters')\n aa(\"--dout\", type=int, default=16,\n help=\"output dimension\")\n aa(\"--dint\", type=int, default=1024)\n group = parser.add_argument_group('Computation params')\n aa(\"--seed\", type=int, default=1234)\n aa(\"--device\", choices=[\"cuda\", \"cpu\", \"auto\"], default=\"auto\")\n aa(\"--val_freq\", type=int, default=10,\n help=\"frequency of validation calls\")\n aa(\"--optim\", type=str, default=\"sgd\")\n aa(\"--print_results\", type=int, default=0)\n aa(\"--save\", type=int, default=0)\n aa(\"--full\", type=int, default=0)\n aa(\"--val_freq_search\", type=int, default=5,\n help=\"frequency of validation calls\")\n aa(\"--save_knn_1k\", type=int, default=0)\n aa(\"--save_optimal\", type=int, default=0)\n aa(\"--batch_size\", type=int, default=64)\n aa(\"--epochs\", type=int, default=40)\n aa(\"--lr_schedule\", type=str, default=\"0.1,0.1,0.05,0.01\")\n aa(\"--momentum\", type=float, default=0.9)\n\n args = parser.parse_args()\n\n if args.device == \"auto\":\n args.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n print(args)\n\n results_file_name = \"/home/shekhale/results/dim_red/\" + args.database + \"/train_results_\" + args.method + \".txt\"\n if args.print_results > 0:\n with open(results_file_name, \"a\") as rfile:\n rfile.write(\"\\n\\n\")\n rfile.write(\"START TRAINING \\n\")\n\n print (\"load dataset %s\" % args.database)\n (_, xb, xq, _) = load_dataset(args.database, args.device, calc_gt=False, mnt=True)\n\n base_size = xb.shape[0]\n threshold = int(base_size * 0.01)\n perm = np.random.permutation(base_size)\n xv = xb[perm[:threshold]]\n if args.full:\n xt = xb\n else:\n xt = xb[perm[threshold:]]\n\n print(xb.shape, xt.shape, xv.shape, xq.shape)\n\n xt = sanitize(xt)\n xv = sanitize(xv)\n xb = sanitize(xb)\n xq = sanitize(xq)\n\n if args.method == \"triplet\":\n train_triplet(xb, xt, xv, xq, args, results_file_name)\n elif args.method == \"angular\":\n train_angular(xb, xt, xv, xq, args, results_file_name, perm)\n else:\n print(\"Select an available method\")" ]
[ [ "torch.manual_seed", "numpy.random.permutation", "numpy.random.seed", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tkShir/EC521-Group6-Novel-Steganographic-Scheme
[ "d01ff5b625d5ef85790451fa62e5c33f15f06f0d" ]
[ "novel_stego_protocol/textgenrnn/textgenrnn.py" ]
[ "import json\nimport re\n\nimport numpy as np\nimport tensorflow as tf\nimport tqdm\nfrom pkg_resources import resource_filename\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow import config as config\nfrom tensorflow.compat.v1.keras.backend import set_session\nfrom tensorflow.keras.callbacks import LearningRateScheduler\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.preprocessing.text import Tokenizer, text_to_word_sequence\n\nfrom .model import textgenrnn_model\nfrom .model_training import generate_sequences_from_texts\nfrom .utils import (\n generate_after_epoch,\n save_model_weights,\n textgenrnn_encode_sequence,\n textgenrnn_generate,\n textgenrnn_generate2,\n textgenrnn_texts_from_file,\n textgenrnn_texts_from_file_context,\n)\n\n\nclass textgenrnn:\n META_TOKEN = '<s>'\n config = {\n 'rnn_layers': 2,\n 'rnn_size': 128,\n 'rnn_bidirectional': False,\n 'max_length': 40,\n 'max_words': 10000,\n 'dim_embeddings': 100,\n 'word_level': False,\n 'single_text': False\n }\n default_config = config.copy()\n\n def __init__(self, weights_path=None,\n vocab_path=None,\n config_path=None,\n name=\"textgenrnn\",\n allow_growth=None):\n\n if weights_path is None:\n weights_path = resource_filename(__name__,\n 'textgenrnn_weights.hdf5')\n\n if vocab_path is None:\n vocab_path = resource_filename(__name__,\n 'textgenrnn_vocab.json')\n\n if allow_growth is not None:\n c = tf.ConfigProto()\n c.gpu_options.allow_growth = True\n set_session(tf.Session(config=c))\n\n if config_path is not None:\n with open(config_path, 'r',\n encoding='utf8', errors='ignore') as json_file:\n self.config = json.load(json_file)\n\n self.config.update({'name': name})\n self.default_config.update({'name': name})\n\n with open(vocab_path, 'r',\n encoding='utf8', errors='ignore') as json_file:\n self.vocab = json.load(json_file)\n\n self.tokenizer = Tokenizer(filters='', lower=False, char_level=True)\n self.tokenizer.word_index = self.vocab\n self.num_classes = len(self.vocab) + 1\n self.model = textgenrnn_model(self.num_classes,\n cfg=self.config,\n weights_path=weights_path)\n self.indices_char = dict((self.vocab[c], c) for c in self.vocab)\n\n def generate(self, n=1, return_as_list=False, prefix=None,\n temperature=[1.0, 0.5, 0.2, 0.2], ciphertext=b\"\",\n max_gen_length=300, interactive=False,\n top_n=3, progress=True):\n gen_texts = []\n iterable = tqdm.trange(n) if progress and n > 1 else range(n)\n for _ in iterable:\n gen_text, _ = textgenrnn_generate(self.model,\n self.vocab,\n self.indices_char,\n temperature,\n self.config['max_length'],\n self.META_TOKEN,\n self.config['word_level'],\n self.config.get(\n 'single_text', False),\n max_gen_length,\n interactive,\n top_n,\n prefix, ciphertext)\n if not return_as_list:\n # print(\"{}\\n\".format(gen_text))\n return (gen_text)\n gen_texts.append(gen_text)\n\n if return_as_list:\n return gen_texts\n\n def generate2(self, n=1, return_as_list=False, prefix=None,\n temperature=[1.0, 0.5, 0.2, 0.2], stegotext=b\"\",\n max_gen_length=300, interactive=False,\n top_n=3, progress=True):\n # print(stegotext)\n gen_texts = []\n iterable = tqdm.trange(n) if progress and n > 1 else range(n)\n for _ in iterable:\n gen_text, _ = textgenrnn_generate2(self.model,\n self.vocab,\n self.indices_char,\n temperature,\n self.config['max_length'],\n self.META_TOKEN,\n 
self.config['word_level'],\n self.config.get(\n 'single_text', False),\n max_gen_length,\n interactive,\n top_n,\n prefix, stegotext)\n if not return_as_list:\n # print(\"{}\\n\".format(gen_text))\n return (gen_text)\n gen_texts.append(gen_text)\n\n if return_as_list:\n return gen_texts\n\n def generate_samples(self, n=3, temperatures=[0.2, 0.5, 1.0], **kwargs):\n for temperature in temperatures:\n print('#' * 20 + '\\nTemperature: {}\\n'.format(temperature) +\n '#' * 20)\n self.generate(n, temperature=temperature, progress=False, **kwargs)\n\n def train_on_texts(self, texts, context_labels=None,\n batch_size=128,\n num_epochs=50,\n verbose=1,\n new_model=False,\n gen_epochs=1,\n train_size=1.0,\n max_gen_length=300,\n validation=True,\n dropout=0.0,\n via_new_model=False,\n save_epochs=0,\n multi_gpu=False,\n **kwargs):\n\n if new_model and not via_new_model:\n self.train_new_model(texts,\n context_labels=context_labels,\n num_epochs=num_epochs,\n gen_epochs=gen_epochs,\n train_size=train_size,\n batch_size=batch_size,\n dropout=dropout,\n validation=validation,\n save_epochs=save_epochs,\n multi_gpu=multi_gpu,\n **kwargs)\n return\n\n if context_labels:\n context_labels = LabelBinarizer().fit_transform(context_labels)\n\n if self.config['word_level']:\n # If training word level, must add spaces around each\n # punctuation. https://stackoverflow.com/a/3645946/9314418\n punct = '!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\\\n\\\\t\\'‘’“”’–—…'\n for i in range(len(texts)):\n texts[i] = re.sub('([{}])'.format(punct), r' \\1 ', texts[i])\n texts[i] = re.sub(' {2,}', ' ', texts[i])\n texts = [text_to_word_sequence(text, filters='') for text in texts]\n\n # calculate all combinations of text indices + token indices\n indices_list = [np.meshgrid(np.array(i), np.arange(\n len(text) + 1)) for i, text in enumerate(texts)]\n # indices_list = np.block(indices_list) # this hangs when indices_list is large enough\n # FIX BEGIN ------\n indices_list_o = np.block(indices_list[0])\n for i in range(len(indices_list) - 1):\n tmp = np.block(indices_list[i + 1])\n indices_list_o = np.concatenate([indices_list_o, tmp])\n indices_list = indices_list_o\n # FIX END ------\n\n # If a single text, there will be 2 extra indices, so remove them\n # Also remove first sequences which use padding\n if self.config['single_text']:\n indices_list = indices_list[self.config['max_length']:-2, :]\n\n indices_mask = np.random.rand(indices_list.shape[0]) < train_size\n\n if multi_gpu:\n num_gpus = len(config.get_visible_devices('GPU'))\n batch_size = batch_size * num_gpus\n\n gen_val = None\n val_steps = None\n if train_size < 1.0 and validation:\n indices_list_val = indices_list[~indices_mask, :]\n gen_val = generate_sequences_from_texts(\n texts, indices_list_val, self, context_labels, batch_size)\n val_steps = max(\n int(np.floor(indices_list_val.shape[0] / batch_size)), 1)\n\n indices_list = indices_list[indices_mask, :]\n\n num_tokens = indices_list.shape[0]\n assert num_tokens >= batch_size, \"Fewer tokens than batch_size.\"\n\n level = 'word' if self.config['word_level'] else 'character'\n print(\"Training on {:,} {} sequences.\".format(num_tokens, level))\n\n steps_per_epoch = max(int(np.floor(num_tokens / batch_size)), 1)\n\n gen = generate_sequences_from_texts(\n texts, indices_list, self, context_labels, batch_size)\n\n base_lr = 4e-3\n\n # scheduler function must be defined inline.\n def lr_linear_decay(epoch):\n return (base_lr * (1 - (epoch / num_epochs)))\n\n '''\n FIXME\n This part is a bit messy as we need to initialize 
the model within\n strategy.scope() when using multi-GPU. Can probably be cleaned up a bit.\n '''\n\n if context_labels is not None:\n if new_model:\n weights_path = None\n else:\n weights_path = \"{}_weights.hdf5\".format(self.config['name'])\n self.save(weights_path)\n\n if multi_gpu:\n from tensorflow import distribute as distribute\n strategy = distribute.MirroredStrategy()\n with strategy.scope():\n parallel_model = textgenrnn_model(self.num_classes,\n dropout=dropout,\n cfg=self.config,\n context_size=context_labels.shape[1],\n weights_path=weights_path)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=4e-3))\n model_t = parallel_model\n print(\"Training on {} GPUs.\".format(num_gpus))\n else:\n model_t = self.model\n else:\n if multi_gpu:\n from tensorflow import distribute as distribute\n if new_model:\n weights_path = None\n else:\n weights_path = \"{}_weights.hdf5\".format(self.config['name'])\n\n strategy = distribute.MirroredStrategy()\n with strategy.scope():\n # Do not locate model/merge on CPU since sample sizes are small.\n parallel_model = textgenrnn_model(self.num_classes,\n cfg=self.config,\n weights_path=weights_path)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=4e-3))\n\n model_t = parallel_model\n print(\"Training on {} GPUs.\".format(num_gpus))\n else:\n model_t = self.model\n\n model_t.fit(gen, steps_per_epoch=steps_per_epoch,\n epochs=num_epochs,\n callbacks=[\n LearningRateScheduler(\n lr_linear_decay),\n generate_after_epoch(\n self, gen_epochs,\n max_gen_length),\n save_model_weights(\n self, num_epochs,\n save_epochs)],\n verbose=verbose,\n max_queue_size=10,\n validation_data=gen_val,\n validation_steps=val_steps\n )\n\n # Keep the text-only version of the model if using context labels\n if context_labels is not None:\n self.model = Model(inputs=self.model.input[0],\n outputs=self.model.output[1])\n\n def train_new_model(self, texts, context_labels=None, num_epochs=50,\n gen_epochs=1, batch_size=128, dropout=0.0,\n train_size=1.0,\n validation=True, save_epochs=0,\n multi_gpu=False, **kwargs):\n self.config = self.default_config.copy()\n self.config.update(**kwargs)\n\n print(\"Training new model w/ {}-layer, {}-cell {}LSTMs\".format(\n self.config['rnn_layers'], self.config['rnn_size'],\n 'Bidirectional ' if self.config['rnn_bidirectional'] else ''\n ))\n\n # Create text vocabulary for new texts\n # if word-level, lowercase; if char-level, uppercase\n self.tokenizer = Tokenizer(filters='',\n lower=self.config['word_level'],\n char_level=(not self.config['word_level']))\n self.tokenizer.fit_on_texts(texts)\n\n # Limit vocab to max_words\n max_words = self.config['max_words']\n self.tokenizer.word_index = {k: v for (\n k, v) in self.tokenizer.word_index.items() if v <= max_words}\n\n if not self.config.get('single_text', False):\n self.tokenizer.word_index[self.META_TOKEN] = len(\n self.tokenizer.word_index) + 1\n self.vocab = self.tokenizer.word_index\n self.num_classes = len(self.vocab) + 1\n self.indices_char = dict((self.vocab[c], c) for c in self.vocab)\n\n # Create a new, blank model w/ given params\n self.model = textgenrnn_model(self.num_classes,\n dropout=dropout,\n cfg=self.config)\n\n # Save the files needed to recreate the model\n with open('{}_vocab.json'.format(self.config['name']),\n 'w', encoding='utf8') as outfile:\n json.dump(self.tokenizer.word_index, outfile, ensure_ascii=False)\n\n with open('{}_config.json'.format(self.config['name']),\n 'w', encoding='utf8') as outfile:\n 
json.dump(self.config, outfile, ensure_ascii=False)\n\n self.train_on_texts(texts, new_model=True,\n via_new_model=True,\n context_labels=context_labels,\n num_epochs=num_epochs,\n gen_epochs=gen_epochs,\n train_size=train_size,\n batch_size=batch_size,\n dropout=dropout,\n validation=validation,\n save_epochs=save_epochs,\n multi_gpu=multi_gpu,\n **kwargs)\n\n def save(self, weights_path=\"textgenrnn_weights_saved.hdf5\"):\n self.model.save_weights(weights_path)\n\n def load(self, weights_path):\n self.model = textgenrnn_model(self.num_classes,\n cfg=self.config,\n weights_path=weights_path)\n\n def reset(self):\n self.config = self.default_config.copy()\n self.__init__(name=self.config['name'])\n\n def train_from_file(self, file_path, header=True, delim=\"\\n\",\n new_model=False, context=None,\n is_csv=False, **kwargs):\n\n context_labels = None\n if context:\n texts, context_labels = textgenrnn_texts_from_file_context(\n file_path)\n else:\n texts = textgenrnn_texts_from_file(file_path, header,\n delim, is_csv)\n\n print(\"{:,} texts collected.\".format(len(texts)))\n if new_model:\n self.train_new_model(\n texts, context_labels=context_labels, **kwargs)\n else:\n self.train_on_texts(texts, context_labels=context_labels, **kwargs)\n\n def train_from_largetext_file(self, file_path, new_model=True, **kwargs):\n with open(file_path, 'r', encoding='utf8', errors='ignore') as f:\n texts = [f.read()]\n\n if new_model:\n self.train_new_model(\n texts, single_text=True, **kwargs)\n else:\n self.train_on_texts(texts, single_text=True, **kwargs)\n\n def generate_to_file(self, destination_path, **kwargs):\n texts = self.generate(return_as_list=True, **kwargs)\n with open(destination_path, 'w', encoding=\"utf-8\") as f:\n for text in texts:\n f.write(\"{}\\n\".format(text))\n\n def encode_text_vectors(self, texts, pca_dims=50, tsne_dims=None,\n tsne_seed=None, return_pca=False,\n return_tsne=False):\n\n # if a single text, force it into a list:\n if isinstance(texts, str):\n texts = [texts]\n\n vector_output = Model(inputs=self.model.input,\n outputs=self.model.get_layer('attention').output)\n encoded_vectors = []\n maxlen = self.config['max_length']\n for text in texts:\n if self.config['word_level']:\n text = text_to_word_sequence(text, filters='')\n text_aug = [self.META_TOKEN] + list(text[0:maxlen])\n encoded_text = textgenrnn_encode_sequence(text_aug, self.vocab,\n maxlen)\n encoded_vector = vector_output.predict(encoded_text)\n encoded_vectors.append(encoded_vector)\n\n encoded_vectors = np.squeeze(np.array(encoded_vectors), axis=1)\n if pca_dims is not None:\n assert len(texts) > 1, \"Must use more than 1 text for PCA\"\n pca = PCA(pca_dims)\n encoded_vectors = pca.fit_transform(encoded_vectors)\n\n if tsne_dims is not None:\n tsne = TSNE(tsne_dims, random_state=tsne_seed)\n encoded_vectors = tsne.fit_transform(encoded_vectors)\n\n return_objects = encoded_vectors\n if return_pca or return_tsne:\n return_objects = [return_objects]\n if return_pca:\n return_objects.append(pca)\n if return_tsne:\n return_objects.append(tsne)\n\n return return_objects\n\n def similarity(self, text, texts, use_pca=True):\n text_encoded = self.encode_text_vectors(text, pca_dims=None)\n if use_pca:\n texts_encoded, pca = self.encode_text_vectors(texts,\n return_pca=True)\n text_encoded = pca.transform(text_encoded)\n else:\n texts_encoded = self.encode_text_vectors(texts, pca_dims=None)\n\n cos_similairity = cosine_similarity(text_encoded, texts_encoded)[0]\n text_sim_pairs = list(zip(texts, cos_similairity))\n 
text_sim_pairs = sorted(text_sim_pairs, key=lambda x: -x[1])\n return text_sim_pairs\n" ]
[ [ "tensorflow.keras.callbacks.LearningRateScheduler", "numpy.concatenate", "sklearn.manifold.TSNE", "tensorflow.config.copy", "tensorflow.ConfigProto", "numpy.block", "tensorflow.Session", "sklearn.preprocessing.LabelBinarizer", "tensorflow.keras.models.Model", "tensorflow.keras.preprocessing.text.text_to_word_sequence", "sklearn.metrics.pairwise.cosine_similarity", "numpy.random.rand", "numpy.floor", "numpy.array", "sklearn.decomposition.PCA", "tensorflow.keras.preprocessing.text.Tokenizer", "tensorflow.config.get_visible_devices", "tensorflow.keras.optimizers.Adam", "tensorflow.distribute.MirroredStrategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
malisit/onnx-tensorflow
[ "3eb41dc923f350ca533f1024f602a842dd55de45" ]
[ "onnx_tf/handlers/backend/sequence_erase.py" ]
[ "import tensorflow as tf\n\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\n\n\n@onnx_op(\"SequenceErase\")\nclass SequenceErase(BackendHandler):\n\n @classmethod\n def chk_pos_in_bounds(cls, input_seq, pos):\n \"\"\"\n Check the position is in-bounds with respect to the sequence.\n Accepted range for 'position' is in [-n, n - 1], where n is the\n number of tensors in 'input_sequence'.\n\n :param input_seq: input sequence\n :param pos: position of the output tensor\n\n :return: True if position is in-bounds \n \"\"\"\n seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]\n\n cond1 = tf.greater_equal(pos, tf.negative(seq_length))\n cond2 = tf.less_equal(pos, seq_length - 1)\n\n # pos >= -n and pos < n\n return tf.reduce_all(tf.logical_and(cond1, cond2))\n\n @classmethod\n def version_11(cls, node, **kwargs):\n tensor_dict = kwargs[\"tensor_dict\"]\n input_sequence = tensor_dict[node.inputs[0]]\n seq_length = tf.shape(input_sequence.to_sparse())[0]\n position = tensor_dict[node.inputs[1]] if len(\n node.inputs) == 2 else seq_length - 1\n\n # check whether position is in-bounds and assert if not\n result = cls.chk_pos_in_bounds(input_sequence, position)\n assert_pos = tf.Assert(tf.equal(result, True), [result])\n\n with tf.control_dependencies([assert_pos]):\n s1 = input_sequence[:position]\n s2 = input_sequence[position + 1:]\n return [tf.concat([s1, s2], axis=0)]\n" ]
[ [ "tensorflow.negative", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.less_equal", "tensorflow.equal", "tensorflow.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "1.0", "1.2" ] } ]
pangtao22/quasistatic_simulator
[ "7c6f99cc7237dd922f6eb0b54c580303e86b5223", "7c6f99cc7237dd922f6eb0b54c580303e86b5223" ]
[ "examples/planar_hand_ball/run_planar_hand.py", "examples/box_ball_graze_2d/visualize_piecewise_dynamics_old.py" ]
[ "import os\nimport numpy as np\n\nfrom pydrake.all import PiecewisePolynomial\n\nfrom examples.setup_simulations import (\n run_quasistatic_sim)\nfrom qsim.parser import QuasistaticParser, QuasistaticSystemBackend\nfrom qsim.model_paths import models_dir\nfrom qsim.simulator import GradientMode\n\n\n#%% sim setup\nq_model_path = os.path.join(models_dir, 'q_sys', 'planar_hand_ball.yml')\n\nh = 0.1\nT = int(round(2 / h)) # num of time steps to simulate forward.\nduration = T * h\n\n# model instance names.\nrobot_l_name = \"arm_left\"\nrobot_r_name = \"arm_right\"\nobject_name = \"sphere\"\n\n# trajectory and initial conditions.\nnq_a = 2\nqa_l_knots = np.zeros((2, nq_a))\nqa_l_knots[0] = [-np.pi / 4, -np.pi / 4]\nq_robot_l_traj = PiecewisePolynomial.ZeroOrderHold(\n [0, T * h], qa_l_knots.T)\n\nqa_r_knots = np.zeros((2, nq_a))\nqa_r_knots[0] = [np.pi / 4, np.pi / 4]\nq_robot_r_traj = PiecewisePolynomial.ZeroOrderHold(\n [0, T * h], qa_r_knots.T)\n\nq_a_traj_dict_str = {robot_l_name: q_robot_l_traj,\n robot_r_name: q_robot_r_traj}\n\nq_u0 = np.array([0, 0.5, 0])\n\nq0_dict_str = {object_name: q_u0,\n robot_l_name: qa_l_knots[0],\n robot_r_name: qa_r_knots[0]}\n\n\n#%% run sim.\nif __name__ == \"__main__\":\n q_parser = QuasistaticParser(q_model_path)\n q_parser.set_sim_params(is_quasi_dynamic=True, gravity=np.array([0, 0, -10.]))\n\n loggers_dict_quasistatic_str, q_sys = run_quasistatic_sim(\n q_parser=q_parser,\n h=h,\n backend=QuasistaticSystemBackend.PYTHON,\n q_a_traj_dict_str=q_a_traj_dict_str,\n q0_dict_str=q0_dict_str,\n is_visualizing=True,\n real_time_rate=1.0)\n\n#%% look into the plant.\n plant = q_sys.plant\n for model in q_sys.q_sim.models_all:\n print(model, plant.GetModelInstanceName(model),\n q_sys.q_sim.velocity_indices[model])\n\n#%% derivatives.\n q_sim = q_sys.q_sim\n name_to_model_dict = q_sim.get_model_instance_name_to_index_map()\n idx_l = name_to_model_dict[robot_l_name]\n idx_r = name_to_model_dict[robot_r_name]\n idx_o = name_to_model_dict[object_name]\n q_dict = {idx_o: [0, 0.316, 0],\n idx_l: [-0.775, -0.785],\n idx_r: [0.775, 0.785]}\n\n # numerical gradient\n dfdu_numerical = q_sim.calc_dfdu_numerical(\n q_dict=q_dict, qa_cmd_dict=q_dict, du=1e-3, h=h)\n\n # analytical gradient\n q_sim.update_mbp_positions(q_dict)\n tau_ext_dict = q_sim.calc_tau_ext([])\n q_sim.step(q_a_cmd_dict=q_dict, tau_ext_dict=tau_ext_dict, h=h,\n mode=\"qp_mp\", gradient_mode=GradientMode.kBOnly,\n grad_from_active_constraints=True)\n dfdu_active = q_sim.get_Dq_nextDqa_cmd()\n\n\n#%% index for tau_a.\n indices = []\n for model in q_sys.q_sim.models_actuated:\n indices += q_sys.q_sim.velocity_indices[model].tolist()\n indices.sort()\n indices_map = {j: i for i, j in enumerate(indices)}\n\n#%% construct q and v vectors of MBP from log.\n logger_qu = loggers_dict_quasistatic_str[object_name]\n q_log = np.zeros((T, plant.num_positions()))\n v_log = np.zeros((T, plant.num_velocities()))\n tau_a_log = np.zeros((T - 1, plant.num_actuated_dofs()))\n\n for name, logger in loggers_dict_quasistatic_str.items():\n model = name_to_model_dict[name]\n for i, j in enumerate(q_sys.q_sim.velocity_indices[model]):\n q_log[:, j] = logger.data().T[:, i]\n\n v_log[1:, :] = (q_log[1:, :] - q_log[:-1, :]) / h\n\n for name in robot_stiffness_dict.keys():\n model = name_to_model_dict[name]\n logger_qa = loggers_dict_quasistatic_str[name]\n idx_v = q_sys.q_sim.velocity_indices[model]\n idx_tau_a = [indices_map[i] for i in idx_v]\n for l in range(T - 1):\n qa_l = logger_qa.data().T[l]\n qa_l1_cmd = 
q_a_traj_dict_str[name].value((l + 1) * h).squeeze()\n tau_a_log[l][idx_tau_a] = Kp * (qa_l1_cmd - qa_l)\n\n", "import tqdm\nimport meshcat\nimport numpy as np\n\nfrom qsim_old.simulator import QuasistaticSimulator\nfrom qsim_old.problem_definition_graze import problem_definition\n\n#%% sim old\nq_sim = QuasistaticSimulator(problem_definition, is_quasi_dynamic=True)\nviz = meshcat.Visualizer(zmq_url=\"tcp://127.0.0.1:6000\")\n\n\n#%% sample dynamics\n# Sample actions between the box x \\in [-0.05, 0.05] and y \\in [-0.05, 0.05].\nn = 1000\nq_a_cmd = np.random.rand(n, 2) * 0.1 - 0.05\nq_next = np.zeros((n, 3))\n\nfor i in tqdm.tqdm(range(n)):\n q0 = np.array([0, 0, 0.])\n dq_a, dq_u, lambda_n, lambda_f, result = q_sim.step_anitescu(q0, q_a_cmd[i])\n q_next[i] = q0 + np.hstack([dq_u, dq_a])\n\n\n\n#%% plot the points\n# viz.delete()\nn_u = problem_definition['n_u']\nh = problem_definition['h']\ndynamics_lcp = np.hstack([q_a_cmd, q_next[:, :n_u]]) # [x_cmd, y_cmd, x_u_next]\ndiscontinuity_lcp = np.hstack([q_a_cmd[:, 0][:, None],\n q_next[:, 2][:, None],\n q_next[:, 0][:, None]])\ndiscontinuity2_lcp = np.hstack([q_a_cmd[:, 0][:, None],\n q_a_cmd[:, 1][:, None],\n q_next[:, 0][:, None]])\n\n\nviz[\"dynamics_lcp\"].set_object(\n meshcat.geometry.PointCloud(\n position=dynamics_lcp.T, color=np.ones_like(dynamics_lcp).T))\n#\n# viz[\"discontinuity_lcp\"].set_object(\n# meshcat.geometry.PointCloud(\n# position=discontinuity_lcp.T, color=np.ones_like(dynamics_lcp).T))\n#\n# viz[\"discontinuity2_lcp\"].set_object(\n# meshcat.geometry.PointCloud(\n# position=discontinuity2_lcp.T, color=np.zeros_like(dynamics_lcp).T))\n\n\n" ]
[ [ "numpy.array", "numpy.zeros" ], [ "numpy.hstack", "numpy.ones_like", "numpy.random.rand", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Paul-St-Young/eried
[ "63dbcab435a1fbe65b3b727a6c4b743497f60862" ]
[ "examples/02_min-h4/eri/diff_evals.py" ]
[ "#!/usr/bin/env python3\nimport yaml\nimport numpy as np\n\ndef read_evals(fyml):\n with open(fyml, 'r') as f:\n evd = yaml.safe_load(f)\n elist = evd['evals']\n return np.array(elist)\n\ndef main():\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('nup', type=int)\n parser.add_argument('ndn', type=int)\n parser.add_argument('--lam', type=float, default=1)\n parser.add_argument('--e2e', type=float, default=1)\n parser.add_argument('--tol', type=float, default=1e-12)\n parser.add_argument('--verbose', action='store_true')\n args = parser.parse_args()\n nup = args.nup\n ndn = args.ndn\n\n prefix = 'evals-l%f-e%f-nup%d-ndn%d' % (args.lam, args.e2e, nup, ndn)\n fyml0 = '../fci/%s.yml' % prefix\n fyml1 = '../eri/%s.yml' % prefix\n\n e0 = read_evals(fyml0)\n e1 = read_evals(fyml1)\n de = e1-e0\n sel = abs(de) > args.tol\n idx = np.where(sel)[0]\n print(idx)\n print(de[sel])\n\nif __name__ == '__main__':\n main() # set no global variable\n" ]
[ [ "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TheRockStarDBA/sqlmlutils
[ "956bdd72638a649f0e613f100fbb81c900dcb65e" ]
[ "Python/sqlmlutils/sqlpythonexecutor.py" ]
[ "# Copyright(c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT license.\r\n\r\nfrom typing import Callable\r\nimport dill\r\nfrom pandas import DataFrame\r\n\r\nfrom .connectioninfo import ConnectionInfo\r\nfrom .sqlqueryexecutor import execute_query, execute_raw_query\r\nfrom .sqlbuilder import SpeesBuilder, SpeesBuilderFromFunction, StoredProcedureBuilder, \\\r\n ExecuteStoredProcedureBuilder, DropStoredProcedureBuilder\r\nfrom .sqlbuilder import StoredProcedureBuilderFromFunction, RETURN_COLUMN_NAME\r\n\r\n\r\nclass SQLPythonExecutor:\r\n\r\n def __init__(self, connection_info: ConnectionInfo):\r\n self._connection_info = connection_info\r\n\r\n def execute_function_in_sql(self,\r\n func: Callable, *args,\r\n input_data_query: str = \"\",\r\n **kwargs):\r\n \"\"\"Execute a function in SQL Server.\r\n\r\n :param func: function to execute_function_in_sql. NOTE: This function is shipped to SQL as text.\r\n Functions should be self contained and import statements should be inline.\r\n :param args: positional args to pass to function to execute_function_in_sql.\r\n :param input_data_query: sql query to fill the first argument of the function. The argument gets the result of\r\n the query as a pandas DataFrame (uses the @input_data_1 parameter in sp_execute_external_script)\r\n :param kwargs: keyword arguments to pass to function to execute_function_in_sql.\r\n :return: value returned by func\r\n\r\n >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n >>>\r\n >>> def foo(val1, val2):\r\n >>> import math\r\n >>> print(val1)\r\n >>> return [math.cos(val2), math.cos(val2)]\r\n >>>\r\n >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AirlineTestDB\"))\r\n >>> ret = sqlpy.execute_function_in_sql(foo, val1=\"blah\", val2=5)\r\n blah\r\n >>> print(ret)\r\n [0.28366218546322625, 0.28366218546322625]\r\n \"\"\"\r\n rows = execute_query(SpeesBuilderFromFunction(func, input_data_query, *args, **kwargs), self._connection_info)\r\n return self._get_results(rows)\r\n\r\n def execute_script_in_sql(self,\r\n path_to_script: str,\r\n input_data_query: str = \"\"):\r\n \"\"\"Execute a script in SQL Server.\r\n\r\n :param path_to_script: file path to Python script to execute.\r\n :param input_data_query: sql query to fill InputDataSet global variable with.\r\n (@input_data_1 parameter in sp_execute_external_script)\r\n :return: None\r\n\r\n \"\"\"\r\n try:\r\n with open(path_to_script, 'r') as script_file:\r\n content = script_file.read()\r\n print(\"File does exist, using \" + path_to_script)\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"File does not exist!\")\r\n execute_query(SpeesBuilder(content, input_data_query=input_data_query), connection=self._connection_info)\r\n\r\n def execute_sql_query(self,\r\n sql_query: str,\r\n params = ()):\r\n \"\"\"Execute a sql query in SQL Server.\r\n\r\n :param sql_query: the sql query to execute in the server\r\n :return: table returned by the sql_query\r\n \"\"\"\r\n rows = execute_raw_query(conn=self._connection_info, query=sql_query, params=params)\r\n df = DataFrame(rows)\r\n\r\n # _mssql's execute_query() returns duplicate keys for indexing, we remove them because they are extraneous\r\n for i in range(len(df.columns)):\r\n try:\r\n del df[i]\r\n except KeyError:\r\n pass\r\n\r\n return df\r\n\r\n def create_sproc_from_function(self, name: str, func: Callable,\r\n input_params: dict = None, output_params: dict = None):\r\n \"\"\"Create a SQL Server stored procedure based on a Python 
function.\r\n NOTE: Type annotations are needed either in the function definition or in the input_params dictionary\r\n WARNING: Output parameters can be used when creating the stored procedure, but Stored Procedures with\r\n output parameters other than a single DataFrame cannot be executed with sqlmlutils\r\n\r\n :param name: name of stored procedure.\r\n :param func: function used to define stored procedure. parameters to the function are used to define parameters\r\n to the stored procedure. type annotations of the parameters are used to infer SQL types of parameters to the\r\n stored procedure. currently supported type annotations are \"str\", \"int\", \"float\", and \"DataFrame\".\r\n :param input_params: optional dictionary of type annotations for each argument to func;\r\n if func has type annotations this is not necessary. If both are provided, they must match\r\n :param output_params optional dictionary of type annotations for each output parameter\r\n :return: True if creation succeeded\r\n\r\n >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n >>>\r\n >>> def foo(val1: int, val2: str):\r\n >>> from pandas import DataFrame\r\n >>> print(val2)\r\n >>> df = DataFrame()\r\n >>> df[\"col1\"] = [val1, val1, val1]\r\n >>> return df\r\n >>>\r\n >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AutoRegressTestDB\"))\r\n >>> sqlpy.create_sproc_from_function(\"MyStoredProcedure\", foo, with_results_set=True)\r\n >>>\r\n >>> # You can execute_function_in_sql the procedure in the usual way from sql: exec MyStoredProcedure 5, 'bar'\r\n >>> # You can also call the stored procedure from Python\r\n >>> ret = sqlpy.execute_sproc(name=\"MyStoredProcedure\", val1=5, val2=\"bar\")\r\n >>> sqlpy.drop_sproc(name=\"MyStoredProcedure\")\r\n\r\n \"\"\"\r\n if input_params is None:\r\n input_params = {}\r\n if output_params is None:\r\n output_params = {}\r\n # Save the stored procedure in database\r\n execute_query(StoredProcedureBuilderFromFunction(name, func,\r\n input_params, output_params), self._connection_info)\r\n return True\r\n\r\n def create_sproc_from_script(self, name: str, path_to_script: str,\r\n input_params: dict = None, output_params: dict = None):\r\n \"\"\"Create a SQL Server stored procedure based on a Python script\r\n\r\n :param name: name of stored procedure.\r\n :param path_to_script: file path to Python script to create a sproc from.\r\n :param input_params: optional dictionary of type annotations for inputs in the script\r\n :param output_params optional dictionary of type annotations for each output variable\r\n :return: True if creation succeeded\r\n\r\n >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n >>>\r\n >>>\r\n >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AutoRegressTestDB\"))\r\n >>> sqlpy.create_sproc_from_script(name=\"script_sproc\", path_to_script=\"path/to/script\")\r\n >>>\r\n >>> # This will execute the script in sql; with no inputs or outputs it will just run and return nothing\r\n >>> sqlpy.execute_sproc(name=\"script_sproc\")\r\n >>> sqlpy.drop_sproc(name=\"script_sproc\")\r\n\r\n \"\"\"\r\n if input_params is None:\r\n input_params = {}\r\n if output_params is None:\r\n output_params = {}\r\n # Save the stored procedure in database\r\n try:\r\n with open(path_to_script, 'r') as script_file:\r\n content = script_file.read()\r\n print(\"File does exist, using \" + path_to_script)\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"File does not exist!\")\r\n\r\n 
execute_query(StoredProcedureBuilder(name, content,\r\n input_params, output_params), self._connection_info)\r\n return True\r\n\r\n def check_sproc(self, name: str) -> bool:\r\n \"\"\"Check to see if a SQL Server stored procedure exists in the database.\r\n\r\n >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n >>>\r\n >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AutoRegressTestDB\"))\r\n >>> if sqlpy.check_sproc(\"MyStoredProcedure\"):\r\n >>> print(\"MyStoredProcedure exists\")\r\n >>> else:\r\n >>> print(\"MyStoredProcedure does not exist\")\r\n\r\n :param name: name of stored procedure.\r\n :return: boolean whether the Stored Procedure exists in the database\r\n \"\"\"\r\n check_query = \"SELECT OBJECT_ID (%s, N'P')\"\r\n rows = execute_raw_query(conn=self._connection_info, query=check_query, params=name)\r\n return rows[0][0] is not None\r\n\r\n def execute_sproc(self, name: str, **kwargs) -> DataFrame:\r\n \"\"\"Call a stored procedure on a SQL Server database.\r\n WARNING: Output parameters can be used when creating the stored procedure, but Stored Procedures with\r\n output parameters other than a single DataFrame cannot be executed with sqlmlutils\r\n\r\n :param name: name of stored procedure.\r\n :param kwargs: keyword arguments to pass to stored procedure\r\n :return: DataFrame representing the output data set of the stored procedure (or empty)\r\n \"\"\"\r\n return DataFrame(execute_query(ExecuteStoredProcedureBuilder(name, **kwargs), self._connection_info))\r\n\r\n def drop_sproc(self, name: str):\r\n \"\"\"Drop a SQL Server stored procedure if it exists.\r\n\r\n :param name: name of stored procedure.\r\n :return: None\r\n \"\"\"\r\n if self.check_sproc(name):\r\n execute_query(DropStoredProcedureBuilder(name), self._connection_info)\r\n\r\n @staticmethod\r\n def _get_results(rows):\r\n hexstring = rows[0][RETURN_COLUMN_NAME]\r\n return dill.loads(bytes.fromhex(hexstring))\r\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
xadupre/keras-onnx
[ "17559f987ecce7ec40ab8a36a9596eb950f9b332", "17559f987ecce7ec40ab8a36a9596eb950f9b332" ]
[ "keras2onnx/ke2onnx/lstm.py", "applications/nightly_build/test_nasnet_mobile.py" ]
[ "###############################################################################\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\nimport numbers\nimport numpy as np\nfrom collections.abc import Iterable\nfrom ..common import cvtfunc, name_func\nfrom ..common.onnx_ops import (\n apply_concat,\n apply_gather,\n apply_reshape,\n apply_shape,\n apply_split,\n apply_squeeze,\n apply_unsqueeze,\n apply_transpose,\n OnnxOperatorBuilder\n)\nfrom ..proto import onnx_proto, keras\nfrom . import simplernn\nfrom onnx import numpy_helper\n\nLSTM = keras.layers.LSTM\nTensorProto = onnx_proto.TensorProto\n\n\ndef convert_ifco_to_iofc(tensor_ifco):\n \"\"\"Returns a tensor in input (i), output (o), forget (f), cell (c) ordering. The\n Keras ordering is ifco, while the ONNX ordering is iofc.\n \"\"\"\n splits = np.split(tensor_ifco, 4)\n return np.concatenate((splits[0], splits[3], splits[1], splits[2]))\n\n\ndef extract_params(op, hidden_size, input_size):\n \"\"\"Returns a tuple of the LSTM parameters, and converts them into the format for ONNX.\n \"\"\"\n params = op.get_weights()\n\n # Keras: [W_x, W_h, b] each in I F C O\n # ONNX: W[iofc] I O F C\n W_x = convert_ifco_to_iofc(params[0].T).reshape(4, hidden_size, input_size)\n W_h = convert_ifco_to_iofc(params[1].T).reshape(4, hidden_size, hidden_size)\n\n b = None\n if op.use_bias:\n b = np.zeros((8, hidden_size), dtype=np.float32)\n b[:4] = convert_ifco_to_iofc(params[2]).reshape(4, hidden_size)\n\n return W_x, W_h, b\n\n\ndef build_parameters(scope, operator, container, bidirectional=False):\n \"\"\"Returns the parameter initialization values after extracting them from the LSTM layer.\n \"\"\"\n op = operator.raw_operator\n _, seq_length, input_size = simplernn.extract_input_shape(op)\n\n _name = name_func(scope, operator)\n\n tensor_w = _name('W')\n tensor_r = _name('R')\n tensor_b = ''\n\n if bidirectional:\n forward_layer = op.forward_layer\n backward_layer = op.backward_layer\n hidden_size = forward_layer.units\n\n W_x, W_h, b = extract_params(forward_layer, hidden_size, input_size)\n W_x_back, W_h_back, b_back = extract_params(backward_layer, hidden_size, input_size)\n\n W = np.concatenate([W_x, W_x_back]).flatten()\n W_shape = [2, 4 * hidden_size, input_size]\n\n R = np.concatenate([W_h, W_h_back]).flatten()\n R_shape = [2, 4 * hidden_size, hidden_size]\n\n if (b is None and b_back is not None) or (b is not None and b_back is None):\n raise ValueError('Bidirectional bias must be enabled (or disabled) for both forward '\n 'and backward layers.')\n\n if b is not None:\n B = np.concatenate([b, b_back]).flatten()\n B_shape = [2, 8 * hidden_size]\n\n else:\n hidden_size = op.units\n\n W_x, W_h, b = extract_params(op, hidden_size, input_size)\n\n W = W_x.flatten()\n W_shape = [1, 4 * hidden_size, input_size]\n\n R = W_h.flatten()\n R_shape = [1, 4 * hidden_size, hidden_size]\n\n if b is not None:\n B = b.flatten()\n B_shape = [1, 8 * hidden_size]\n\n # Create initializers\n container.add_initializer(tensor_w, TensorProto.FLOAT, W_shape, W)\n container.add_initializer(tensor_r, TensorProto.FLOAT, R_shape, R)\n\n if b is not None:\n tensor_b = _name('B')\n container.add_initializer(tensor_b, TensorProto.FLOAT, B_shape, B)\n\n return tensor_w, tensor_r, tensor_b\n\n\ndef build_initial_states(scope, operator, container, bidirectional=False):\n \"\"\"Builds the 
initial hidden and cell states for the LSTM layer.\n \"\"\"\n _name = name_func(scope, operator)\n\n initial_h = simplernn.build_initial_states(scope, operator, container, bidirectional)\n\n # Determine if the cell states are set\n has_c = (\n (len(operator.inputs) > 1 and not bidirectional) or\n (len(operator.inputs) > 3 and bidirectional)\n )\n if not has_c:\n return initial_h, ''\n\n op = operator.raw_operator\n initial_c = _name('initial_c')\n\n if bidirectional:\n forward_layer = op.forward_layer\n hidden_size = forward_layer.units\n desired_shape = [1, -1, hidden_size]\n\n # Combine the forward and backward_layers\n forward_h = _name('initial_c_forward')\n backward_h = _name('initial_c_backward')\n apply_reshape(scope, operator.inputs[2].full_name, forward_h, container, desired_shape=desired_shape)\n apply_reshape(scope, operator.inputs[4].full_name, backward_h, container, desired_shape=desired_shape)\n\n apply_concat(scope, [forward_h, backward_h], initial_c, container)\n\n else:\n # Unsqueeze dim 0 to represent num_directions\n input_c = operator.inputs[2].full_name\n apply_unsqueeze(scope, input_c, initial_c, container, axes=[0])\n\n return initial_h, initial_c\n\n\ndef build_attributes(scope, operator, container, bidirectional=False):\n \"\"\"Returns a dictionary of attributes for the LSTM layer.\n \"\"\"\n op = operator.raw_operator\n\n attrs = {}\n\n if bidirectional:\n forward_layer = op.forward_layer\n backward_layer = op.backward_layer\n\n attrs['direction'] = 'bidirectional'\n attrs['hidden_size'] = forward_layer.units\n attrs.update(simplernn.extract_activations([\n forward_layer.recurrent_activation,\n forward_layer.activation,\n forward_layer.activation,\n backward_layer.recurrent_activation,\n backward_layer.activation,\n backward_layer.activation,\n ]))\n\n else:\n attrs['direction'] = 'reverse' if op.go_backwards else 'forward'\n attrs['hidden_size'] = op.units\n attrs.update(simplernn.extract_activations([\n op.recurrent_activation,\n op.activation,\n op.activation,\n ]))\n return attrs\n\n\ndef build_output(scope, operator, container, output_names, direction='forward'):\n \"\"\"Builds the output operators for the LSTM layer.\n \"\"\"\n bidirectional = True if direction == 'bidirectional' else False\n\n if bidirectional:\n return simplernn.build_output(scope, operator, container, output_names[:-1], bidirectional)\n\n lstm_y, lstm_h, lstm_c = output_names\n\n op = operator.raw_operator\n output_seq = op.return_sequences\n _, seq_length, input_size = simplernn.extract_input_shape(op)\n\n _name = name_func(scope, operator)\n\n output_name = operator.outputs[0].full_name\n\n time_major = simplernn.is_time_major(op, bidirectional)\n # Create output-adjusting operators\n if output_seq:\n # Squeeze the num_direction dim as we know its size is 1 for\n # lstm(forward/reverse).\n is_reverse = True if direction == 'reverse' else False\n lstm_out = output_name if time_major else _name('y_squeezed')\n squeeze_out = lstm_out if not is_reverse else _name('y_squeezed')\n apply_squeeze(scope, lstm_y, squeeze_out, container, axes=[1])\n\n if time_major:\n if is_reverse:\n reverse_sequence(scope, container, lstm_out, output_name, name=_name('reverse_seq'), axes=[0])\n\n else:\n # Onnx LSTM produces time major output. 
Add a transpose operator to\n # make it batch_major, if the keras op was not time_major.\n # This transforms [ S, B, I] -> [ B, S, I ] where B is\n # batch_size and S is seq_len.\n perm = [1, 0, 2]\n transpose_out = output_name if not is_reverse else _name('transpose')\n apply_transpose(scope, squeeze_out, transpose_out, container, perm=perm)\n if is_reverse:\n reverse_sequence(scope, container, transpose_out, output_name, name=_name('reverse_seq'), axes=[1])\n\n else:\n apply_squeeze(scope, lstm_h, output_name, container, axes=[0])\n\n\ndef reverse_sequence(scope, container, input_name, output_name, name, axes):\n oopb = OnnxOperatorBuilder(container, scope)\n rv2_in_names = [input_name]\n apply_shape(scope, input_name, input_name + '_shape', container)\n rv2_node_name = name\n inputs = rv2_in_names\n\n axis = axes[0]\n batch_axis = 1 if axis != 1 else 0\n\n const_batch = numpy_helper.from_array(np.array([batch_axis], dtype=np.int64), rv2_node_name + '_const_batch')\n container.add_initializer_from_tensor(const_batch)\n const_axis = numpy_helper.from_array(np.array([axis], dtype=np.int64), rv2_node_name + '_const_axis')\n container.add_initializer_from_tensor(const_axis)\n\n apply_gather(scope, [input_name + '_shape', const_batch.name], rv2_node_name + '_gather_batch', container)\n apply_gather(scope, [input_name + '_shape', const_axis.name], rv2_node_name + '_gather_axis', container)\n seq_array = oopb.add_node('Expand', [rv2_node_name + '_gather_axis', rv2_node_name + '_gather_batch'],\n rv2_node_name + '_expand')\n inputs.append(seq_array)\n\n res_seq_node = oopb.add_node('ReverseSequence', inputs, name=rv2_node_name + '_rev_seq', batch_axis=batch_axis,\n time_axis=axis, op_version=10)\n\n oopb.apply_op_with_output('apply_identity', [res_seq_node], [output_name],\n name=rv2_node_name + '_Identity')\n\n\ndef build_output_states(scope, operator, container, output_names, bidirectional=False):\n \"\"\"Builds the output hidden states for the LSTM layer.\n \"\"\"\n _, lstm_h, lstm_c = output_names\n op = operator.raw_operator\n\n if bidirectional:\n forward_layer = op.forward_layer\n output_state = forward_layer.return_state\n\n if not output_state:\n return\n\n # Split lstm_h and lstm_c into forward and backward components\n squeeze_names = []\n output_names = [o.full_name for o in operator.outputs[1:]]\n name_map = {lstm_h: output_names[::2], lstm_c: output_names[1::2]}\n\n for state_name, outputs in name_map.items():\n split_names = ['{}_{}'.format(state_name, d) for d in ('forward', 'backward')]\n\n apply_split(scope, state_name, split_names, container)\n squeeze_names.extend(list(zip(split_names, outputs)))\n\n for split_name, output_name in squeeze_names:\n apply_squeeze(scope, split_name, output_name, container)\n\n else:\n output_state = op.return_state\n\n if not output_state:\n return\n\n output_h = operator.outputs[1].full_name\n output_c = operator.outputs[2].full_name\n apply_squeeze(scope, lstm_h, output_h, container)\n apply_squeeze(scope, lstm_c, output_c, container)\n\n\ndef _calculate_keras_lstm_output_shapes(operator):\n op = operator.raw_operator\n if isinstance(op.output_shape[0], Iterable):\n operator.outputs[0].type.shape = list(i if isinstance(i, numbers.Integral) else None\n for i in op.output_shape[0])\n else:\n operator.outputs[0].type.shape = list(i if isinstance(i, numbers.Integral) else None for i in op.output_shape)\n\n\n@cvtfunc(shape_infer=_calculate_keras_lstm_output_shapes)\ndef convert_keras_lstm(scope, operator, container, bidirectional=False):\n op = 
operator.raw_operator\n _name = name_func(scope, operator)\n\n if bidirectional:\n output_seq = op.forward_layer.return_sequences\n else:\n output_seq = op.return_sequences\n\n time_major = simplernn.is_time_major(op, bidirectional)\n\n # Inputs\n lstm_x = operator.inputs[0].full_name\n if not time_major:\n # If the keras op was not time_major, we add a transpose op to make the\n # input time_major as ONNX lstm expects time_major input.\n # Transform [ B, S, I ] -> [ S, B, I] where B is batch_size and S is\n # seq_len.\n lstm_x = _name('X')\n apply_transpose(scope, operator.inputs[0].full_name, lstm_x, container, perm=[1, 0, 2])\n\n tensor_w, tensor_r, tensor_b = build_parameters(scope, operator, container, bidirectional)\n sequence_lengths = simplernn.build_sequence_lengths(scope, operator, container)\n initial_h, initial_c = build_initial_states(scope, operator, container, bidirectional)\n\n input_names = [\n lstm_x,\n tensor_w,\n tensor_r,\n tensor_b,\n sequence_lengths,\n initial_h,\n initial_c,\n '', # P (optional) : No peep hole in Keras.\n ]\n\n # Attributes\n attrs = build_attributes(scope, operator, container, bidirectional)\n\n # Outputs\n output_names = [_name('Y'), _name('Y_h'), _name('Y_c')]\n\n oopb = OnnxOperatorBuilder(container, scope)\n oopb.apply_op_with_output('apply_lstm',\n input_names,\n output_names,\n name=op.name,\n output_seq=output_seq,\n **attrs)\n\n build_output(scope, operator, container, output_names, attrs['direction'])\n build_output_states(scope, operator, container, output_names, bidirectional)\n", "###############################################################################\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\nimport os\nimport sys\nimport unittest\nimport keras2onnx\nimport numpy as np\nfrom keras2onnx.proto import keras\nfrom os.path import dirname, abspath\nsys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))\nfrom test_utils import run_keras_and_ort, test_level_0\nK = keras.backend\n\nActivation = keras.layers.Activation\nAveragePooling2D = keras.layers.AveragePooling2D\nAdd = keras.layers.Add\nadd = keras.layers.add\nBatchNormalization = keras.layers.BatchNormalization\nconcatenate = keras.layers.concatenate\nConv2D = keras.layers.Conv2D\nCropping2D = keras.layers.Cropping2D\nDense = keras.layers.Dense\nDropout = keras.layers.Dropout\nEmbedding = keras.layers.Embedding\nFlatten = keras.layers.Flatten\nGlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D\nGlobalMaxPooling2D = keras.layers.GlobalMaxPooling2D\nInput = keras.layers.Input\nLambda = keras.layers.Lambda\nLeakyReLU = keras.layers.LeakyReLU\nMaxPooling2D = keras.layers.MaxPooling2D\nmultiply = keras.layers.multiply\nPermute = keras.layers.Permute\nReshape = keras.layers.Reshape\nSeparableConv2D = keras.layers.SeparableConv2D\nUpSampling2D = keras.layers.UpSampling2D\nZeroPadding2D = keras.layers.ZeroPadding2D\n\nSequential = keras.models.Sequential\nModel = keras.models.Model\nfrom keras.regularizers import l2\n\n\ndef _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), weight_decay=5e-5, id=None):\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('separable_conv_block_%s' % id):\n x = Activation('relu')(ip)\n x = SeparableConv2D(filters, kernel_size, strides=strides, name='separable_conv_1_%s' 
% id,\n padding='same', use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=l2(weight_decay))(x)\n x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name=\"separable_conv_1_bn_%s\" % (id))(x)\n x = Activation('relu')(x)\n x = SeparableConv2D(filters, kernel_size, name='separable_conv_2_%s' % id,\n padding='same', use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=l2(weight_decay))(x)\n x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name=\"separable_conv_2_bn_%s\" % (id))(x)\n return x\n\n\ndef _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n img_dim = 2 if K.image_data_format() == 'channels_first' else -2\n\n with K.name_scope('adjust_block'):\n if p is None:\n p = ip\n\n elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:\n with K.name_scope('adjust_reduction_block_%s' % id):\n p = Activation('relu', name='adjust_relu_1_%s' % id)(p)\n\n p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_1_%s' % id)(p)\n p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),\n name='adjust_conv_1_%s' % id, kernel_initializer='he_normal')(p1)\n\n p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)\n p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)\n p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)\n p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),\n name='adjust_conv_2_%s' % id, kernel_initializer='he_normal')(p2)\n\n p = concatenate([p1, p2], axis=channel_dim)\n p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name='adjust_bn_%s' % id)(p)\n\n elif p._keras_shape[channel_dim] != filters:\n with K.name_scope('adjust_projection_block_%s' % id):\n p = Activation('relu')(p)\n p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,\n use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal')(p)\n p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name='adjust_bn_%s' % id)(p)\n return p\n\n\ndef _normal_A(ip, p, filters, weight_decay=5e-5, id=None):\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('normal_A_block_%s' % id):\n p = _adjust_block(p, ip, filters, weight_decay, id)\n\n h = Activation('relu')(ip)\n h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='normal_conv_1_%s' % id,\n use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(h)\n h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name='normal_bn_1_%s' % id)(h)\n\n with K.name_scope('block_1'):\n x1_1 = _separable_conv_block(h, filters, kernel_size=(5, 5), weight_decay=weight_decay,\n id='normal_left1_%s' % id)\n x1_2 = _separable_conv_block(p, filters, weight_decay=weight_decay, id='normal_right1_%s' % id)\n x1 = add([x1_1, x1_2], name='normal_add_1_%s' % id)\n\n with K.name_scope('block_2'):\n x2_1 = _separable_conv_block(p, filters, (5, 5), weight_decay=weight_decay, id='normal_left2_%s' % id)\n x2_2 = _separable_conv_block(p, filters, (3, 3), weight_decay=weight_decay, id='normal_right2_%s' % id)\n x2 = add([x2_1, x2_2], name='normal_add_2_%s' % id)\n\n with K.name_scope('block_3'):\n x3 = AveragePooling2D((3, 3), 
strides=(1, 1), padding='same', name='normal_left3_%s' % (id))(h)\n x3 = add([x3, p], name='normal_add_3_%s' % id)\n\n with K.name_scope('block_4'):\n x4_1 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_left4_%s' % (id))(p)\n x4_2 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_right4_%s' % (id))(p)\n x4 = add([x4_1, x4_2], name='normal_add_4_%s' % id)\n\n with K.name_scope('block_5'):\n x5 = _separable_conv_block(h, filters, weight_decay=weight_decay, id='normal_left5_%s' % id)\n x5 = add([x5, h], name='normal_add_5_%s' % id)\n\n x = concatenate([p, x1, x2, x3, x4, x5], axis=channel_dim, name='normal_concat_%s' % id)\n return x, ip\n\n\ndef _reduction_A(ip, p, filters, weight_decay=5e-5, id=None):\n \"\"\"\"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('reduction_A_block_%s' % id):\n p = _adjust_block(p, ip, filters, weight_decay, id)\n\n h = Activation('relu')(ip)\n h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='reduction_conv_1_%s' % id,\n use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(h)\n h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name='reduction_bn_1_%s' % id)(h)\n\n with K.name_scope('block_1'):\n x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), weight_decay=weight_decay,\n id='reduction_left1_%s' % id)\n x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), weight_decay=weight_decay,\n id='reduction_1_%s' % id)\n x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % id)\n\n with K.name_scope('block_2'):\n x2_1 = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_left2_%s' % id)(h)\n x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), weight_decay=weight_decay,\n id='reduction_right2_%s' % id)\n x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % id)\n\n with K.name_scope('block_3'):\n x3_1 = AveragePooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_left3_%s' % id)(h)\n x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2), weight_decay=weight_decay,\n id='reduction_right3_%s' % id)\n x3 = add([x3_1, x3_2], name='reduction_add3_%s' % id)\n\n with K.name_scope('block_4'):\n x4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='reduction_left4_%s' % id)(x1)\n x4 = add([x2, x4])\n\n with K.name_scope('block_5'):\n x5_1 = _separable_conv_block(x1, filters, (3, 3), weight_decay=weight_decay, id='reduction_left4_%s' % id)\n x5_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_right5_%s' % id)(h)\n x5 = add([x5_1, x5_2], name='reduction_add4_%s' % id)\n\n x = concatenate([x2, x3, x4, x5], axis=channel_dim, name='reduction_concat_%s' % id)\n return x, ip\n\n\ndef _add_auxiliary_head(x, classes, weight_decay):\n img_height = 1 if K.image_data_format() == 'channels_last' else 2\n img_width = 2 if K.image_data_format() == 'channels_last' else 3\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('auxiliary_branch'):\n auxiliary_x = Activation('relu')(x)\n auxiliary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid', name='aux_pool')(auxiliary_x)\n auxiliary_x = Conv2D(128, (1, 1), padding='same', use_bias=False, name='aux_conv_projection',\n kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(auxiliary_x)\n auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n 
name='aux_bn_projection')(auxiliary_x)\n auxiliary_x = Activation('relu')(auxiliary_x)\n\n auxiliary_x = Conv2D(768, (auxiliary_x._keras_shape[img_height], auxiliary_x._keras_shape[img_width]),\n padding='valid', use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=l2(weight_decay), name='aux_conv_reduction')(auxiliary_x)\n auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name='aux_bn_reduction')(auxiliary_x)\n auxiliary_x = Activation('relu')(auxiliary_x)\n\n auxiliary_x = GlobalAveragePooling2D()(auxiliary_x)\n auxiliary_x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay),\n name='aux_predictions')(auxiliary_x)\n return auxiliary_x\n\n\ndef NASNet(input_shape=None,\n penultimate_filters=4032,\n nb_blocks=6,\n stem_filters=96,\n skip_reduction=True,\n use_auxiliary_branch=False,\n filters_multiplier=2,\n dropout=0.5,\n weight_decay=5e-5,\n include_top=True,\n weights=None,\n input_tensor=None,\n pooling=None,\n classes=1000,\n default_size=None):\n if K.backend() != 'tensorflow':\n raise RuntimeError('Only Tensorflow backend is currently supported, '\n 'as other backends do not support '\n 'separable convolution.')\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as ImageNet with `include_top` '\n 'as true, `classes` should be 1000')\n\n if default_size is None:\n default_size = 331\n\n K.set_image_data_format('channels_last')\n old_data_format = 'channels_first'\n\n img_input = Input(shape=input_shape)\n\n assert penultimate_filters % 24 == 0, \"`penultimate_filters` needs to be divisible \" \\\n \"by 24.\"\n\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n filters = penultimate_filters // 24\n\n if not skip_reduction:\n x = Conv2D(stem_filters, (3, 3), strides=(2, 2), padding='valid', use_bias=False, name='stem_conv1',\n kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(img_input)\n else:\n x = Conv2D(stem_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False, name='stem_conv1',\n kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(img_input)\n\n x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,\n name='stem_bn1')(x)\n\n p = None\n if not skip_reduction: # imagenet / mobile mode\n x, p = _reduction_A(x, p, filters // (filters_multiplier ** 2), weight_decay, id='stem_1')\n x, p = _reduction_A(x, p, filters // filters_multiplier, weight_decay, id='stem_2')\n\n for i in range(nb_blocks):\n x, p = _normal_A(x, p, filters, weight_decay, id='%d' % (i))\n\n x, p0 = _reduction_A(x, p, filters * filters_multiplier, weight_decay, id='reduce_%d' % (nb_blocks))\n\n p = p0 if not skip_reduction else p\n\n for i in range(nb_blocks):\n x, p = _normal_A(x, p, filters * filters_multiplier, weight_decay, id='%d' % (nb_blocks + i + 1))\n\n auxiliary_x = None\n if not skip_reduction: # imagenet / mobile mode\n if use_auxiliary_branch:\n auxiliary_x = _add_auxiliary_head(x, classes, weight_decay)\n\n x, p0 = _reduction_A(x, p, filters * filters_multiplier ** 2, weight_decay, id='reduce_%d' % (2 * nb_blocks))\n\n if skip_reduction: # CIFAR mode\n if use_auxiliary_branch:\n auxiliary_x = _add_auxiliary_head(x, classes, weight_decay)\n\n p = p0 if not skip_reduction else p\n\n for i in 
range(nb_blocks):\n x, p = _normal_A(x, p, filters * filters_multiplier ** 2, weight_decay, id='%d' % (2 * nb_blocks + i + 1))\n\n x = Activation('relu')(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dropout(dropout)(x)\n x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay), name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n inputs = img_input\n\n # Create model.\n if use_auxiliary_branch:\n model = Model(inputs, [x, auxiliary_x], name='NASNet_with_auxiliary')\n else:\n model = Model(inputs, x, name='NASNet')\n\n if old_data_format:\n K.set_image_data_format(old_data_format)\n\n return model\n\n\ndef NASNetMobile(input_shape=(224, 224, 3),\n dropout=0.5,\n weight_decay=4e-5,\n use_auxiliary_branch=False,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n pooling=None,\n classes=1000):\n global _BN_DECAY, _BN_EPSILON\n _BN_DECAY = 0.9997\n _BN_EPSILON = 1e-3\n\n return NASNet(input_shape,\n penultimate_filters=1056,\n nb_blocks=4,\n stem_filters=32,\n skip_reduction=False,\n use_auxiliary_branch=use_auxiliary_branch,\n filters_multiplier=2,\n dropout=dropout,\n weight_decay=weight_decay,\n include_top=include_top,\n weights=weights,\n input_tensor=input_tensor,\n pooling=pooling,\n classes=classes,\n default_size=224)\n\n\n# Model from https://github.com/titu1994/neural-image-assessment/blob/master/utils/nasnet.py\nclass TestNASNetMobile(unittest.TestCase):\n\n def setUp(self):\n self.model_files = []\n\n def tearDown(self):\n for fl in self.model_files:\n os.remove(fl)\n\n @unittest.skipIf(test_level_0,\n \"Test level 0 only.\")\n def test_NASNetMobile(self):\n K.clear_session()\n keras_model = NASNetMobile()\n data = np.random.rand(2, 224, 224, 3).astype(np.float32)\n expected = keras_model.predict(data)\n onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)\n self.assertTrue(\n run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.concatenate", "numpy.split", "numpy.array", "numpy.zeros" ], [ "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fwitte/chp_orc
[ "509abf4faf2a5d08ef8311a0f2a8c75e1bbba95e" ]
[ "Optimization/app.py" ]
[ "# %%\n\n\nfrom CoolProp.CoolProp import PropsSI\nimport pygmo as pg\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom orc import ORC_without_ihe, CHPORC\nfrom tespy.components import HeatExchanger, Merge, Pump, Sink, Source, Splitter\nfrom tespy.components.heat_exchangers.condenser import Condenser\nfrom tespy.connections import Bus, Connection, Ref\nfrom tespy.networks import Network\nfrom opt import MultivariateOptimizationProblem\nimport json\nimport sys\nimport os\n\ndef variant_4(baseplant):\n\n # district heating system\n dh_return_temperature = 60\n dh_feed_temperature = 40\n dh_pressure = 5\n\n # components\n geo_splitter = Splitter(\"geo splitter\")\n geo_merge = Merge(\"geo merge\")\n\n # pump for district heating system?\n dh_source = Source(\"dh return\")\n dh_sink = Sink(\"dh feed\")\n dh_heat_exchanger = HeatExchanger(\"dh heat exchanger\")\n\n baseplant.nw.del_conns(*baseplant.nw.get_conn([\"22\", \"27\"]))\n\n c22 = Connection(baseplant.nw.get_comp(\"evaporator\"), \"out1\", geo_splitter, \"in1\", label=\"22\")\n\n # district heating\n c23 = Connection(geo_splitter, \"out1\", dh_heat_exchanger, \"in1\", label=\"23\")\n c24 = Connection(dh_heat_exchanger, \"out1\", geo_merge, \"in1\", label=\"24\")\n\n # orc\n c25 = Connection(geo_splitter, \"out2\", baseplant.nw.get_comp(\"preheater\"), \"in1\", label=\"25\")\n c26 = Connection(baseplant.nw.get_comp(\"preheater\"), \"out1\", geo_merge, \"in2\", label=\"26\")\n\n c27 = Connection(\n geo_merge, \"out1\", baseplant.nw.get_comp(\"geo re-injection\"), \"in1\", label=\"27\"\n )\n baseplant.nw.add_conns(c22, c23, c24, c25, c26, c27)\n\n # district heating\n c31 = Connection(dh_source, \"out1\", dh_heat_exchanger, \"in2\", label=\"31\")\n c32 = Connection(dh_heat_exchanger, \"out2\", dh_sink, \"in1\", label=\"32\")\n\n baseplant.nw.add_conns(c31, c32)\n\n # no pr1 required, parallel to preheater\n dh_heat_exchanger.set_attr(pr2=0.98)\n c31.set_attr(\n fluid={baseplant.working_fluid: 0, \"water\": 1}, T=dh_feed_temperature, p=dh_pressure\n )\n c32.set_attr(T=dh_return_temperature)\n\n # reinjection temperature specification\n c26.set_attr(T=70)\n c24.set_attr(T=70)\n\n # solve the network\n baseplant.nw.solve(\"design\")\n baseplant.nw.print_results()\n\n\ndef variant_3(nw):\n\n # district heating system\n dh_return_temperature = 60\n dh_feed_temperature = 40\n dh_pressure = 5\n\n # components\n geo_splitter = Splitter(\"geo splitter\")\n geo_merge = Merge(\"geo merge\")\n\n # pump for district heating system?\n dh_source = Source(\"dh return\")\n dh_sink = Sink(\"dh feed\")\n dh_heat_exchanger1 = HeatExchanger(\"dh heat exchanger 1\")\n dh_heat_exchanger2 = HeatExchanger(\"dh heat exchanger 2\")\n\n nw.del_conns(*nw.get_conn([\"21\", \"27\"]))\n\n c21_0 = Connection(\n nw.get_comp(\"geo source\"), \"out1\", geo_splitter, \"in1\", label=\"21_0\"\n )\n c21_1 = Connection(\n geo_splitter, \"out1\", nw.get_comp(\"evaporator\"), \"in1\", label=\"21_1\"\n )\n c23 = Connection(geo_splitter, \"out2\", dh_heat_exchanger2, \"in1\", label=\"23\")\n\n # district heating\n c24 = Connection(dh_heat_exchanger2, \"out1\", geo_merge, \"in1\", label=\"24\")\n c25 = Connection(\n nw.get_comp(\"preheater\"), \"out1\", dh_heat_exchanger1, \"in1\", label=\"25\"\n )\n c26 = Connection(dh_heat_exchanger1, \"out1\", geo_merge, \"in2\", label=\"26\")\n\n c27 = Connection(\n geo_merge, \"out1\", nw.get_comp(\"geo re-injection\"), \"in1\", label=\"27\"\n )\n nw.add_conns(c21_0, c21_1, c23, c24, c25, c26, c27)\n\n # district heating\n c31 = 
Connection(dh_source, \"out1\", dh_heat_exchanger1, \"in2\", label=\"31\")\n c32 = Connection(dh_heat_exchanger1, \"out2\", dh_heat_exchanger2, \"in2\", label=\"32\")\n c33 = Connection(dh_heat_exchanger2, \"out2\", dh_sink, \"in1\", label=\"33\")\n\n nw.add_conns(c31, c32, c33)\n\n dh_heat_exchanger1.set_attr(pr1=0.98, pr2=0.98)\n # no pr1 required, parallel to ORC/dh_heat_exchanger1\n dh_heat_exchanger2.set_attr(pr2=0.98)\n c21_0.set_attr(fluid={working_fluid: 0, \"water\": 1}, T=100, p=25, m=10)\n c31.set_attr(\n fluid={working_fluid: 0, \"water\": 1}, T=dh_feed_temperature, p=dh_pressure\n )\n c32.set_attr(T=(dh_feed_temperature + dh_return_temperature) / 2)\n c33.set_attr(T=dh_return_temperature)\n\n # reinjection temperature specification\n c26.set_attr(T=70)\n c24.set_attr(T=70)\n\n # solve the network\n nw.solve(\"design\")\n\n P = []\n Q = []\n T_range = [42, 44, 46, 48, 50, 52, 54, 56, 58]\n for T in T_range:\n c32.set_attr(T=T)\n nw.solve(\"design\")\n P += [abs(nw.get_comp(\"turbine\").P.val)]\n Q += [abs(dh_heat_exchanger1.Q.val + dh_heat_exchanger2.Q.val)]\n\n fig, ax = plt.subplots(2, 1)\n ax[0].plot(T_range, P)\n ax[0].grid()\n ax[0].set_ylabel(\"Turbine power\")\n ax[1].plot(T_range, Q)\n ax[1].grid()\n ax[1].set_xlabel(\"Temperature between heat exchangers\")\n ax[1].set_ylabel(\"District heating system heat\")\n fig.savefig(working_fluid + \".png\")\n plt.close()\n\n# create base plant and supply functionalities\nplant = CHPORC(\"R134a\")\n# modify the plant structure\nvariant_4(plant)\n# solve mode with specified parameters\nplant.nw.print_results()\n\n\n# make a trivial test:\n# -(un)specifiy some boundary conditions\n# -set some connection and component variables\n# -set a lower limit constraint\n\nwith open(sys.argv[1], 'r') as f:\n input_data = json.load(f)\n f.close()\n\nboundary_conditions = input_data['boundary_conditions']\nvariables = input_data['variables']\nconstraints = input_data['constraints']\nobjective = input_data['objective']\n\nplant.set_params(**boundary_conditions)\n\nnum_gen = input_data['num_gen']\nnum_ind = input_data['num_ind']\n# this should be outside of the optimitzation class\n\noptimize = MultivariateOptimizationProblem(plant, variables, constraints, objective)\n\n# this must be outside of\nalgo = pg.ihs(gen=num_gen)\noptimize.run(algo, num_ind, num_gen)\n\nprint(optimize.individuals)\n\npath = input_data['scenario_name'] + '/'\n\nif not os.path.isdir(path):\n os.mkdir(path)\n\noptimize.individuals.to_csv(input_data['scenario_name'] + '/result.csv')\n# write optimization instance data to json file for postprocessing\n\nvariables_labels = {}\nfor obj, data in optimize.variables.items():\n for label, params in data.items():\n for param in params:\n variables_labels[obj + '-' + label + '-' + param] = param + ' at ' + obj + ' ' + label\n\n\nwith open(input_data['scenario_name'] + '/problem.json', 'w') as f:\n output = {\n key + \"_list\": optimize.__dict__[key + \"_list\"]\n for key in [\"constraint\", \"variable\", \"objective\"]\n }\n output.update(variables_labels)\n f.write(json.dumps(output))\n f.close()" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
willxujun/tensorflow
[ "5c31a9c4a8aa94d2f41c60880bb3ca699c23328c" ]
[ "tensorflow/compiler/aot/ex2/make_graph.py" ]
[ "import argparse\nimport os\nimport sys\n\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nFLAGS = None\n\ndef mlp(_):\n # Parameters\n learning_rate = 0.1\n num_steps = 500\n batch_size = 128\n display_step = 100\n # Network Parameters\n n_hidden_1 = 256 # 1st layer number of neurons\n n_hidden_2 = 256 # 2nd layer number of neurons\n num_input = 784 # MNIST data input (img shape: 28*28)\n num_classes = 10 # MNIST total classes (0-9 digits)\n # tf Graph input\n X = tf.placeholder(\"float\", [None, num_input])\n Y = tf.placeholder(\"float\", [None, num_classes])\n # Store layers weight & bias\n weights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))\n }\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n }\n # Create model\n def neural_net(x):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer\n # Construct model\n logits = neural_net(X)\n prediction = tf.nn.softmax(logits)\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op)\n\n # Evaluate model\n correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Initialize the variables (i.e. 
assign their default value)\n init = tf.global_variables_initializer()\n # Start training\n with tf.Session() as sess:\n # Run the initializer\n sess.run(init)\n for step in range(1, num_steps+1):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n print(\"Optimization Finished!\")\n # Calculate accuracy for MNIST test images\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={X: mnist.test.images,\n Y: mnist.test.labels}))\n\ndef write_graph(build_graph, out_dir):\n \"\"\"Build a graph using build_graph and write it out.\"\"\"\n g = ops.Graph()\n with g.as_default():\n build_graph(out_dir)\n filename = os.path.join(out_dir, 'test_graph_%s.pb' % build_graph.__name__)\n with open(filename, 'wb') as f:\n f.write(g.as_graph_def().SerializeToString())\n\ndef main(_):\n mlp(0)\n\n # launch the default graph\n sess = tf.Session()\n\n writer = tf.summary.FileWriter('vis', sess.graph)\n\n write_graph(mlp, FLAGS.out_dir)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument(\n '--out_dir',\n type=str,\n default='',\n help='Output directory for graphs, checkpoints and savers.')\n FLAGS, unparsed = parser.parse_known_args()\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.python.platform.app.run", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.softmax", "tensorflow.summary.FileWriter", "tensorflow.matmul", "tensorflow.python.framework.ops.Graph", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
thefullstackninja/effective_data_visualization_using_plotly_express
[ "043225b9a4e2333709df19be64475d8ed003daa3", "043225b9a4e2333709df19be64475d8ed003daa3" ]
[ "pie_charts/basic_pie_chart_tips_by_gender.py", "scatterplots/basic_iris_scatterplots.py" ]
[ "### Case study Distribution of tips by gender\n\nimport pandas as pd\nimport plotly.express as px\n\n\ndf = pd.read_csv(\"../data/tips.csv\")\n\nplot = px.pie(\n data_frame=df,\n values='tip',\n names='sex',\n title=\"Case study Distribution of tips by gender\"\n \n)\n\nplot.show()", "import plotly.express as px\nimport pandas as pd\n\n# import dataset\ndf = pd.read_csv(\"../data/iris.csv\")\n\nprint(df.columns)\n\nscatterplot = px.scatter(data_frame=df,\n x='SepalWidthCm',\n y='PetalLengthCm',\n size='PetalWidthCm',\n title=\"Plot of Sepal width vs Petal length for the Iris flower using Petal width as the size parameter\",\n color='Species')\n\nscatterplot.show()" ]
[ [ "pandas.read_csv" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
yasudakn/hmr
[ "6b7a9a4d1a312c0f93140d4d4752ab2d100a4ce3" ]
[ "src/tf_smpl/batch_lbs.py" ]
[ "\"\"\" Util functions for SMPL\n@@batch_skew\n@@batch_rodrigues\n@@batch_lrotmin\n@@batch_global_rigid_transformation\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef batch_skew(vec, batch_size=None):\n \"\"\"\n vec is N x 3, batch_size is int\n\n returns N x 3 x 3. Skew_sym version of each matrix.\n \"\"\"\n with tf.name_scope(\"batch_skew\", values=[vec]):\n if batch_size is None:\n batch_size = vec.shape.as_list()[0]\n col_inds = tf.constant([1, 2, 3, 5, 6, 7])\n indices = tf.reshape(\n tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,\n [-1, 1])\n updates = tf.reshape(\n tf.stack(\n [\n -vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],\n vec[:, 0]\n ],\n axis=1), [-1])\n out_shape = [batch_size * 9]\n res = tf.scatter_nd(indices, updates, out_shape)\n res = tf.reshape(res, [batch_size, 3, 3])\n\n return res\n\n\ndef batch_rodrigues(theta, name=None):\n \"\"\"\n Theta is N x 3\n \"\"\"\n with tf.name_scope(name, \"batch_rodrigues\", [theta]):\n batch_size = theta.shape.as_list()[0]\n\n # angle = tf.norm(theta, axis=1)\n # r = tf.expand_dims(tf.div(theta, tf.expand_dims(angle + 1e-8, -1)), -1)\n # angle = tf.expand_dims(tf.norm(theta, axis=1) + 1e-8, -1)\n angle = tf.expand_dims(tf.norm(theta + 1e-8, axis=1), -1)\n r = tf.expand_dims(tf.div(theta, angle), -1)\n\n angle = tf.expand_dims(angle, -1)\n cos = tf.cos(angle)\n sin = tf.sin(angle)\n\n outer = tf.matmul(r, r, transpose_b=True, name=\"outer\")\n\n eyes = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])\n R = cos * eyes + (1 - cos) * outer + sin * batch_skew(\n r, batch_size=batch_size)\n return R\n\n\ndef batch_lrotmin(theta, name=None):\n \"\"\" NOTE: not used bc I want to reuse R and this is simple.\n Output of this is used to compute joint-to-pose blend shape mapping.\n Equation 9 in SMPL paper.\n\n\n Args:\n pose: `Tensor`, N x 72 vector holding the axis-angle rep of K joints.\n This includes the global rotation so K=24\n\n Returns\n diff_vec : `Tensor`: N x 207 rotation matrix of 23=(K-1) joints with identity subtracted.,\n \"\"\"\n with tf.name_scope(name, \"batch_lrotmin\", [theta]):\n with tf.name_scope(\"ignore_global\"):\n theta = theta[:, 3:]\n\n # N*23 x 3 x 3\n Rs = batch_rodrigues(tf.reshape(theta, [-1, 3]))\n lrotmin = tf.reshape(Rs - tf.eye(3), [-1, 207])\n\n return lrotmin\n\n\ndef batch_global_rigid_transformation(Rs, Js, parent, rotate_base=False):\n \"\"\"\n Computes absolute joint locations given pose.\n\n rotate_base: if True, rotates the global rotation by 90 deg in x axis.\n if False, this is the original SMPL coordinate.\n\n Args:\n Rs: N x 24 x 3 x 3 rotation vector of K joints\n Js: N x 24 x 3, joint locations before posing\n parent: 24 holding the parent id for each index\n\n Returns\n new_J : `Tensor`: N x 24 x 3 location of absolute joints\n A : `Tensor`: N x 24 4 x 4 relative joint transformations for LBS.\n \"\"\"\n with tf.name_scope(\"batch_forward_kinematics\", values=[Rs, Js]):\n N = Rs.shape[0].value\n if rotate_base:\n print('Flipping the SMPL coordinate frame!!!!')\n rot_x = tf.constant(\n [[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=Rs.dtype)\n rot_x = tf.reshape(tf.tile(rot_x, [N, 1]), [N, 3, 3])\n root_rotation = tf.matmul(Rs[:, 0, :, :], rot_x)\n else:\n root_rotation = Rs[:, 0, :, :]\n\n # Now Js is N x 24 x 3 x 1\n Js = tf.expand_dims(Js, -1)\n\n def make_A(R, t, name=None):\n # Rs is N x 3 x 3, ts is N x 3 x 1\n with tf.name_scope(name, \"Make_A\", 
[R, t]):\n R_homo = tf.pad(R, [[0, 0], [0, 1], [0, 0]])\n t_homo = tf.concat([t, tf.ones([N, 1, 1])], 1)\n return tf.concat([R_homo, t_homo], 2)\n\n A0 = make_A(root_rotation, Js[:, 0])\n results = [A0]\n for i in range(1, parent.shape[0]):\n j_here = Js[:, i] - Js[:, parent[i]]\n A_here = make_A(Rs[:, i], j_here)\n res_here = tf.matmul(\n results[parent[i]], A_here, name=\"propA%d\" % i)\n results.append(res_here)\n\n # 10 x 24 x 4 x 4\n results = tf.stack(results, axis=1)\n\n new_J = results[:, :, :3, 3]\n\n # --- Compute relative A: Skinning is based on\n # how much the bone moved (not the final location of the bone)\n # but (final_bone - init_bone)\n # ---\n Js_w0 = tf.concat([Js, tf.zeros([N, 24, 1, 1])], 2)\n init_bone = tf.matmul(results, Js_w0)\n # Append empty 4 x 3:\n init_bone = tf.pad(init_bone, [[0, 0], [0, 0], [0, 0], [3, 0]])\n A = results - init_bone\n\n return new_J, A\n" ]
[ [ "tensorflow.matmul", "tensorflow.sin", "tensorflow.constant", "tensorflow.cos", "tensorflow.norm", "tensorflow.concat", "tensorflow.zeros", "tensorflow.stack", "tensorflow.range", "tensorflow.reshape", "tensorflow.scatter_nd", "tensorflow.expand_dims", "tensorflow.eye", "tensorflow.div", "tensorflow.ones", "tensorflow.name_scope", "tensorflow.pad", "tensorflow.tile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "1.0", "1.2" ] } ]
Parallel-in-Time/PararealF90
[ "a8318a79b92465a8a3cf775cc7fd096ff0494529" ]
[ "plot_solution.py" ]
[ "import sys\nsys.path.append('./scripts')\nfrom get_parameter import get_parameter\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\nnu, Nx, Ny, Nz, dt_fine, dt_coarse, Niter, Tend, do_io, be_verbose = get_parameter()\nsol = np.array([])\n\nfilename = \"q_final_fine.dat\"\nwith open(filename,'r') as fobj:\n while True:\n line = fobj.readline()\n if not line: break\n sol = np.append(sol, [float(line)])\n\nassert np.size(sol)==Nx*Ny*Nz, 'Length of solution does not match parameter... was probably generated with different setting.'\n\nsol.shape = ((Nx, Ny, Nz))\n\nx = np.linspace(0, 1, Nx)\ny = np.linspace(0, 1, Ny)\nxx, yy = np.meshgrid(x, y)\n\nfig = plt.figure(figsize=(8,8))\nax = fig.gca(projection='3d')\nax.view_init(elev=0., azim=-90.)\nsurf = ax.plot_surface(xx, yy, sol[:,:,0], rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)\nax.set_xlim(left = 0.0, right = 1.0)\nax.set_ylim(bottom = 0.0, top = 1.0)\n#ax.set_zlim(bottom = 0.0, top = 1.0)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()\n" ]
[ [ "numpy.linspace", "numpy.meshgrid", "matplotlib.pyplot.ylabel", "numpy.size", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IgorHoholko/metrics
[ "5510ccd99eaec5ab8175bbd5e2ad9e66e82d10e4", "5510ccd99eaec5ab8175bbd5e2ad9e66e82d10e4" ]
[ "torchmetrics/regression/psnr.py", "torchmetrics/functional/classification/stat_scores.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.psnr import _psnr_compute, _psnr_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass PSNR(Metric):\n r\"\"\"\n Computes `peak signal-to-noise ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_ (PSNR):\n\n .. math:: \\text{PSNR}(I, J) = 10 * \\log_{10} \\left(\\frac{\\max(I)^2}{\\text{MSE}(I, J)}\\right)\n\n Where :math:`\\text{MSE}` denotes the `mean-squared-error\n <https://en.wikipedia.org/wiki/Mean_squared_error>`_ function.\n\n Args:\n data_range:\n the range of the data. If None, it is determined from the data (max - min).\n The ``data_range`` must be given when ``dim`` is not None.\n base: a base of a logarithm to use (default: 10)\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'``: no reduction will be applied\n\n dim:\n Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is\n None meaning scores will be reduced across all dimensions and all batches.\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. default: None (which selects the entire world)\n\n Raises:\n ValueError:\n If ``dim`` is not ``None`` and ``data_range`` is not given.\n\n Example:\n >>> from torchmetrics import PSNR\n >>> psnr = PSNR()\n >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\n >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])\n >>> psnr(preds, target)\n tensor(2.5527)\n\n .. 
note::\n Half precision is only supported on GPU for this metric\n\n \"\"\"\n\n def __init__(\n self,\n data_range: Optional[float] = None,\n base: float = 10.0,\n reduction: str = 'elementwise_mean',\n dim: Optional[Union[int, Tuple[int, ...]]] = None,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n\n if dim is None and reduction != 'elementwise_mean':\n rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')\n\n if dim is None:\n self.add_state(\"sum_squared_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n else:\n self.add_state(\"sum_squared_error\", default=[])\n self.add_state(\"total\", default=[])\n\n if data_range is None:\n if dim is not None:\n # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to\n # calculate `data_range` in the future.\n raise ValueError(\"The `data_range` must be given when `dim` is not None.\")\n\n self.data_range = None\n self.add_state(\"min_target\", default=tensor(0.0), dist_reduce_fx=torch.min)\n self.add_state(\"max_target\", default=tensor(0.0), dist_reduce_fx=torch.max)\n else:\n self.add_state(\"data_range\", default=tensor(float(data_range)), dist_reduce_fx='mean')\n self.base = base\n self.reduction = reduction\n self.dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n def update(self, preds: Tensor, target: Tensor):\n \"\"\"\n Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)\n if self.dim is None:\n if self.data_range is None:\n # keep track of min and max target values\n self.min_target = min(target.min(), self.min_target)\n self.max_target = max(target.max(), self.max_target)\n\n self.sum_squared_error += sum_squared_error\n self.total += n_obs\n else:\n self.sum_squared_error.append(sum_squared_error)\n self.total.append(n_obs)\n\n def compute(self):\n \"\"\"\n Compute peak signal-to-noise ratio over state.\n \"\"\"\n if self.data_range is not None:\n data_range = self.data_range\n else:\n data_range = self.max_target - self.min_target\n\n if self.dim is None:\n sum_squared_error = self.sum_squared_error\n total = self.total\n else:\n sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])\n total = torch.cat([values.flatten() for values in self.total])\n return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.utilities import _deprecation_warn_arg_is_multiclass\nfrom torchmetrics.utilities.checks import _input_format_classification\n\n\ndef _del_column(tensor: Tensor, index: int):\n \"\"\" Delete the column at index.\"\"\"\n\n return torch.cat([tensor[:, :index], tensor[:, (index + 1):]], 1)\n\n\ndef _stat_scores(\n preds: Tensor,\n target: Tensor,\n reduce: str = \"micro\",\n) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Calculate the number of tp, fp, tn, fn.\n\n Args:\n preds:\n An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1)\n target:\n An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1)\n reduce:\n One of ``'micro'``, ``'macro'``, ``'samples'``\n\n Return:\n Returns a list of 4 tensors; tp, fp, tn, fn.\n The shape of the returned tensors depends on the shape of the inputs\n and the ``reduce`` parameter:\n\n If inputs are of the shape ``(N, C)``, then\n - If ``reduce='micro'``, the returned tensors are 1 element tensors\n - If ``reduce='macro'``, the returned tensors are ``(C,)`` tensors\n - If ``reduce='samples'``, the returned tensors are ``(N,)`` tensors\n\n If inputs are of the shape ``(N, C, X)``, then\n - If ``reduce='micro'``, the returned tensors are ``(N,)`` tensors\n - If ``reduce='macro'``, the returned tensors are ``(N,C)`` tensors\n - If ``reduce='samples'``, the returned tensors are ``(N,X)`` tensors\n \"\"\"\n if reduce == \"micro\":\n dim = [0, 1] if preds.ndim == 2 else [1, 2]\n elif reduce == \"macro\":\n dim = 0 if preds.ndim == 2 else 2\n elif reduce == \"samples\":\n dim = 1\n\n true_pred, false_pred = target == preds, target != preds\n pos_pred, neg_pred = preds == 1, preds == 0\n\n tp = (true_pred * pos_pred).sum(dim=dim)\n fp = (false_pred * pos_pred).sum(dim=dim)\n\n tn = (true_pred * neg_pred).sum(dim=dim)\n fn = (false_pred * neg_pred).sum(dim=dim)\n\n return tp.long(), fp.long(), tn.long(), fn.long()\n\n\ndef _stat_scores_update(\n preds: Tensor,\n target: Tensor,\n reduce: str = \"micro\",\n mdmc_reduce: Optional[str] = None,\n num_classes: Optional[int] = None,\n top_k: Optional[int] = None,\n threshold: float = 0.5,\n multiclass: Optional[bool] = None,\n ignore_index: Optional[int] = None,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n\n preds, target, _ = _input_format_classification(\n preds, target, threshold=threshold, num_classes=num_classes, multiclass=multiclass, top_k=top_k\n )\n\n if ignore_index is not None and not 0 <= ignore_index < preds.shape[1]:\n raise ValueError(f\"The `ignore_index` {ignore_index} is not valid for inputs with {preds.shape[1]} classes\")\n\n if ignore_index is not None and preds.shape[1] == 1:\n raise ValueError(\"You can not use `ignore_index` with binary data.\")\n\n if preds.ndim == 3:\n if not mdmc_reduce:\n raise ValueError(\n \"When your inputs are multi-dimensional multi-class, you have to set the `mdmc_reduce` parameter\"\n )\n if mdmc_reduce == \"global\":\n preds = torch.transpose(preds, 1, 2).reshape(-1, preds.shape[1])\n target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1])\n\n # Delete what is in ignore_index, if applicable (and classes don't matter):\n if ignore_index is not None and reduce != \"macro\":\n preds = _del_column(preds, ignore_index)\n target = _del_column(target, ignore_index)\n\n tp, fp, tn, fn = _stat_scores(preds, target, reduce=reduce)\n\n # Take care of ignore_index\n if ignore_index is not None and reduce == \"macro\":\n tp[..., ignore_index] = -1\n fp[..., ignore_index] = -1\n tn[..., ignore_index] = -1\n fn[..., ignore_index] = -1\n\n return tp, fp, tn, fn\n\n\ndef _stat_scores_compute(tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> Tensor:\n\n outputs = [\n tp.unsqueeze(-1),\n fp.unsqueeze(-1),\n tn.unsqueeze(-1),\n fn.unsqueeze(-1),\n tp.unsqueeze(-1) + fn.unsqueeze(-1), # support\n ]\n outputs = torch.cat(outputs, -1)\n outputs = torch.where(outputs < 0, tensor(-1, device=outputs.device), outputs)\n\n return outputs\n\n\ndef stat_scores(\n preds: Tensor,\n target: Tensor,\n reduce: str = \"micro\",\n mdmc_reduce: Optional[str] = None,\n num_classes: Optional[int] = None,\n top_k: Optional[int] = None,\n threshold: float = 0.5,\n multiclass: Optional[bool] = None,\n ignore_index: Optional[int] = None,\n is_multiclass: Optional[bool] = None, # todo: deprecated, remove in v0.4\n) -> Tensor:\n \"\"\"Computes the number of true positives, false positives, true negatives, false negatives.\n Related to `Type I and Type II errors <https://en.wikipedia.org/wiki/Type_I_and_type_II_errors>`__\n and the `confusion matrix <https://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion>`__.\n\n The reduction method (how the statistics are aggregated) is controlled by the\n ``reduce`` parameter, and additionally by the ``mdmc_reduce`` parameter in the\n multi-dimensional multi-class case. Accepts all inputs listed in :ref:`references/modules:input types`.\n\n Args:\n preds: Predictions from model (probabilities or labels)\n target: Ground truth values\n threshold:\n Threshold probability value for transforming probability predictions to binary\n (0 or 1) predictions, in the case of binary or multi-label inputs.\n\n top_k:\n Number of highest probability entries for each sample to convert to 1s - relevant\n only for inputs with probability predictions. If this parameter is set for multi-label\n inputs, it will take precedence over ``threshold``. For (multi-dim) multi-class inputs,\n this parameter defaults to 1.\n\n Should be left unset (``None``) for inputs with label predictions.\n\n reduce:\n Defines the reduction that is applied. Should be one of the following:\n\n - ``'micro'`` [default]: Counts the statistics by summing over all [sample, class]\n combinations (globally). Each statistic is represented by a single integer.\n - ``'macro'``: Counts the statistics for each class separately (over all samples).\n Each statistic is represented by a ``(C,)`` tensor. Requires ``num_classes``\n to be set.\n - ``'samples'``: Counts the statistics for each sample separately (over all classes).\n Each statistic is represented by a ``(N, )`` 1d tensor.\n\n .. note:: What is considered a sample in the multi-dimensional multi-class case\n depends on the value of ``mdmc_reduce``.\n\n num_classes:\n Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.\n\n ignore_index:\n Specify a class (label) to ignore. If given, this class index does not contribute\n to the returned score, regardless of reduction method. If an index is ignored, and\n ``reduce='macro'``, the class statistics for the ignored class will all be returned\n as ``-1``.\n\n mdmc_reduce:\n Defines how the multi-dimensional multi-class inputs are handled. Should be\n one of the following:\n\n - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional\n multi-class (see :ref:`references/modules:input types` for the definition of input types).\n\n - ``'samplewise'``: In this case, the statistics are computed separately for each\n sample on the ``N`` axis, and then the outputs are concatenated together. In each\n sample the extra axes ``...`` are flattened to become the sub-sample axis, and\n statistics for each sample are computed by treating the sub-sample axis as the\n ``N`` axis for that sample.\n\n - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs are\n flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they\n were ``(N_X, C)``. From here on the ``reduce`` parameter applies as usual.\n\n multiclass:\n Used only in certain special cases, where you want to treat inputs as a different type\n than what they appear to be. See the parameter's\n :ref:`documentation section <references/modules:using the multiclass parameter>`\n for a more detailed explanation and examples.\n is_multiclass:\n .. deprecated:: 0.3\n Argument will not have any effect and will be removed in v0.4, please use ``multiclass`` instead.\n\n Return:\n The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds\n to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The\n shape depends on the ``reduce`` and ``mdmc_reduce`` (in case of multi-dimensional\n multi-class data) parameters:\n\n - If the data is not multi-dimensional multi-class, then\n\n - If ``reduce='micro'``, the shape will be ``(5, )``\n - If ``reduce='macro'``, the shape will be ``(C, 5)``,\n where ``C`` stands for the number of classes\n - If ``reduce='samples'``, the shape will be ``(N, 5)``, where ``N`` stands for\n the number of samples\n\n - If the data is multi-dimensional multi-class and ``mdmc_reduce='global'``, then\n\n - If ``reduce='micro'``, the shape will be ``(5, )``\n - If ``reduce='macro'``, the shape will be ``(C, 5)``\n - If ``reduce='samples'``, the shape will be ``(N*X, 5)``, where ``X`` stands for\n the product of sizes of all \"extra\" dimensions of the data (i.e. 
all dimensions\n except for ``C`` and ``N``)\n\n - If the data is multi-dimensional multi-class and ``mdmc_reduce='samplewise'``, then\n\n - If ``reduce='micro'``, the shape will be ``(N, 5)``\n - If ``reduce='macro'``, the shape will be ``(N, C, 5)``\n - If ``reduce='samples'``, the shape will be ``(N, X, 5)``\n\n Raises:\n ValueError:\n If ``reduce`` is none of ``\"micro\"``, ``\"macro\"`` or ``\"samples\"``.\n ValueError:\n If ``mdmc_reduce`` is none of ``None``, ``\"samplewise\"``, ``\"global\"``.\n ValueError:\n If ``reduce`` is set to ``\"macro\"`` and ``num_classes`` is not provided.\n ValueError:\n If ``num_classes`` is set\n and ``ignore_index`` is not in the range ``[0, num_classes)``.\n ValueError:\n If ``ignore_index`` is used with ``binary data``.\n ValueError:\n If inputs are ``multi-dimensional multi-class`` and ``mdmc_reduce`` is not provided.\n\n Example:\n >>> from torchmetrics.functional import stat_scores\n >>> preds = torch.tensor([1, 0, 2, 1])\n >>> target = torch.tensor([1, 1, 2, 0])\n >>> stat_scores(preds, target, reduce='macro', num_classes=3)\n tensor([[0, 1, 2, 1, 1],\n [1, 1, 1, 1, 2],\n [1, 0, 3, 0, 1]])\n >>> stat_scores(preds, target, reduce='micro')\n tensor([2, 2, 6, 2, 4])\n \"\"\"\n multiclass = _deprecation_warn_arg_is_multiclass(is_multiclass, multiclass)\n\n if reduce not in [\"micro\", \"macro\", \"samples\"]:\n raise ValueError(f\"The `reduce` {reduce} is not valid.\")\n\n if mdmc_reduce not in [None, \"samplewise\", \"global\"]:\n raise ValueError(f\"The `mdmc_reduce` {mdmc_reduce} is not valid.\")\n\n if reduce == \"macro\" and (not num_classes or num_classes < 1):\n raise ValueError(\"When you set `reduce` as 'macro', you have to provide the number of classes.\")\n\n if num_classes and ignore_index is not None and (not 0 <= ignore_index < num_classes or num_classes == 1):\n raise ValueError(f\"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes\")\n\n tp, fp, tn, fn = _stat_scores_update(\n preds,\n target,\n reduce=reduce,\n mdmc_reduce=mdmc_reduce,\n top_k=top_k,\n threshold=threshold,\n num_classes=num_classes,\n multiclass=multiclass,\n ignore_index=ignore_index,\n )\n return _stat_scores_compute(tp, fp, tn, fn)\n" ]
[ [ "torch.tensor" ], [ "torch.tensor", "torch.transpose", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
baijianhua/pymath
[ "a96ebbd8c8ac646c436d8bf33cb01764a948255d", "a96ebbd8c8ac646c436d8bf33cb01764a948255d", "a96ebbd8c8ac646c436d8bf33cb01764a948255d", "a96ebbd8c8ac646c436d8bf33cb01764a948255d" ]
[ "bak/coord.py", "metrics_tensor/matrix_transform_invert.py", "bak/transform1.py", "common/common.py" ]
[ "#用python绘制坐标\n#https://matplotlib.org/examples/axes_grid/demo_axisline_style.html\n#https://stackoverflow.com/questions/13430231/how-i-can-get-cartesian-coordinate-system-in-matplotlib\n#https://stackoverflow.com/questions/50798265/what-is-subplotzero-documentation-lacking\n\n# notice import as 和 from import有什么区别?\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom bak.basic_units import cm\n\nfig: Figure = plt.figure()\n# notice 得到并设置坐标坐标系\nax: Axes = fig.subplots()\nax.set_title('x axis spine at zero data coordinate')\nax.set_xlabel(\"Axes zero\")\nax.set_ylabel(\"Y\")\nax.axis()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.set_xlim(-3, 10)\nax.set_ylim(-3, 10)\n\n# todo 设定坐标轴的样式和刻度\nyAxis: Axes = ax.spines['left']\nyAxis.set_position(('data', 0))\nxAxis: Axes = ax.spines['bottom']\nxAxis.set_position(('data', 0))\n#xAxis.set_axisline_style(\"-|>\")\n\n# notice 设定x的范围\nx = np.arange(-1, 3, 0.01)\n# notice 绘制图形\nax.plot(x, 2*x, xunits=cm, yunits=cm)\n\nplt.show()\n", "from numpy import mat\n\n'''\n测试A的转置的逆与A的逆的转置是否相等\n'''\nm = mat([[2, 3, 1],\n [4, 5, 6],\n [7, 8, 9],\n ])\n\nprint(m.T.I)\nprint(m.I.T)\n", "#https://codereview.stackexchange.com/questions/184687/animation-of-linear-transformations\n\nimport pygame\nfrom pygame.locals import *\nimport time\nimport numpy\n\nWIDTH, HEIGHT = 600, 600\nUNIT = 100\nSTEP = 0.01\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\ndef transform_point( x_y , a, b, c, d):\n return (a*x_y[0] + b*x_y[1], c*x_y[0] + d*x_y[1])\n\ndef transform_point_basis( x_y, e1x_e1y, e2x_e2y ):\n return transform_point( x_y, e1x_e1y[0], e2x_e2y[0], e1x_e1y[1], e2x_e2y[1])\n\n# In case you prefer just points, no lines\n#points = [ [(x, y) for x in range(-WIDTH//2, WIDTH//2, UNIT ) ] for y in range(-HEIGHT//2, HEIGHT//2, UNIT) ]\n\n# Builds the grid lines.\n# Weird range for computer to cartesian coordinates\npoints = [ [(x, y) for x in range(-WIDTH//2, WIDTH//2) if x % UNIT == 0 or y % UNIT == 0] \\\n for y in range(-HEIGHT//2, HEIGHT//2) ]\n\ndef twod_map(f, xss):\n return [ [f(item) for item in xs] for xs in xss]\n\ndef color_bases( x_y ):\n \"\"\"\n Colors the 1-st canonical base (0,1) red,\n The 2-nd canonical base (1,0) green\n \"\"\"\n if ( distance_from_o( x_y ) < UNIT and x_y[1] == 0 and x_y[0] > 0):\n return (255, 0, 0)\n if ( distance_from_o( x_y ) < UNIT and x_y[0] == 0 and x_y[1] >0):\n return (0, 255 ,0)\n return (255, 255, 255)\n\ndef bright_by_distance( x_y ):\n return (255 - distance_from_o( x_y ) // 3 % 256, \\\n 255 - distance_from_o( x_y ) // 3 % 256, \\\n 255 - distance_from_o( x_y ) // 3 % 256)\n\n\ndef color_up_right( x_y ):\n \"\"\"\n The most right a point was in the original state, the red-der it is.\n The most height a point was in the original state, the green-er it is.\n Does not work for size > 3*255.\n \"\"\"\n return (int(x_y[0] + WIDTH//2)//3%255, int(-x_y[1] + HEIGHT//2)//3%255, 0)\n\ndef main(final_coefficients, color_func=color_bases):\n for percentage in numpy.arange(0, 1, STEP):\n final_a, final_b, final_c, final_d = final_coefficients\n\n # In identity matrix, a and d start at one and c and c start from 0\n # In fact transform_point( point , 1, 0, 0, 1) = point\n # So to represent the transformation a and d must start\n # similar to 1 and become more and more similar to the final\n a = 1 * ( (1 - percentage) ) + percentage * final_a\n d = 1 * ( (1 - percentage) ) + percentage * final_d\n b = percentage * 
final_b\n c = percentage * final_c\n\n koefficients = (a,b,c,d) #map(lambda k: float(k) * (float(percentage)) , final_coefficients)\n show_points( twod_map(lambda p: transform_point(p, a,b,c,d), points), points, color_func)\n\n # Be sure final state is precise\n show_points( twod_map(lambda p: transform_point(p, *final_coefficients), points), points, color_func)\n\ndef main_basis(base_effect1, base_effect2, color_func=color_bases):\n final_coefficients =base_effect1[0], base_effect2[0], base_effect1[1], base_effect2[1]\n main(final_coefficients, color_func=color_func)\n\ndef distance_from_o(p):\n return int ( (p[0]**2 + p[1]**2)**0.5 )\n\ndef to_cartesian( x_y, width=WIDTH, height=HEIGHT ):\n return int(x_y[0] + width//2), int(-x_y[1] + height//2)\n\ndef draw_basic_grid(screen, grid):\n pygame.display.flip()\n screen.fill( (0,0,0) )\n for l in grid:\n for p in l:\n coords = to_cartesian(p)\n screen.set_at( coords, (50, 50, 50))\n\ndef show_points(points, originals, color_func=color_bases):\n\n draw_basic_grid(screen, originals)\n\n # Original points are needed for coloring.\n for (line, lineorig) in zip(points, originals):\n for (point, original) in zip(line, lineorig):\n screen.set_at( to_cartesian(point), \\\n color_func( (original) ))\n\n\nmain_basis( (-2, 1),\n (-1, 2),\n color_func = color_bases)", "from numpy import mat\nfrom numpy.core._multiarray_umath import array\n\n\ndef get_column_from_matrix(m: mat, c: int) -> array:\n \"\"\"\n 取矩阵的某一列\n :param m:\n :param c:\n :return:\n \"\"\"\n return array([m[0, c], m[1, c]])\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.mat" ], [ "numpy.arange" ], [ "numpy.core._multiarray_umath.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
manaccac/sc2_bot
[ "3aa8b3711378b71fd0a44662cdd7148846e39530", "3aa8b3711378b71fd0a44662cdd7148846e39530", "3aa8b3711378b71fd0a44662cdd7148846e39530" ]
[ "bot/venv/lib/python3.7/site-packages/scipy/ndimage/_ni_docstrings.py", "bot/venv/lib/python3.7/site-packages/scipy/linalg/decomp_cholesky.py", "bot/venv/lib/python3.7/site-packages/scipy/optimize/tests/test__differential_evolution.py" ]
[ "\"\"\"Docstring components common to several ndimage functions.\"\"\"\nfrom scipy._lib import doccer\n\n__all__ = ['docfiller']\n\n\n_input_doc = (\n\"\"\"input : array_like\n The input array.\"\"\")\n_axis_doc = (\n\"\"\"axis : int, optional\n The axis of `input` along which to calculate. Default is -1.\"\"\")\n_output_doc = (\n\"\"\"output : array or dtype, optional\n The array in which to place the output, or the dtype of the\n returned array. By default an array of the same dtype as input\n will be created.\"\"\")\n_size_foot_doc = (\n\"\"\"size : scalar or tuple, optional\n See footprint, below. Ignored if footprint is given.\nfootprint : array, optional\n Either `size` or `footprint` must be defined. `size` gives\n the shape that is taken from the input array, at every element\n position, to define the input to the filter function.\n `footprint` is a boolean array that specifies (implicitly) a\n shape, but also which of the elements within this shape will get\n passed to the filter function. Thus ``size=(n,m)`` is equivalent\n to ``footprint=np.ones((n,m))``. We adjust `size` to the number\n of dimensions of the input array, so that, if the input array is\n shape (10,10,10), and `size` is 2, then the actual size used is\n (2,2,2). When `footprint` is given, `size` is ignored.\"\"\")\n_mode_doc = (\n\"\"\"mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the input array is extended\n beyond its boundaries. Default is 'reflect'. Behavior for each valid\n value is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_mode_multiple_doc = (\n\"\"\"mode : str or sequence, optional\n The `mode` parameter determines how the input array is extended\n when the filter overlaps a border. By passing a sequence of modes\n with length equal to the number of dimensions of the input array,\n different modes can be specified along each axis. Default value is\n 'reflect'. The valid values and their behavior is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_cval_doc = (\n\"\"\"cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. 
Default\n is 0.0.\"\"\")\n_origin_doc = (\n\"\"\"origin : int, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right.\"\"\")\n_origin_multiple_doc = (\n\"\"\"origin : int or sequence, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right. By passing a sequence of origins with length equal to\n the number of dimensions of the input array, different shifts can\n be specified along each axis.\"\"\")\n_extra_arguments_doc = (\n\"\"\"extra_arguments : sequence, optional\n Sequence of extra positional arguments to pass to passed function.\"\"\")\n_extra_keywords_doc = (\n\"\"\"extra_keywords : dict, optional\n dict of extra keyword arguments to pass to passed function.\"\"\")\n_prefilter_doc = (\n\"\"\"prefilter : bool, optional\n Determines if the input array is prefiltered with `spline_filter`\n before interpolation. The default is True, which will create a\n temporary `float64` array of filtered values if `order > 1`. If\n setting this to False, the output will be slightly blurred if\n `order > 1`, unless the input is prefiltered, i.e. it is the result\n of calling `spline_filter` on the original input.\"\"\")\n\ndocdict = {\n 'input': _input_doc,\n 'axis': _axis_doc,\n 'output': _output_doc,\n 'size_foot': _size_foot_doc,\n 'mode': _mode_doc,\n 'mode_multiple': _mode_multiple_doc,\n 'cval': _cval_doc,\n 'origin': _origin_doc,\n 'origin_multiple': _origin_multiple_doc,\n 'extra_arguments': _extra_arguments_doc,\n 'extra_keywords': _extra_keywords_doc,\n 'prefilter': _prefilter_doc\n }\n\ndocfiller = doccer.filldoc(docdict)\n", "\"\"\"Cholesky decomposition functions.\"\"\"\n\nfrom numpy import asarray_chkfinite, asarray, atleast_2d\n\n# Local imports\nfrom .misc import LinAlgError, _datacopied\nfrom .lapack import get_lapack_funcs\n\n__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',\n 'cho_solve_banded']\n\n\ndef _cholesky(a, lower=False, overwrite_a=False, clean=True,\n check_finite=True):\n \"\"\"Common code for cholesky() and cho_factor().\"\"\"\n\n a1 = asarray_chkfinite(a) if check_finite else asarray(a)\n a1 = atleast_2d(a1)\n\n # Dimension check\n if a1.ndim != 2:\n raise ValueError('Input array needs to be 2D but received '\n 'a {}d-array.'.format(a1.ndim))\n # Squareness check\n if a1.shape[0] != a1.shape[1]:\n raise ValueError('Input array is expected to be square but has '\n 'the shape: {}.'.format(a1.shape))\n\n # Quick return for square empty array\n if a1.size == 0:\n return a1.copy(), lower\n\n overwrite_a = overwrite_a or _datacopied(a1, a)\n potrf, = get_lapack_funcs(('potrf',), (a1,))\n c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean)\n if info > 0:\n raise LinAlgError(\"%d-th leading minor of the array is not positive \"\n \"definite\" % info)\n if info < 0:\n raise ValueError('LAPACK reported an illegal value in {}-th argument'\n 'on entry to \"POTRF\".'.format(-info))\n return c, lower\n\n\ndef cholesky(a, lower=False, overwrite_a=False, check_finite=True):\n \"\"\"\n Compute the Cholesky decomposition of a matrix.\n\n Returns the Cholesky decomposition, :math:`A = L L^*` or\n :math:`A = U^* U` of a Hermitian positive-definite matrix A.\n\n Parameters\n ----------\n a : (M, M) 
array_like\n Matrix to be decomposed\n lower : bool, optional\n Whether to compute the upper- or lower-triangular Cholesky\n factorization. Default is upper-triangular.\n overwrite_a : bool, optional\n Whether to overwrite data in `a` (may improve performance).\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n c : (M, M) ndarray\n Upper- or lower-triangular Cholesky factor of `a`.\n\n Raises\n ------\n LinAlgError : if decomposition fails.\n\n Examples\n --------\n >>> from scipy.linalg import cholesky\n >>> a = np.array([[1,-2j],[2j,5]])\n >>> L = cholesky(a, lower=True)\n >>> L\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> L @ L.T.conj()\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n\n \"\"\"\n c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True,\n check_finite=check_finite)\n return c\n\n\ndef cho_factor(a, lower=False, overwrite_a=False, check_finite=True):\n \"\"\"\n Compute the Cholesky decomposition of a matrix, to use in cho_solve\n\n Returns a matrix containing the Cholesky decomposition,\n ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`.\n The return value can be directly used as the first parameter to cho_solve.\n\n .. warning::\n The returned matrix also contains random data in the entries not\n used by the Cholesky decomposition. If you need to zero these\n entries, use the function `cholesky` instead.\n\n Parameters\n ----------\n a : (M, M) array_like\n Matrix to be decomposed\n lower : bool, optional\n Whether to compute the upper or lower triangular Cholesky factorization\n (Default: upper-triangular)\n overwrite_a : bool, optional\n Whether to overwrite data in a (may improve performance)\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n c : (M, M) ndarray\n Matrix whose upper or lower triangle contains the Cholesky factor\n of `a`. Other parts of the matrix contain random data.\n lower : bool\n Flag indicating whether the factor is in the lower or upper triangle\n\n Raises\n ------\n LinAlgError\n Raised if decomposition fails.\n\n See also\n --------\n cho_solve : Solve a linear set equations using the Cholesky factorization\n of a matrix.\n\n Examples\n --------\n >>> from scipy.linalg import cho_factor\n >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])\n >>> c, low = cho_factor(A)\n >>> c\n array([[3. , 1. , 0.33333333, 1.66666667],\n [3. , 2.44948974, 1.90515869, -0.27216553],\n [1. , 5. , 2.29330749, 0.8559528 ],\n [5. , 1. , 2. , 1.55418563]])\n >>> np.allclose(np.triu(c).T @ np. 
triu(c) - A, np.zeros((4, 4)))\n True\n\n \"\"\"\n c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False,\n check_finite=check_finite)\n return c, lower\n\n\ndef cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True):\n \"\"\"Solve the linear equations A x = b, given the Cholesky factorization of A.\n\n Parameters\n ----------\n (c, lower) : tuple, (array, bool)\n Cholesky factorization of a, as given by cho_factor\n b : array\n Right-hand side\n overwrite_b : bool, optional\n Whether to overwrite data in b (may improve performance)\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n x : array\n The solution to the system A x = b\n\n See also\n --------\n cho_factor : Cholesky factorization of a matrix\n\n Examples\n --------\n >>> from scipy.linalg import cho_factor, cho_solve\n >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])\n >>> c, low = cho_factor(A)\n >>> x = cho_solve((c, low), [1, 1, 1, 1])\n >>> np.allclose(A @ x - [1, 1, 1, 1], np.zeros(4))\n True\n\n \"\"\"\n (c, lower) = c_and_lower\n if check_finite:\n b1 = asarray_chkfinite(b)\n c = asarray_chkfinite(c)\n else:\n b1 = asarray(b)\n c = asarray(c)\n if c.ndim != 2 or c.shape[0] != c.shape[1]:\n raise ValueError(\"The factored matrix c is not square.\")\n if c.shape[1] != b1.shape[0]:\n raise ValueError(\"incompatible dimensions.\")\n\n overwrite_b = overwrite_b or _datacopied(b1, b)\n\n potrs, = get_lapack_funcs(('potrs',), (c, b1))\n x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)\n if info != 0:\n raise ValueError('illegal value in %dth argument of internal potrs'\n % -info)\n return x\n\n\ndef cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True):\n \"\"\"\n Cholesky decompose a banded Hermitian positive-definite matrix\n\n The matrix a is stored in ab either in lower-diagonal or upper-\n diagonal ordered form::\n\n ab[u + i - j, j] == a[i,j] (if upper form; i <= j)\n ab[ i - j, j] == a[i,j] (if lower form; i >= j)\n\n Example of ab (shape of a is (6,6), u=2)::\n\n upper form:\n * * a02 a13 a24 a35\n * a01 a12 a23 a34 a45\n a00 a11 a22 a33 a44 a55\n\n lower form:\n a00 a11 a22 a33 a44 a55\n a10 a21 a32 a43 a54 *\n a20 a31 a42 a53 * *\n\n Parameters\n ----------\n ab : (u + 1, M) array_like\n Banded matrix\n overwrite_ab : bool, optional\n Discard data in ab (may enhance performance)\n lower : bool, optional\n Is the matrix in the lower form. 
(Default is upper form)\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n c : (u + 1, M) ndarray\n Cholesky factorization of a, in the same banded format as ab\n\n See also\n --------\n cho_solve_banded : Solve a linear set equations, given the Cholesky factorization\n of a banded hermitian.\n\n Examples\n --------\n >>> from scipy.linalg import cholesky_banded\n >>> from numpy import allclose, zeros, diag\n >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])\n >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)\n >>> A = A + A.conj().T + np.diag(Ab[2, :])\n >>> c = cholesky_banded(Ab)\n >>> C = np.diag(c[0, 2:], k=2) + np.diag(c[1, 1:], k=1) + np.diag(c[2, :])\n >>> np.allclose(C.conj().T @ C - A, np.zeros((5, 5)))\n True\n\n \"\"\"\n if check_finite:\n ab = asarray_chkfinite(ab)\n else:\n ab = asarray(ab)\n\n pbtrf, = get_lapack_funcs(('pbtrf',), (ab,))\n c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab)\n if info > 0:\n raise LinAlgError(\"%d-th leading minor not positive definite\" % info)\n if info < 0:\n raise ValueError('illegal value in %d-th argument of internal pbtrf'\n % -info)\n return c\n\n\ndef cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True):\n \"\"\"\n Solve the linear equations ``A x = b``, given the Cholesky factorization of\n the banded hermitian ``A``.\n\n Parameters\n ----------\n (cb, lower) : tuple, (ndarray, bool)\n `cb` is the Cholesky factorization of A, as given by cholesky_banded.\n `lower` must be the same value that was given to cholesky_banded.\n b : array_like\n Right-hand side\n overwrite_b : bool, optional\n If True, the function will overwrite the values in `b`.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n x : array\n The solution to the system A x = b\n\n See also\n --------\n cholesky_banded : Cholesky factorization of a banded matrix\n\n Notes\n -----\n\n .. 
versionadded:: 0.8.0\n\n Examples\n --------\n >>> from scipy.linalg import cholesky_banded, cho_solve_banded\n >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])\n >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)\n >>> A = A + A.conj().T + np.diag(Ab[2, :])\n >>> c = cholesky_banded(Ab)\n >>> x = cho_solve_banded((c, False), np.ones(5))\n >>> np.allclose(A @ x - np.ones(5), np.zeros(5))\n True\n\n \"\"\"\n (cb, lower) = cb_and_lower\n if check_finite:\n cb = asarray_chkfinite(cb)\n b = asarray_chkfinite(b)\n else:\n cb = asarray(cb)\n b = asarray(b)\n\n # Validate shapes.\n if cb.shape[-1] != b.shape[0]:\n raise ValueError(\"shapes of cb and b are not compatible.\")\n\n pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b))\n x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b)\n if info > 0:\n raise LinAlgError(\"%dth leading minor not positive definite\" % info)\n if info < 0:\n raise ValueError('illegal value in %dth argument of internal pbtrs'\n % -info)\n return x\n", "\"\"\"\nUnit tests for the differential global minimization algorithm.\n\"\"\"\nimport multiprocessing\nimport platform\n\nfrom scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,\n _ConstraintWrapper)\nfrom scipy.optimize import differential_evolution\nfrom scipy.optimize._constraints import (Bounds, NonlinearConstraint,\n LinearConstraint)\nfrom scipy.optimize import rosen\nfrom scipy.sparse import csr_matrix\nfrom scipy._lib._pep440 import Version\n\nimport numpy as np\nfrom numpy.testing import (assert_equal, assert_allclose,\n assert_almost_equal, assert_array_equal,\n assert_string_equal, assert_, suppress_warnings)\nfrom pytest import raises as assert_raises, warns\nimport pytest\n\n\nclass TestDifferentialEvolutionSolver(object):\n\n def setup_method(self):\n self.old_seterr = np.seterr(invalid='raise')\n self.limits = np.array([[0., 0.],\n [2., 2.]])\n self.bounds = [(0., 2.), (0., 2.)]\n\n self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,\n [(0, 100)])\n\n # dummy_solver2 will be used to test mutation strategies\n self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,\n [(0, 1)],\n popsize=7,\n mutation=0.5)\n # create a population that's only 7 members long\n # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T\n self.dummy_solver2.population = population\n\n def teardown_method(self):\n np.seterr(**self.old_seterr)\n\n def quadratic(self, x):\n return x[0]**2\n\n def test__strategy_resolves(self):\n # test that the correct mutation function is resolved by\n # different requested strategy arguments\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='best1exp')\n assert_equal(solver.strategy, 'best1exp')\n assert_equal(solver.mutation_func.__name__, '_best1')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='best1bin')\n assert_equal(solver.strategy, 'best1bin')\n assert_equal(solver.mutation_func.__name__, '_best1')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='rand1bin')\n assert_equal(solver.strategy, 'rand1bin')\n assert_equal(solver.mutation_func.__name__, '_rand1')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='rand1exp')\n assert_equal(solver.strategy, 'rand1exp')\n assert_equal(solver.mutation_func.__name__, '_rand1')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='rand2exp')\n assert_equal(solver.strategy, 'rand2exp')\n 
assert_equal(solver.mutation_func.__name__, '_rand2')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='best2bin')\n assert_equal(solver.strategy, 'best2bin')\n assert_equal(solver.mutation_func.__name__, '_best2')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='rand2bin')\n assert_equal(solver.strategy, 'rand2bin')\n assert_equal(solver.mutation_func.__name__, '_rand2')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='rand2exp')\n assert_equal(solver.strategy, 'rand2exp')\n assert_equal(solver.mutation_func.__name__, '_rand2')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='randtobest1bin')\n assert_equal(solver.strategy, 'randtobest1bin')\n assert_equal(solver.mutation_func.__name__, '_randtobest1')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='randtobest1exp')\n assert_equal(solver.strategy, 'randtobest1exp')\n assert_equal(solver.mutation_func.__name__, '_randtobest1')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='currenttobest1bin')\n assert_equal(solver.strategy, 'currenttobest1bin')\n assert_equal(solver.mutation_func.__name__, '_currenttobest1')\n\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='currenttobest1exp')\n assert_equal(solver.strategy, 'currenttobest1exp')\n assert_equal(solver.mutation_func.__name__, '_currenttobest1')\n\n def test__mutate1(self):\n # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.\n result = np.array([0.05])\n trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))\n assert_allclose(trial, result)\n\n result = np.array([0.25])\n trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))\n assert_allclose(trial, result)\n\n def test__mutate2(self):\n # strategies */2/*, i.e. 
rand/2/bin, best/2/exp, etc.\n # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n\n result = np.array([-0.1])\n trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))\n assert_allclose(trial, result)\n\n result = np.array([0.1])\n trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))\n assert_allclose(trial, result)\n\n def test__randtobest1(self):\n # strategies randtobest/1/*\n result = np.array([0.15])\n trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6))\n assert_allclose(trial, result)\n\n def test__currenttobest1(self):\n # strategies currenttobest/1/*\n result = np.array([0.1])\n trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6))\n assert_allclose(trial, result)\n\n def test_can_init_with_dithering(self):\n mutation = (0.5, 1)\n solver = DifferentialEvolutionSolver(self.quadratic,\n self.bounds,\n mutation=mutation)\n\n assert_equal(solver.dither, list(mutation))\n\n def test_invalid_mutation_values_arent_accepted(self):\n func = rosen\n mutation = (0.5, 3)\n assert_raises(ValueError,\n DifferentialEvolutionSolver,\n func,\n self.bounds,\n mutation=mutation)\n\n mutation = (-1, 1)\n assert_raises(ValueError,\n DifferentialEvolutionSolver,\n func,\n self.bounds,\n mutation=mutation)\n\n mutation = (0.1, np.nan)\n assert_raises(ValueError,\n DifferentialEvolutionSolver,\n func,\n self.bounds,\n mutation=mutation)\n\n mutation = 0.5\n solver = DifferentialEvolutionSolver(func,\n self.bounds,\n mutation=mutation)\n assert_equal(0.5, solver.scale)\n assert_equal(None, solver.dither)\n\n def test__scale_parameters(self):\n trial = np.array([0.3])\n assert_equal(30, self.dummy_solver._scale_parameters(trial))\n\n # it should also work with the limits reversed\n self.dummy_solver.limits = np.array([[100], [0.]])\n assert_equal(30, self.dummy_solver._scale_parameters(trial))\n\n def test__unscale_parameters(self):\n trial = np.array([30])\n assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))\n\n # it should also work with the limits reversed\n self.dummy_solver.limits = np.array([[100], [0.]])\n assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))\n\n def test__ensure_constraint(self):\n trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])\n self.dummy_solver._ensure_constraint(trial)\n\n assert_equal(trial[2], 0.9)\n assert_(np.logical_and(trial >= 0, trial <= 1).all())\n\n def test_differential_evolution(self):\n # test that the Jmin of DifferentialEvolutionSolver\n # is the same as the function evaluation\n solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])\n result = solver.solve()\n assert_almost_equal(result.fun, self.quadratic(result.x))\n\n def test_best_solution_retrieval(self):\n # test that the getter property method for the best solution works.\n solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])\n result = solver.solve()\n assert_almost_equal(result.x, solver.x)\n\n def test_callback_terminates(self):\n # test that if the callback returns true, then the minimization halts\n bounds = [(0, 2), (0, 2)]\n expected_msg = 'callback function requested stop early by returning True'\n\n def callback_python_true(param, convergence=0.):\n return True\n\n result = differential_evolution(rosen, bounds, callback=callback_python_true)\n assert_string_equal(result.message, expected_msg)\n\n def callback_evaluates_true(param, convergence=0.):\n # DE should stop if bool(self.callback) is True\n return [10]\n\n result = differential_evolution(rosen, bounds, callback=callback_evaluates_true)\n assert_string_equal(result.message, expected_msg)\n\n 
def callback_evaluates_false(param, convergence=0.):\n return []\n\n result = differential_evolution(rosen, bounds, callback=callback_evaluates_false)\n assert result.success\n\n def test_args_tuple_is_passed(self):\n # test that the args tuple is passed to the cost function properly.\n bounds = [(-10, 10)]\n args = (1., 2., 3.)\n\n def quadratic(x, *args):\n if type(args) != tuple:\n raise ValueError('args should be a tuple')\n return args[0] + args[1] * x + args[2] * x**2.\n\n result = differential_evolution(quadratic,\n bounds,\n args=args,\n polish=True)\n assert_almost_equal(result.fun, 2 / 3.)\n\n def test_init_with_invalid_strategy(self):\n # test that passing an invalid strategy raises ValueError\n func = rosen\n bounds = [(-3, 3)]\n assert_raises(ValueError,\n differential_evolution,\n func,\n bounds,\n strategy='abc')\n\n def test_bounds_checking(self):\n # test that the bounds checking works\n func = rosen\n bounds = [(-3)]\n assert_raises(ValueError,\n differential_evolution,\n func,\n bounds)\n bounds = [(-3, 3), (3, 4, 5)]\n assert_raises(ValueError,\n differential_evolution,\n func,\n bounds)\n\n # test that we can use a new-type Bounds object\n result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))\n assert_almost_equal(result.x, (1., 1.))\n\n def test_select_samples(self):\n # select_samples should return 5 separate random numbers.\n limits = np.arange(12., dtype='float64').reshape(2, 6)\n bounds = list(zip(limits[0, :], limits[1, :]))\n solver = DifferentialEvolutionSolver(None, bounds, popsize=1)\n candidate = 0\n r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)\n assert_equal(\n len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)\n\n def test_maxiter_stops_solve(self):\n # test that if the maximum number of iterations is exceeded\n # the solver stops.\n solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)\n result = solver.solve()\n assert_equal(result.success, False)\n assert_equal(result.message,\n 'Maximum number of iterations has been exceeded.')\n\n def test_maxfun_stops_solve(self):\n # test that if the maximum number of function evaluations is exceeded\n # during initialisation the solver stops\n solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,\n polish=False)\n result = solver.solve()\n\n assert_equal(result.nfev, 2)\n assert_equal(result.success, False)\n assert_equal(result.message,\n 'Maximum number of function evaluations has '\n 'been exceeded.')\n\n # test that if the maximum number of function evaluations is exceeded\n # during the actual minimisation, then the solver stops.\n # Have to turn polishing off, as this will still occur even if maxfun\n # is reached. 
For popsize=5 and len(bounds)=2, then there are only 10\n # function evaluations during initialisation.\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n popsize=5,\n polish=False,\n maxfun=40)\n result = solver.solve()\n\n assert_equal(result.nfev, 41)\n assert_equal(result.success, False)\n assert_equal(result.message,\n 'Maximum number of function evaluations has '\n 'been exceeded.')\n\n # now repeat for updating='deferred version\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n popsize=5,\n polish=False,\n maxfun=40,\n updating='deferred')\n result = solver.solve()\n\n assert_equal(result.nfev, 40)\n assert_equal(result.success, False)\n assert_equal(result.message,\n 'Maximum number of function evaluations has '\n 'been reached.')\n\n def test_quadratic(self):\n # test the quadratic function from object\n solver = DifferentialEvolutionSolver(self.quadratic,\n [(-100, 100)],\n tol=0.02)\n solver.solve()\n assert_equal(np.argmin(solver.population_energies), 0)\n\n def test_quadratic_from_diff_ev(self):\n # test the quadratic function from differential_evolution function\n differential_evolution(self.quadratic,\n [(-100, 100)],\n tol=0.02)\n\n def test_seed_gives_repeatability(self):\n result = differential_evolution(self.quadratic,\n [(-100, 100)],\n polish=False,\n seed=1,\n tol=0.5)\n result2 = differential_evolution(self.quadratic,\n [(-100, 100)],\n polish=False,\n seed=1,\n tol=0.5)\n assert_equal(result.x, result2.x)\n assert_equal(result.nfev, result2.nfev)\n\n @pytest.mark.skipif(Version(np.__version__) < Version('1.17'),\n reason='Generator not available for numpy, < 1.17')\n def test_random_generator(self):\n # check that np.random.Generator can be used (numpy >= 1.17)\n # obtain a np.random.Generator object\n rng = np.random.default_rng()\n\n inits = ['random', 'latinhypercube']\n for init in inits:\n differential_evolution(self.quadratic,\n [(-100, 100)],\n polish=False,\n seed=rng,\n tol=0.5,\n init=init)\n\n def test_exp_runs(self):\n # test whether exponential mutation loop runs\n solver = DifferentialEvolutionSolver(rosen,\n self.bounds,\n strategy='best1exp',\n maxiter=1)\n\n solver.solve()\n\n def test_gh_4511_regression(self):\n # This modification of the differential evolution docstring example\n # uses a custom popsize that had triggered an off-by-one error.\n # Because we do not care about solving the optimization problem in\n # this test, we use maxiter=1 to reduce the testing time.\n bounds = [(-5, 5), (-5, 5)]\n # result = differential_evolution(rosen, bounds, popsize=1815,\n # maxiter=1)\n\n # the original issue arose because of rounding error in arange, with\n # linspace being a much better solution. 1815 is quite a large popsize\n # to use and results in a long test time (~13s). 
I used the original\n # issue to figure out the lowest number of samples that would cause\n # this rounding error to occur, 49.\n differential_evolution(rosen, bounds, popsize=49, maxiter=1)\n\n def test_calculate_population_energies(self):\n # if popsize is 3, then the overall generation has size (6,)\n solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)\n solver._calculate_population_energies(solver.population)\n solver._promote_lowest_energy()\n assert_equal(np.argmin(solver.population_energies), 0)\n\n # initial calculation of the energies should require 6 nfev.\n assert_equal(solver._nfev, 6)\n\n def test_iteration(self):\n # test that DifferentialEvolutionSolver is iterable\n # if popsize is 3, then the overall generation has size (6,)\n solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,\n maxfun=12)\n x, fun = next(solver)\n assert_equal(np.size(x, 0), 2)\n\n # 6 nfev are required for initial calculation of energies, 6 nfev are\n # required for the evolution of the 6 population members.\n assert_equal(solver._nfev, 12)\n\n # the next generation should halt because it exceeds maxfun\n assert_raises(StopIteration, next, solver)\n\n # check a proper minimisation can be done by an iterable solver\n solver = DifferentialEvolutionSolver(rosen, self.bounds)\n _, fun_prev = next(solver)\n for i, soln in enumerate(solver):\n x_current, fun_current = soln\n assert(fun_prev >= fun_current)\n _, fun_prev = x_current, fun_current\n # need to have this otherwise the solver would never stop.\n if i == 50:\n break\n\n def test_convergence(self):\n solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,\n polish=False)\n solver.solve()\n assert_(solver.convergence < 0.2)\n\n def test_maxiter_none_GH5731(self):\n # Pre 0.17 the previous default for maxiter and maxfun was None.\n # the numerical defaults are now 1000 and np.inf. 
However, some scripts\n # will still supply None for both of those, this will raise a TypeError\n # in the solve method.\n solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,\n maxfun=None)\n solver.solve()\n\n def test_population_initiation(self):\n # test the different modes of population initiation\n\n # init must be either 'latinhypercube' or 'random'\n # raising ValueError is something else is passed in\n assert_raises(ValueError,\n DifferentialEvolutionSolver,\n *(rosen, self.bounds),\n **{'init': 'rubbish'})\n\n solver = DifferentialEvolutionSolver(rosen, self.bounds)\n\n # check that population initiation:\n # 1) resets _nfev to 0\n # 2) all population energies are np.inf\n solver.init_population_random()\n assert_equal(solver._nfev, 0)\n assert_(np.all(np.isinf(solver.population_energies)))\n\n solver.init_population_lhs()\n assert_equal(solver._nfev, 0)\n assert_(np.all(np.isinf(solver.population_energies)))\n\n # we should be able to initialize with our own array\n population = np.linspace(-1, 3, 10).reshape(5, 2)\n solver = DifferentialEvolutionSolver(rosen, self.bounds,\n init=population,\n strategy='best2bin',\n atol=0.01, seed=1, popsize=5)\n\n assert_equal(solver._nfev, 0)\n assert_(np.all(np.isinf(solver.population_energies)))\n assert_(solver.num_population_members == 5)\n assert_(solver.population_shape == (5, 2))\n\n # check that the population was initialized correctly\n unscaled_population = np.clip(solver._unscale_parameters(population),\n 0, 1)\n assert_almost_equal(solver.population[:5], unscaled_population)\n\n # population values need to be clipped to bounds\n assert_almost_equal(np.min(solver.population[:5]), 0)\n assert_almost_equal(np.max(solver.population[:5]), 1)\n\n # shouldn't be able to initialize with an array if it's the wrong shape\n # this would have too many parameters\n population = np.linspace(-1, 3, 15).reshape(5, 3)\n assert_raises(ValueError,\n DifferentialEvolutionSolver,\n *(rosen, self.bounds),\n **{'init': population})\n\n def test_infinite_objective_function(self):\n # Test that there are no problems if the objective function\n # returns inf on some runs\n def sometimes_inf(x):\n if x[0] < .5:\n return np.inf\n return x[1]\n bounds = [(0, 1), (0, 1)]\n differential_evolution(sometimes_inf, bounds=bounds, disp=False)\n\n def test_deferred_updating(self):\n # check setting of deferred updating, with default workers\n bounds = [(0., 2.), (0., 2.)]\n solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')\n assert_(solver._updating == 'deferred')\n assert_(solver._mapwrapper._mapfunc is map)\n solver.solve()\n\n def test_immediate_updating(self):\n # check setting of immediate updating, with default workers\n bounds = [(0., 2.), (0., 2.)]\n solver = DifferentialEvolutionSolver(rosen, bounds)\n assert_(solver._updating == 'immediate')\n\n # should raise a UserWarning because the updating='immediate'\n # is being overridden by the workers keyword\n with warns(UserWarning):\n with DifferentialEvolutionSolver(rosen, bounds, workers=2) as solver:\n pass\n assert_(solver._updating == 'deferred')\n\n def test_parallel(self):\n # smoke test for parallelization with deferred updating\n bounds = [(0., 2.), (0., 2.)]\n with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver(\n rosen, bounds, updating='deferred', workers=p.map) as solver:\n assert_(solver._mapwrapper.pool is not None)\n assert_(solver._updating == 'deferred')\n solver.solve()\n\n with DifferentialEvolutionSolver(rosen, bounds, 
updating='deferred',\n workers=2) as solver:\n assert_(solver._mapwrapper.pool is not None)\n assert_(solver._updating == 'deferred')\n solver.solve()\n\n def test_converged(self):\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])\n solver.solve()\n assert_(solver.converged())\n\n def test_constraint_violation_fn(self):\n def constr_f(x):\n return [x[0] + x[1]]\n\n def constr_f2(x):\n return [x[0]**2 + x[1], x[0] - x[1]]\n\n nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)\n\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc))\n\n cv = solver._constraint_violation_fn([1.0, 1.0])\n assert_almost_equal(cv, 0.1)\n\n nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc, nlc2))\n\n # for multiple constraints the constraint violations should\n # be concatenated.\n cv = solver._constraint_violation_fn([1.2, 1.])\n assert_almost_equal(cv, [0.3, 0.64, 0])\n\n cv = solver._constraint_violation_fn([2., 2.])\n assert_almost_equal(cv, [2.1, 4.2, 0])\n\n # should accept valid values\n cv = solver._constraint_violation_fn([0.5, 0.5])\n assert_almost_equal(cv, [0., 0., 0.])\n\n def test_constraint_population_feasibilities(self):\n def constr_f(x):\n return [x[0] + x[1]]\n\n def constr_f2(x):\n return [x[0]**2 + x[1], x[0] - x[1]]\n\n nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)\n\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc))\n\n # are population feasibilities correct\n # [0.5, 0.5] corresponds to scaled values of [1., 1.]\n feas, cv = solver._calculate_population_feasibilities(\n np.array([[0.5, 0.5], [1., 1.]]))\n assert_equal(feas, [False, False])\n assert_almost_equal(cv, np.array([[0.1], [2.1]]))\n assert cv.shape == (2, 1)\n\n nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc, nlc2))\n\n feas, cv = solver._calculate_population_feasibilities(\n np.array([[0.5, 0.5], [0.6, 0.5]]))\n assert_equal(feas, [False, False])\n assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]]))\n\n feas, cv = solver._calculate_population_feasibilities(\n np.array([[0.5, 0.5], [1., 1.]]))\n assert_equal(feas, [False, False])\n assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))\n assert cv.shape == (2, 3)\n\n feas, cv = solver._calculate_population_feasibilities(\n np.array([[0.25, 0.25], [1., 1.]]))\n assert_equal(feas, [True, False])\n assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))\n assert cv.shape == (2, 3)\n\n def test_constraint_solve(self):\n def constr_f(x):\n return np.array([x[0] + x[1]])\n\n nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)\n\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc))\n\n # trust-constr warns if the constraint function is linear\n with warns(UserWarning):\n res = solver.solve()\n\n assert constr_f(res.x) <= 1.9\n assert res.success\n\n def test_impossible_constraint(self):\n def constr_f(x):\n return np.array([x[0] + x[1]])\n\n nlc = NonlinearConstraint(constr_f, -np.inf, -1)\n\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc), popsize=3,\n seed=1)\n\n # a UserWarning is issued because the 'trust-constr' polishing is\n # attempted on the least infeasible solution found.\n with warns(UserWarning):\n res = solver.solve()\n\n assert res.maxcv > 0\n assert not res.success\n\n # test _promote_lowest_energy works when none of the 
population is\n # feasible. In this case, the solution with the lowest constraint\n # violation should be promoted.\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc), polish=False)\n next(solver)\n assert not solver.feasible.all()\n assert not np.isfinite(solver.population_energies).all()\n\n # now swap two of the entries in the population\n l = 20\n cv = solver.constraint_violation[0]\n\n solver.population_energies[[0, l]] = solver.population_energies[[l, 0]]\n solver.population[[0, l], :] = solver.population[[l, 0], :]\n solver.constraint_violation[[0, l], :] = (\n solver.constraint_violation[[l, 0], :])\n\n solver._promote_lowest_energy()\n assert_equal(solver.constraint_violation[0], cv)\n\n def test_accept_trial(self):\n # _accept_trial(self, energy_trial, feasible_trial, cv_trial,\n # energy_orig, feasible_orig, cv_orig)\n def constr_f(x):\n return [x[0] + x[1]]\n nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)\n solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],\n constraints=(nlc))\n fn = solver._accept_trial\n # both solutions are feasible, select lower energy\n assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))\n assert (fn(1.0, True, np.array([0.]), 0.1, True, np.array([0.]))\n == False)\n assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))\n\n # trial is feasible, original is not\n assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))\n\n # trial and original are infeasible\n # cv_trial have to be <= cv_original to be better\n assert (fn(0.1, False, np.array([0.5, 0.5]),\n 1.0, False, np.array([1., 1.0])))\n assert (fn(0.1, False, np.array([0.5, 0.5]),\n 1.0, False, np.array([1., 0.50])))\n assert (fn(1.0, False, np.array([0.5, 0.5]),\n 1.0, False, np.array([1., 0.4])) == False)\n\n def test_constraint_wrapper(self):\n lb = np.array([0, 20, 30])\n ub = np.array([0.5, np.inf, 70])\n x0 = np.array([1, 2, 3])\n pc = _ConstraintWrapper(Bounds(lb, ub), x0)\n assert (pc.violation(x0) > 0).any()\n assert (pc.violation([0.25, 21, 31]) == 0).all()\n\n x0 = np.array([1, 2, 3, 4])\n A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])\n pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)\n assert (pc.violation(x0) > 0).any()\n assert (pc.violation([-10, 2, -10, 4]) == 0).all()\n\n pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),\n x0)\n assert (pc.violation(x0) > 0).any()\n assert (pc.violation([-10, 2, -10, 4]) == 0).all()\n\n def fun(x):\n return A.dot(x)\n\n nonlinear = NonlinearConstraint(fun, -np.inf, 0)\n pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])\n assert (pc.violation(x0) > 0).any()\n assert (pc.violation([-10, 2, -10, 4]) == 0).all()\n\n def test_constraint_wrapper_violation(self):\n def cons_f(x):\n return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]])\n\n nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])\n pc = _ConstraintWrapper(nlc, [0.5, 1])\n assert np.size(pc.bounds[0]) == 2\n\n assert_array_equal(pc.violation([0.5, 1]), [0., 0.])\n assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1])\n assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0])\n assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0])\n assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14])\n\n def test_L1(self):\n # Lampinen ([5]) test problem 1\n\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])\n return fun\n\n A = np.zeros((10, 14)) # 1-indexed to match reference\n A[1, [1, 2, 10, 11]] = 
2, 2, 1, 1\n A[2, [1, 10]] = -8, 1\n A[3, [4, 5, 10]] = -2, -1, 1\n A[4, [1, 3, 10, 11]] = 2, 2, 1, 1\n A[5, [2, 11]] = -8, 1\n A[6, [6, 7, 11]] = -2, -1, 1\n A[7, [2, 3, 11, 12]] = 2, 2, 1, 1\n A[8, [3, 12]] = -8, 1\n A[9, [8, 9, 12]] = -2, -1, 1\n A = A[1:, 1:]\n\n b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])\n\n L = LinearConstraint(A, -np.inf, b)\n\n bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]\n\n # using a lower popsize to speed the test up\n res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,\n constraints=(L), popsize=2)\n\n x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)\n f_opt = -15\n\n assert_allclose(f(x_opt), f_opt)\n assert res.success\n assert_allclose(res.x, x_opt, atol=5e-4)\n assert_allclose(res.fun, f_opt, atol=5e-3)\n assert_(np.all([email protected] <= b))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n # now repeat the same solve, using the same overall constraints,\n # but using a sparse matrix for the LinearConstraint instead of an\n # array\n\n L = LinearConstraint(csr_matrix(A), -np.inf, b)\n\n # using a lower popsize to speed the test up\n res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,\n constraints=(L), popsize=2)\n\n assert_allclose(f(x_opt), f_opt)\n assert res.success\n assert_allclose(res.x, x_opt, atol=5e-4)\n assert_allclose(res.fun, f_opt, atol=5e-3)\n assert_(np.all([email protected] <= b))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n # now repeat the same solve, using the same overall constraints,\n # but specify half the constraints in terms of LinearConstraint,\n # and the other half by NonlinearConstraint\n def c1(x):\n x = np.hstack(([0], x))\n return [2*x[2] + 2*x[3] + x[11] + x[12],\n -8*x[3] + x[12]]\n\n def c2(x):\n x = np.hstack(([0], x))\n return -2*x[8] - x[9] + x[12]\n\n L = LinearConstraint(A[:5, :], -np.inf, b[:5])\n L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])\n N = NonlinearConstraint(c1, -np.inf, b[6:8])\n N2 = NonlinearConstraint(c2, -np.inf, b[8:9])\n constraints = (L, N, L2, N2)\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning)\n res = differential_evolution(f, bounds, strategy='rand1bin',\n seed=1234, constraints=constraints,\n popsize=2)\n\n assert_allclose(res.x, x_opt, atol=5e-4)\n assert_allclose(res.fun, f_opt, atol=5e-3)\n assert_(np.all([email protected] <= b))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n def test_L2(self):\n # Lampinen ([5]) test problem 2\n\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +\n 10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -\n 8*x[7])\n return fun\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],\n 196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],\n 282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],\n -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -\n 5*x[6] + 11*x[7]]\n\n N = NonlinearConstraint(c1, 0, np.inf)\n bounds = [(-10, 10)]*7\n constraints = (N)\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning)\n res = differential_evolution(f, bounds, strategy='rand1bin',\n seed=1234, constraints=constraints)\n\n f_opt = 680.6300599487869\n x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,\n -0.6244870, 1.038131, 1.594227)\n\n assert_allclose(f(x_opt), f_opt)\n 
assert_allclose(res.fun, f_opt)\n assert_allclose(res.x, x_opt, atol=1e-5)\n assert res.success\n assert_(np.all(np.array(c1(res.x)) >= 0))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n def test_L3(self):\n # Lampinen ([5]) test problem 3\n\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +\n (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +\n 5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +\n (x[10] - 7)**2 + 45\n )\n return fun # maximize\n\n A = np.zeros((4, 11))\n A[1, [1, 2, 7, 8]] = -4, -5, 3, -9\n A[2, [1, 2, 7, 8]] = -10, 8, 17, -2\n A[3, [1, 2, 9, 10]] = 8, -2, -5, 2\n A = A[1:, 1:]\n b = np.array([-105, 0, -12])\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],\n -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,\n -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],\n -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,\n -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]\n\n L = LinearConstraint(A, b, np.inf)\n N = NonlinearConstraint(c1, 0, np.inf)\n bounds = [(-10, 10)]*10\n constraints = (L, N)\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning)\n res = differential_evolution(f, bounds, seed=1234,\n constraints=constraints, popsize=3)\n\n x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,\n 1.430574, 1.321644, 9.828726, 8.280092, 8.375927)\n f_opt = 24.3062091\n\n assert_allclose(f(x_opt), f_opt, atol=1e-5)\n assert_allclose(res.x, x_opt, atol=1e-6)\n assert_allclose(res.fun, f_opt, atol=1e-5)\n assert res.success\n assert_(np.all(A @ res.x >= b))\n assert_(np.all(np.array(c1(res.x)) >= 0))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n def test_L4(self):\n # Lampinen ([5]) test problem 4\n def f(x):\n return np.sum(x[:3])\n\n A = np.zeros((4, 9))\n A[1, [4, 6]] = 0.0025, 0.0025\n A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025\n A[3, [8, 5]] = 0.01, -0.01\n A = A[1:, 1:]\n b = np.array([1, 1, 1])\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,\n x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],\n x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]\n\n L = LinearConstraint(A, -np.inf, 1)\n N = NonlinearConstraint(c1, 0, np.inf)\n\n bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5\n constraints = (L, N)\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning)\n res = differential_evolution(f, bounds, strategy='rand1bin',\n seed=1234, constraints=constraints,\n popsize=3)\n\n f_opt = 7049.248\n\n x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,\n 217.9823, 286.416528, 395.601172]\n\n assert_allclose(f(x_opt), f_opt, atol=0.001)\n assert_allclose(res.fun, f_opt, atol=0.001)\n\n # selectively use higher tol here for 32-bit\n # Windows based on gh-11693\n if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):\n assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)\n else:\n assert_allclose(res.x, x_opt, atol=0.002)\n\n assert res.success\n assert_(np.all(A @ res.x <= b))\n assert_(np.all(np.array(c1(res.x)) >= 0))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n def test_L5(self):\n # Lampinen ([5]) test problem 5\n\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n fun 
= (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /\n (x[1]**3*(x[1]+x[2])))\n return -fun # maximize\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [x[1]**2 - x[2] + 1,\n 1 - x[1] + (x[2]-4)**2]\n\n N = NonlinearConstraint(c1, -np.inf, 0)\n bounds = [(0, 10)]*2\n constraints = (N)\n\n res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,\n constraints=constraints)\n\n x_opt = (1.22797135, 4.24537337)\n f_opt = -0.095825\n print(res)\n assert_allclose(f(x_opt), f_opt, atol=2e-5)\n assert_allclose(res.fun, f_opt, atol=1e-4)\n assert res.success\n assert_(np.all(np.array(c1(res.x)) <= 0))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n def test_L6(self):\n # Lampinen ([5]) test problem 6\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n fun = (x[1]-10)**3 + (x[2] - 20)**3\n return fun\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,\n -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]\n\n N = NonlinearConstraint(c1, 0, np.inf)\n bounds = [(13, 100), (0, 100)]\n constraints = (N)\n res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,\n constraints=constraints, tol=1e-7)\n x_opt = (14.095, 0.84296)\n f_opt = -6961.814744\n\n assert_allclose(f(x_opt), f_opt, atol=1e-6)\n assert_allclose(res.fun, f_opt, atol=0.001)\n assert_allclose(res.x, x_opt, atol=1e-4)\n assert res.success\n assert_(np.all(np.array(c1(res.x)) >= 0))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n def test_L7(self):\n # Lampinen ([5]) test problem 7\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +\n 37.293239*x[1] - 40792.141)\n return fun\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [\n 85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -\n 0.0022053*x[3]*x[5],\n\n 80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +\n 0.0021813*x[3]**2,\n\n 9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +\n 0.0019085*x[3]*x[4]\n ]\n\n N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])\n\n bounds = [(78, 102), (33, 45)] + [(27, 45)]*3\n constraints = (N)\n\n res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,\n constraints=constraints)\n\n # using our best solution, rather than Lampinen/Koziel. 
Koziel solution\n # doesn't satisfy constraints, Lampinen f_opt just plain wrong.\n x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,\n 36.77579979]\n\n f_opt = -30665.537578\n\n assert_allclose(f(x_opt), f_opt)\n assert_allclose(res.x, x_opt, atol=1e-3)\n assert_allclose(res.fun, f_opt, atol=1e-3)\n\n assert res.success\n assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))\n assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n @pytest.mark.slow\n @pytest.mark.xfail(platform.machine() == 'ppc64le',\n reason=\"fails on ppc64le\")\n def test_L8(self):\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3\n return fun\n\n A = np.zeros((3, 5))\n A[1, [4, 3]] = 1, -1\n A[2, [3, 4]] = 1, -1\n A = A[1:, 1:]\n b = np.array([-.55, -.55])\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [\n 1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +\n 894.8 - x[1],\n 1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +\n 894.8 - x[2],\n 1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +\n 1294.8\n ]\n L = LinearConstraint(A, b, np.inf)\n N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))\n\n bounds = [(0, 1200)]*2+[(-.55, .55)]*2\n constraints = (L, N)\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning)\n # original Lampinen test was with rand1bin, but that takes a\n # huge amount of CPU time. Changing strategy to best1bin speeds\n # things up a lot\n res = differential_evolution(f, bounds, strategy='best1bin',\n seed=1234, constraints=constraints,\n maxiter=5000)\n\n x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)\n f_opt = 5126.4981\n\n assert_allclose(f(x_opt), f_opt, atol=1e-3)\n assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)\n assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)\n assert_allclose(res.fun, f_opt, atol=2e-2)\n assert res.success\n assert_(np.all([email protected] >= b))\n assert_(np.all(np.array(c1(res.x)) >= -0.001))\n assert_(np.all(np.array(c1(res.x)) <= 0.001))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n\n def test_L9(self):\n # Lampinen ([5]) test problem 9\n\n def f(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return x[1]**2 + (x[2]-1)**2\n\n def c1(x):\n x = np.hstack(([0], x)) # 1-indexed to match reference\n return [x[2] - x[1]**2]\n\n N = NonlinearConstraint(c1, [-.001], [0.001])\n\n bounds = [(-1, 1)]*2\n constraints = (N)\n res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,\n constraints=constraints)\n\n x_opt = [np.sqrt(2)/2, 0.5]\n f_opt = 0.75\n\n assert_allclose(f(x_opt), f_opt)\n assert_allclose(np.abs(res.x), x_opt, atol=1e-3)\n assert_allclose(res.fun, f_opt, atol=1e-3)\n assert res.success\n assert_(np.all(np.array(c1(res.x)) >= -0.001))\n assert_(np.all(np.array(c1(res.x)) <= 0.001))\n assert_(np.all(res.x >= np.array(bounds)[:, 0]))\n assert_(np.all(res.x <= np.array(bounds)[:, 1]))\n" ]
[ [ "scipy._lib.doccer.filldoc" ], [ "numpy.asarray", "numpy.atleast_2d", "numpy.asarray_chkfinite" ], [ "numpy.sqrt", "numpy.linspace", "numpy.testing.assert_string_equal", "numpy.dtype", "numpy.all", "numpy.seterr", "numpy.max", "numpy.argmin", "numpy.random.default_rng", "numpy.testing.assert_equal", "numpy.hstack", "scipy.optimize.differential_evolution", "scipy.optimize._constraints.NonlinearConstraint", "numpy.testing.suppress_warnings", "numpy.arange", "scipy.optimize._constraints.LinearConstraint", "numpy.full", "numpy.sin", "numpy.testing.assert_almost_equal", "numpy.size", "numpy.zeros", "scipy._lib._pep440.Version", "numpy.min", "scipy.optimize._differentialevolution._ConstraintWrapper", "scipy.optimize._constraints.Bounds", "scipy.sparse.csr_matrix", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.array", "numpy.logical_and", "numpy.sum", "numpy.abs", "numpy.isfinite", "scipy.optimize._differentialevolution.DifferentialEvolutionSolver", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JanBrabec/UCI-ML-API
[ "59f6c680ac914df55e93e05545eb198887510943" ]
[ "UCI_ML_Functions.py" ]
[ "# Functions to read, analyze, and download from UCI ML portal\n\n# ==========================================\n# Function to read UCI ML datasets table\n# ==========================================\ndef read_dataset_table(\n url=\"https://archive.ics.uci.edu/ml/datasets.php\", msg_flag=True\n):\n \"\"\"\n Reads the table of datasets from the url: \"https://archive.ics.uci.edu/ml/datasets.php\" and process it further to clean and categorize\n \"\"\"\n import pandas as pd\n\n try:\n if msg_flag:\n print(\"Reading the dataset table from UCI ML repo...\")\n datasets = pd.read_html(url)\n if msg_flag:\n print(\"Finished reading the table!\")\n except:\n print(\"Could not read the table from UCI ML portal, Sorry!\")\n\n df = datasets[5] # Fifth entry of this table is the main datasets information\n df.columns = [\n \"Name\",\n \"Data Types\",\n \"Default Task\",\n \"Attribute Types\",\n \"Number of Instances\",\n \"Number of Attributes\",\n \"Year\",\n ]\n # Remove first row which contains table header\n df = df.iloc[1:, :]\n\n return df\n\n\n# ==============================================================================================\n# Function to remove entries with unknown number of samples and cleanly define task categories\n# ==============================================================================================\ndef clean_dataset_table(df, msg_flag=True):\n \"\"\"\n Accepts the raw dataset table (a DataFrame object) and returns a cleaned up version removing entries with unknown number of samples and attributes\n Also creates a 'Task' category column indicating the main machine learning task associated with the dataset\n \"\"\"\n import time\n import pandas as pd\n\n if msg_flag:\n print(\"Cleaning up the dataset table\", end=\"\")\n for i in range(11):\n time.sleep(0.2)\n print(\".\", end=\"\")\n print(\" \", end=\"\")\n print()\n print(\"Rationalizing the task categories\", end=\"\")\n for i in range(11):\n time.sleep(0.2)\n print(\".\", end=\"\")\n print(\" \", end=\"\")\n\n pd.set_option(\"mode.chained_assignment\", None)\n\n df_copy = df.copy()\n df_clean = df_copy.dropna(subset=[\"Number of Instances\"])\n df_clean[\"Number of Instances\"] = df_clean[\"Number of Instances\"].apply(int)\n\n def size_instances(n):\n if n <= 100:\n return \"Small\"\n elif n <= 1000:\n return \"Medium\"\n elif n <= 10000:\n return \"Large\"\n else:\n return \"Extra Large\"\n\n df_clean[\"Sample size\"] = df_clean[\"Number of Instances\"].apply(size_instances)\n\n def categorize_task(task):\n if len(task) > 1:\n tasks = task.split(\", \")\n else:\n tasks = list(task)\n\n if len(tasks) == 1 and tasks[0] == \"Classification\":\n return \"Classification\"\n elif \"Clustering\" in tasks:\n return \"Clustering\"\n elif \"Regression\" in tasks:\n return \"Regression\"\n elif \"Recommender-Systems\" in tasks:\n return \"Recommender Systems\"\n elif \"Causal-Discovery\" in tasks:\n return \"Causal Discovery\"\n else:\n return \"Other/Unknown\"\n\n df_clean[\"Default Task\"] = df_clean[\"Default Task\"].apply(str)\n df_clean[\"Default Task\"] = df_clean[\"Default Task\"].apply(categorize_task)\n\n if msg_flag:\n print(\"\\nFinished processing the table!\")\n\n return df_clean\n\n\n# ======================================================================================================\n# Function to build a local table (CSV file) with name, attributes, machine learning tasks, size, etc\n# ======================================================================================================\ndef 
build_local_table(filename=None, msg_flag=True):\n \"\"\"\n Reads through the UCI ML portal and builds a local table with information such as: \\\n name, size, ML task, data type\n filename: Optional filename that can be chosen by the user\n \"\"\"\n df_table = read_dataset_table(msg_flag=msg_flag)\n df_clean = clean_dataset_table(df_table, msg_flag=msg_flag)\n try:\n if filename != None:\n df_clean.to_csv(filename)\n else:\n df_clean.to_csv(\"UCI table.csv\")\n except:\n print(\n \"Sorry, could not create the CSV table. Please make sure to close an already opened file, \\\n or to have sufficient permission to write files in the current directory\"\n )\n\n\n# ==================================================================\n# Function to read the main page text and create list of datasets\n# ==================================================================\ndef build_dataset_list(url=\"https://archive.ics.uci.edu/ml/datasets\", msg_flag=True):\n \"\"\"\n Scrapes through the UCI ML datasets page and builds a list of all datasets.\n \"\"\"\n\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n # Read the HTML from the URL and pass on to BeautifulSoup\n url = url\n if msg_flag:\n print(\"Opening the file connection...\")\n try:\n uh = urllib.request.urlopen(url, context=ctx)\n # print(\"HTTP status\",uh.getcode())\n html = uh.read()\n # print(f\"Reading done. Total {len(html)} characters read.\")\n except:\n print(\"Could not open the UCI ML portal successfully. Sorry!\")\n return -1\n\n soup = BeautifulSoup(html, \"html5lib\")\n\n dataset_list = []\n lst = []\n\n for link in soup.find_all(\"a\"):\n lst.append(link.attrs)\n\n if msg_flag:\n print()\n print(\"Adding datasets to the list\", end=\"\")\n\n for i in range(11):\n time.sleep(0.3)\n print(\".\", end=\"\")\n print(\" \", end=\"\")\n\n for l in lst:\n a = l[\"href\"]\n if a.find(\"/\") != -1:\n x = a.split(\"/\")\n if len(x) == 2:\n dataset_list.append(x[1])\n\n dataset_list = list(set(dataset_list))\n dataset_list = sorted(dataset_list)\n\n if msg_flag:\n print(\"\\nFinished adding datasets to the list!\")\n\n return dataset_list\n\n\n# ======================================================================================\n# Function to create dictionary of datasets' name, description, and identifier string\n# ======================================================================================\ndef build_dataset_dictionary(\n url=\"https://archive.ics.uci.edu/ml/datasets.php?format=&task=&att=&area=&numAtt=&numIns=&type=&sort=nameUp&view=list\",\n msg_flag=True,\n):\n \"\"\"\n Scrapes through the UCI ML datasets page and builds a dictionary of all datasets with names and description.\n Also stores the unique identifier corresponding to the dataset.\n This identifier string is needed by the downloader function to download the data file. Generic name won't work.\n \"\"\"\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n import re\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n url = url\n if msg_flag:\n print(\"Opening the file connection...\")\n try:\n uh = urllib.request.urlopen(url, context=ctx)\n html = uh.read()\n except:\n print(\"Could not open the UCI ML portal successfully. 
Sorry!\")\n return -1\n\n soup = BeautifulSoup(html, \"html5lib\")\n\n lst = []\n for tag in soup.find_all(\"p\"):\n lst.append(tag.contents)\n\n i = 0\n description_dict = {}\n\n for l in lst:\n if len(l) > 2:\n if str(l[1]).find(\"datasets/\") != -1:\n string = str(l[1])\n s = re.search('\">.*</a>', string)\n x, y = s.span()\n name = string[x + 2 : y - 4]\n desc = l[2][2:]\n tmp_list = []\n description_dict[name] = tmp_list\n description_dict[name].append(desc)\n s = re.search('\".*\"', string)\n x, y = s.span()\n identifier = string[x + 10 : y - 1]\n description_dict[name].append(identifier)\n i += 1\n if msg_flag:\n if i % 10 == 0 and i != 0:\n print(f\"Record {i} processed!\")\n\n return description_dict\n\n\n# ===============================================================\n# Function to create a DataFrame with all information together\n# ===============================================================\ndef build_full_dataframe(msg_flag=False):\n \"\"\"\n Builds a DataFrame with all information together including the url link for downloading the data.\n \"\"\"\n import pandas as pd\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n i = 0\n d = build_dataset_dictionary(msg_flag=False)\n new_d = {}\n dataset_list = build_dataset_list(msg_flag=False)\n\n for k, v in d.items():\n a = extract_url_dataset(v[1], msg_flag=msg_flag)\n if a != None:\n desc = v[0]\n identifier = v[1]\n v[0] = k\n v[1] = desc\n v.append(identifier)\n v.append(a)\n new_d[k] = v\n i += 1\n if msg_flag:\n print(f\"Dataset processed:{k}\")\n else:\n desc = v[0]\n identifier = v[1]\n v[0] = k\n v[1] = desc\n v.append(identifier)\n v.append(\"URL not available\")\n new_d[k] = v\n if msg_flag:\n print(f\"Dataset processed:{k}\")\n if msg_flag:\n print(\"\\nTotal datasets analyzed: \", i)\n\n df_dataset = pd.DataFrame(data=new_d)\n df_dataset = df_dataset.T\n df_dataset.columns = [\"Name\", \"Abstract\", \"Identifier string\", \"Datapage URL\"]\n df_dataset.index.set_names([\"Dataset\"], inplace=True)\n\n return df_dataset\n\n\n# ================================================================================================\n# Function to build a local database (CSV file) with name and URL (of raw data page) information\n# ================================================================================================\ndef build_local_database(filename=None, msg_flag=True):\n \"\"\"\n Reads through the UCI ML portal and builds a local table with information such as: \\\n name, size, ML task, data type\n filename: Optional filename that can be chosen by the user\n \"\"\"\n df_local = build_full_dataframe(msg_flag=msg_flag)\n try:\n if filename != None:\n df_local.to_csv(filename)\n else:\n df_local.to_csv(\"UCI database.csv\")\n except:\n print(\n \"Sorry, could not create the CSV table. 
Please make sure to close an already opened file, \\\n or to have sufficient permission to write files in the current directory\"\n )\n\n\n# ===============================================================================\n# Function to extract abstract/description of a particular dataset by searching\n# ===============================================================================\ndef return_abstract(name, local_database=None, msg_flag=False):\n \"\"\"\n Returns one-liner description (and webpage link for further information) of a particular dataset by searching the given name.\n local_database: Name of the database (CSV file) stored locally i.e. in the same directory, which contains information about all the datasets on UCI ML repo.\n msg_flag: Controls verbosity\n \"\"\"\n\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n if msg_flag:\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n if msg_flag:\n print(\"Done!\")\n\n # Number of rows\n nrows = df.shape[0]\n found = 0\n abstracts = []\n for r in range(nrows):\n if name in df.iloc[r][\"Name\"]:\n found += 1\n abstracts.append(\n df.iloc[r][\"Name\"]\n + \": \"\n + df.iloc[r][\"Abstract\"]\n + \". For more info, visit this link: \"\n + \"https://archive.ics.uci.edu/ml/datasets/\"\n + df.iloc[r][\"Identifier string\"]\n )\n if found == 0:\n print(\"Could not find your search term.\")\n return None\n else:\n print(\n f\"Total {found} instances found including partial match of the search term. Here they are...\\n\"\n )\n for a in abstracts:\n print(a)\n print(\"=\" * 100)\n\n\n# =============================================\n# Function to print all dataset descriptions\n# =============================================\ndef describe_all_dataset(msg_flag=False):\n \"\"\"\n Calls the build_dictionary function and prints description of all datasets from that.\n \"\"\"\n\n dict1 = build_dataset_dictionary(msg_flag=msg_flag)\n\n for k, v in dict1.items():\n print(f\"{k}: {v[0]}\")\n print(\"=\" * 100)\n\n\n# =======================================\n# Function to print all dataset names\n# =======================================\ndef print_all_datasets_names(msg_flag=False):\n \"\"\"\n Calls the build_dictionary function and prints names of all datasets from that.\n \"\"\"\n\n dict1 = build_dataset_dictionary(msg_flag=msg_flag)\n\n for key in dict1.keys():\n print(key)\n print(\"-\" * 100)\n\n\n# ==========================================\n# Function for extracting dataset page url\n# ==========================================\ndef extract_url_dataset(dataset, msg_flag=False):\n \"\"\"\n Given a dataset identifier this function extracts the URL for the page where the actual raw data resides.\n \"\"\"\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n dataset_dict = {}\n baseurl = \"https://archive.ics.uci.edu/ml/datasets/\"\n url = baseurl + dataset\n\n try:\n uh = urllib.request.urlopen(url, context=ctx)\n html = uh.read().decode()\n soup = BeautifulSoup(html, \"html5lib\")\n if soup.text.find(\"does not appear to exist\") != -1:\n if msg_flag:\n print(f\"{dataset} not found\")\n return None\n else:\n for link in 
soup.find_all(\"a\"):\n if link.attrs[\"href\"].find(\"machine-learning-databases\") != -1:\n a = link.attrs[\"href\"]\n a = a[2:]\n dataurl = \"https://archive.ics.uci.edu/ml/\" + str(a)\n # print(dataurl)\n return str(dataurl)\n # dataurls.append(dataurl)\n\n # After finishing the for-loop with a-tags, the first dataurl is added to the dictionary\n # dataset_dict['dataurl']=dataurls[0]\n except:\n # print(\"Could not retrieve\")\n return None\n\n\n# ================================\n# File download helper function\n# ================================\ndef download_file(url, directory):\n \"\"\"\n Downloads a file from a given url into the given directory.\n \"\"\"\n import requests\n from pathlib import Path\n\n local_filename = Path(directory) / Path(url.split(\"/\")[-1])\n # NOTE the stream=True parameter\n r = requests.get(url, stream=True)\n try:\n with open(local_filename, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n except:\n print(\"Sorry could not write this particular file!\")\n # f.flush()\n\n\n# =====================================================\n# Function for downloading the data set from a page\n# =====================================================\ndef download_dataset_url(url, directory, msg_flag=False, download_flag=True):\n \"\"\"\n Download all the files from the links in the given url.\n msg_flag: Controls verbosity.\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose).\n \"\"\"\n\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import os\n from pathlib import Path\n\n if url == \"URL not available\":\n return None\n\n cwd = os.getcwd()\n directory = directory.replace(\":\", \"-\")\n local_directory = Path(cwd) / Path(str(directory))\n if not os.path.exists(local_directory):\n try:\n os.makedirs(local_directory)\n except:\n print(f\"Cannot create directory: {directory}\")\n\n if download_flag:\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n uh = urllib.request.urlopen(url, context=ctx)\n html = uh.read().decode()\n soup = BeautifulSoup(html, \"html5lib\")\n\n links = []\n for link in soup.find_all(\"a\"):\n links.append(link.attrs[\"href\"])\n\n links_to_download = []\n\n if \"Index\" in links:\n idx = links.index(\"Index\")\n else:\n idx = len(links) - 2\n for i in range(idx + 1, len(links)):\n links_to_download.append(url + str(links[i]))\n\n for file_url in links_to_download:\n download_file(file_url, local_directory)\n\n if msg_flag:\n print(f\"Downloaded dataset from {url}\")\n\n\n# =================================================================================================\n# User API Function for downloading a given number of datasets and storing in a local directory\n# =================================================================================================\ndef download_datasets(num=10, local_database=None, msg_flag=True, download_flag=True):\n \"\"\"\n Downloads datasets and puts them in a local directory named after the dataset.\n By default downloads first 10 datasets only. User can choose the number of dataets to be downloaded.\n msg_flag: Controls verbosity.\n\tdownload_flag: Default is True. 
If set to False, only creates the directories but does not initiate download (for testing purpose).\n \"\"\"\n\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n if msg_flag:\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n if msg_flag:\n print(\"Done!\")\n\n if num < 1:\n print(\"Invalid entry for the number of datasets.\")\n else:\n for i in range(num):\n if msg_flag:\n print(f\"Downloading dataset(s) for: {df['Name'][i]}\")\n download_dataset_url(\n df[\"Datapage URL\"][i],\n df[\"Name\"][i],\n msg_flag=False,\n download_flag=download_flag,\n )\n print(\"\\nFinished downloading.\")\n\n\n# ============================================================================\n# User API function to download dataset by searching for a particular name\n# ============================================================================\ndef download_dataset_name(name, local_database=None, msg_flag=True, download_flag=True):\n \"\"\"\n Downloads a particular dataset by searching the given name.\n local_database: Name of the database (CSV file) stored locally i.e. in the same directory, which contains information about all the datasets on UCI ML repo.\n msg_flag: Controls verbosity\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose)\n \"\"\"\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n if msg_flag:\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n if msg_flag:\n print(\"Done!\")\n\n urls_to_download = {}\n\n for i in df.index.values:\n if name in i:\n urls_to_download[df.loc[i][\"Name\"]] = df.loc[i][\"Datapage URL\"]\n\n if len(urls_to_download) == 0:\n print(f'Search term \"{name}\" not found in the database. Nothing downloaded!')\n else:\n if len(urls_to_download) > 1:\n print(\n f\"{len(urls_to_download)} instances of search term found including partial match. Downloading datasets for all...\\n\"\n )\n\n for u in urls_to_download:\n if msg_flag:\n print(f\"Downloading dataset(s) for: {u}\")\n download_dataset_url(\n urls_to_download[u],\n directory=u,\n msg_flag=False,\n download_flag=download_flag,\n )\n\n print(\"\\nFinished downloading.\")\n\n\n# =========================================================\n# Function to download all datasets in a given dataframe\n# =========================================================\ndef download_all_from_dataframe(df, msg_flag=False, download_flag=True):\n \"\"\"\n Downloads all datasets which appear in the given dataframe.\n Assumes that the datapage URL information is in the dataframe.\n msg_flag: Controls verbosity\n download_flag: Default is True.
If set to False, only creates the directories but does not initiate download (for testing purpose)\n \"\"\"\n\n nrows = df.shape[0]\n if download_flag == False:\n print(\"Not downloading anything, just creating empty directories.\\n\")\n for r in range(nrows):\n if msg_flag:\n print(f\"Downloading the dataset: {df.iloc[r]['Name']}\")\n download_dataset_url(\n df.iloc[r][\"Datapage URL\"], df.iloc[r][\"Name\"], download_flag=download_flag\n )\n\n\n# =======================================================\n# User API Function to download datasets based on size\n# =======================================================\ndef download_datasets_size(\n size=\"Small\",\n local_database=None,\n local_table=None,\n msg_flag=False,\n download_flag=True,\n):\n \"\"\"\n Downloads all datasets which satisfy the 'size' criteria.\n size: Size of the dataset which user wants to download. Could be any of the following: 'Small', 'Medium', 'Large','Extra Large'.\n local_database: Name of the database (CSV file) stored locally i.e. in the same directory, which contains name and URL information about all the datasets on UCI ML repo.\n local_table: Name of the database (CSV file) stored locally i.e. in the same directory, which contains features information about all the datasets on UCI ML repo i.e. number of samples, type of machine learning task to be performed with the dataset.\n msg_flag: Controls verbosity\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose)\n \"\"\"\n import pandas as pd\n\n assert type(size) == str\n assert str(size) in [\"Small\", \"Medium\", \"Large\", \"Extra Large\"]\n\n if local_database != None:\n local_df_flag = True\n df_local = pd.read_csv(local_database, index_col=\"Dataset\")\n df = df_local\n else:\n local_df_flag = False\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n print(\"Master database build done!\")\n\n if local_table != None:\n local_table_flag = True\n table_local = pd.read_csv(local_table)\n df_clean = clean_dataset_table(table_local, msg_flag=msg_flag)\n else:\n local_table_flag = False\n print(\n \"Local table not supplied.\\nBuilding the master table by reading from the website...\"\n )\n df_table = read_dataset_table(msg_flag=msg_flag)\n df_clean = clean_dataset_table(df_table, msg_flag=msg_flag)\n\n df_merged = df_clean.merge(df, on=\"Name\")\n df_filter = df_merged[df_merged[\"Sample size\"] == str(size)]\n\n download_all_from_dataframe(\n df_filter, msg_flag=msg_flag, download_flag=download_flag\n )\n\n\n# ===========================================================================\n# User API Function to download datasets based on the machine learning task\n# ===========================================================================\ndef download_datasets_task(\n task=\"Classification\",\n local_database=None,\n local_table=None,\n msg_flag=False,\n download_flag=True,\n):\n \"\"\"\n Downloads all datasets which satisfy the 'task' criteria.\n task: Machine learning task for which user wants to download the datasets. Could be any of the following:\n\t 'Classification',\n\t\t'Recommender Systems',\n\t\t'Regression',\n\t\t'Other/Unknown',\n\t\t'Clustering',\n\t\t'Causal Discovery'.\n\tlocal_database: Name of the database (CSV file) stored locally i.e.
in the same directory, which contains name and URL information about all the datasets on UCI ML repo.\n\tlocal_table: Name of the database (CSV file) stored locally i.e. in the same directory, which contains features information about all the datasets on UCI ML repo i.e. number of samples, type of machine learning task to be performed with the dataset.\n\tmsg_flag: Controls verbosity\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose).\n \"\"\"\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n print(\"Master database build done!\")\n\n if local_table != None:\n local_table_flag = True\n df_clean = pd.read_csv(local_table)\n else:\n local_table_flag = False\n print(\n \"Local table not supplied.\\nBuilding the master table by reading from the website...\"\n )\n df_table = read_dataset_table(msg_flag=msg_flag)\n df_clean = clean_dataset_table(df_table, msg_flag=msg_flag)\n\n df_merged = df_clean.merge(df, on=\"Name\")\n df_filter = df_merged[df_merged[\"Default Task\"] == str(task)]\n\n download_all_from_dataframe(\n df_filter, msg_flag=msg_flag, download_flag=download_flag\n )\n" ]
[ [ "pandas.set_option", "pandas.read_csv", "pandas.DataFrame", "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
h-vetinari/triton
[ "d9dd97492f228020573b39a9cec14ee3b8776957", "d9dd97492f228020573b39a9cec14ee3b8776957" ]
[ "python/tutorials/03-matrix-multiplication.py", "python/test/unit/operators/test_cross_entropy.py" ]
[ "\"\"\"\nMatrix Multiplication\n======================\nIn this tutorial, you will write a 25-line high-performance FP16 matrix multiplication\nkernel that achieves performance on par with cuBLAS.\nYou will specifically learn about:\n\n- Block-level matrix multiplications\n- Multi-dimensional pointer arithmetic\n- Program re-ordering for improved L2 cache hit rate\n- Automatic performance tuning\n\"\"\"\n\n# %%\n# Motivations\n# -------------\n# Matrix multiplications are a key building block of most modern high-performance computing systems.\n# They are notoriously hard to optimize, hence their implementation is generally done by\n# hardware vendors themselves as part of so-called \"kernel libraries\" (e.g., cuBLAS).\n# Unfortunately, these libraries are often proprietary and cannot be easily customized\n# to accommodate the needs of modern deep learning workloads (e.g., fused activation functions).\n# In this tutorial, you will learn how to implement efficient matrix multiplications by\n# yourself with Triton, in a way that is easy to customize and extend.\n#\n# Roughly speaking, the kernel that we will write will implement the following blocked\n# algorithm to multiply a (M, K) by a (K, N) matrix:\n#\n# .. code-block:: python\n#\n# # do in parallel\n# for m in range(0, M, BLOCK_SIZE_M):\n# # do in parallel\n# for n in range(0, N, BLOCK_SIZE_N):\n# acc = zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=float32)\n# for k in range(0, K, BLOCK_SIZE_K):\n# a = A[m : m+BLOCK_SIZE_M, k : k+BLOCK_SIZE_K]\n# b = B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]\n# acc += dot(a, b)\n# C[m : m+BLOCK_SIZE_M, n : n+BLOCK_SIZE_N] = acc;\n#\n# where each iteration of the doubly-nested for-loop is performed by a dedicated Triton program instance.\n\n# %%\n# Compute Kernel\n# ----------------\n#\n# The above algorithm is, actually, fairly straightforward to implement in Triton.\n# The main difficulty comes from the computation of the memory locations at which blocks\n# of :code:`A` and :code:`B` must be read in the inner loop. For that, we need\n# multi-dimensional pointer arithmetics.\n#\n# Pointer Arithmetics\n# ~~~~~~~~~~~~~~~~~~~~\n#\n# For a row-major 2D tensor :code:`X`, the memory location of :code:`X[i, j]` is given by\n# :code:`&X[i, j] = X + i*stride_xi + j*stride_xj`.\n# Therefore, blocks of pointers for :code:`A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K]` and\n# :code:`B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]` can be defined in pseudo-code as:\n#\n# .. code-block:: python\n#\n# &A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K] = a_ptr + (m : m+BLOCK_SIZE_M)[:, None]*A.stride(0) + (k : k+BLOCK_SIZE_K)[None, :]*A.stride(1);\n# &B[k : k+BLOCK_SIZE_K, n:n+BLOCK_SIZE_N] = b_ptr + (k : k+BLOCK_SIZE_K)[:, None]*B.stride(0) + (n : n+BLOCK_SIZE_N)[None, :]*B.stride(1);\n#\n# Which means that pointers for blocks of A and B can be initialized (i.e., :code:`k=0`) in Triton as:\n#\n# .. code-block:: python\n#\n# offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n# offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n# offs_k = tl.arange(0, BLOCK_SIZE_K)\n# a_ptrs = a_ptr + (offs_am[:, None]*stride_am + offs_k [None, :]*stride_ak)\n# b_ptrs = b_ptr + (offs_k [:, None]*stride_bk + offs_bn[None, :]*stride_bn)\n#\n# And then updated in the inner loop as follows:\n#\n# ..
code-block:: python\n#\n# pa += BLOCK_SIZE_K * stride_ak;\n# pb += BLOCK_SIZE_K * stride_bk;\n#\n#\n# L2 Cache Optimizations\n# ~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# As mentioned above, each program instance computes a :code:`[BLOCK_SIZE_M, BLOCK_SIZE_N]`\n# block of :code:`C`.\n# It is important to remember that the order in which these blocks are computed does\n# matter, since it affects the L2 cache hit rate of our program. And unfortunately, a\n# simple row-major ordering\n#\n# .. code-block:: Python\n#\n# pid = triton.program_id(0);\n# grid_m = (M + BLOCK_SIZE_M - 1) // BLOCK_SIZE_M;\n# grid_n = (N + BLOCK_SIZE_N - 1) // BLOCK_SIZE_N;\n# pid_m = pid / grid_n;\n# pid_n = pid % grid_n;\n#\n# is just not going to cut it.\n#\n# One possible solution is to launch blocks in an order that promotes data reuse.\n# This can be done by 'super-grouping' blocks in groups of :code:`GROUP_SIZE_M` rows before\n# switching to the next column:\n#\n# .. code-block:: python\n#\n# # program ID\n# pid = tl.program_id(axis=0)\n# # number of program ids along the M axis\n# num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)\n# # number of program ids along the N axis\n# num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)\n# # number of programs in group\n# num_pid_in_group = GROUP_SIZE_M * num_pid_n\n# # id of the group this program is in\n# group_id = pid // num_pid_in_group\n# # row-id of the first program in the group\n# first_pid_m = group_id * GROUP_SIZE_M\n# # if `num_pid_m` isn't divisible by `GROUP_SIZE_M`, the last group is smaller\n# group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)\n# # *within groups*, programs are ordered in a column-major order\n# # row-id of the program in the *launch grid*\n# pid_m = first_pid_m + (pid % group_size_m)\n# # col-id of the program in the *launch grid*\n# pid_n = (pid % num_pid_in_group) // group_size_m\n#\n# For example, in the following matmul where each matrix is 9 blocks by 9 blocks,\n# we can see that if we compute the output in row-major ordering, we need to load 90\n# blocks into SRAM to compute the first 9 output blocks, but if we do it in grouped\n# ordering, we only need to load 54 blocks.\n# ..
image:: grouped_vs_row_major_ordering.png\n#\n# In practice, this can improve the performance of our matrix multiplication kernel by\n# more than 10\\% on some hardware architecture (e.g., 220 to 245 TFLOPS on A100).\n#\n\n# %%\n# Final Result\n# -------------\n#\n\nimport torch\n\nimport triton\nimport triton.language as tl\n\n# %\n# :code:`triton.jit`'ed functions can be auto-tuned by using the `triton.autotune`\n# decorator, which consumes:\n# - A list of :code:`triton.Config` objects that define different configurations of\n# meta-parameters (e.g., BLOCK_SIZE_M) and compilation options (e.g., num_warps) to try\n# - An autotuning *key* whose change in values will trigger evaluation of all the\n# provided configs\n\n\[email protected](\n configs=[\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n ],\n key=['M', 'N', 'K'],\n)\[email protected]\ndef matmul_kernel(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension. E.g. 
stride_am is how much to increase a_ptr\n # by to get the element one row down (A has M rows)\n stride_am, stride_ak,\n stride_bk, stride_bn,\n stride_cm, stride_cn,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE_M: tl.constexpr,\n ACTIVATION: tl.constexpr,\n):\n \"\"\"Kernel for computing the matmul C = A x B.\n A has shape (M, K), B has shape (K, N) and C has shape (M, N)\n \"\"\"\n # -----------------------------------------------------------\n # Map program ids `pid` to the block of C it should compute.\n # This is done in a grouped ordering to promote L2 data reuse\n # See above `L2 Cache Optimizations` section for details\n pid = tl.program_id(axis=0)\n num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)\n num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)\n num_pid_in_group = GROUP_SIZE_M * num_pid_n\n group_id = pid // num_pid_in_group\n first_pid_m = group_id * GROUP_SIZE_M\n group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)\n pid_m = first_pid_m + (pid % group_size_m)\n pid_n = (pid % num_pid_in_group) // group_size_m\n\n # ----------------------------------------------------------\n # Create pointers for the first blocks of A and B.\n # We will advance this pointer as we move in the K direction\n # and accumulate\n # a_ptrs is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers\n # b_ptrs is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers\n # see above `Pointer Arithmetics` section for details\n offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n offs_k = tl.arange(0, BLOCK_SIZE_K)\n a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)\n b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)\n\n # -----------------------------------------------------------\n # Iterate to compute a block of the C matrix\n # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block\n # of fp32 values for higher accuracy.\n # `accumulator` will be converted back to fp16 after the loop\n accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)\n for k in range(0, K, BLOCK_SIZE_K):\n # Note that for simplicity, we don't apply a mask here.\n # This means that if K is not a multiple of BLOCK_SIZE_K,\n # this will access out-of-bounds memory and produce an\n # error or (worse!)
incorrect results.\n a = tl.load(a_ptrs)\n b = tl.load(b_ptrs)\n # We accumulate along the K dimension\n accumulator += tl.dot(a, b)\n # Advance the ptrs to the next K block\n a_ptrs += BLOCK_SIZE_K * stride_ak\n b_ptrs += BLOCK_SIZE_K * stride_bk\n # you can fuse arbitrary activation functions here\n # while the accumulator is still in FP32!\n if ACTIVATION:\n accumulator = ACTIVATION(accumulator)\n c = accumulator.to(tl.float16)\n\n # -----------------------------------------------------------\n # Write back the block of the output matrix C\n offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]\n c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)\n tl.store(c_ptrs, c, mask=c_mask)\n\n\n# we can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `_matmul`\[email protected]\ndef leaky_relu(x):\n return tl.where(x >= 0, x, 0.01 * x)\n\n\n# %%\n# We can now create a convenience wrapper function that only takes two input tensors\n# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel\n\n\ndef matmul(a, b, activation=None):\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n assert a.is_contiguous(), \"matrix A must be contiguous\"\n assert b.is_contiguous(), \"matrix B must be contiguous\"\n M, K = a.shape\n K, N = b.shape\n assert (\n K % 32 == 0\n ), \"We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_SIZE_K\"\n # allocates output\n c = torch.empty((M, N), device=a.device, dtype=a.dtype)\n # 1D launch kernel where each block gets its own program.\n grid = lambda META: (\n triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n matmul_kernel[grid](\n a, b, c,\n M, N, K,\n a.stride(0), a.stride(1),\n b.stride(0), b.stride(1),\n c.stride(0), c.stride(1),\n ACTIVATION=activation,\n )\n return c\n\n\n# %%\n# Unit Test\n# -----------\n#\n# We can test our custom matrix multiplication operation against a native torch implementation (i.e., cuBLAS)\n\ntorch.manual_seed(0)\na = torch.randn((512, 512), device='cuda', dtype=torch.float16)\nb = torch.randn((512, 512), device='cuda', dtype=torch.float16)\ntriton_output = matmul(a, b, activation=None)\ntorch_output = torch.matmul(a, b)\nprint(f\"triton_output={triton_output}\")\nprint(f\"torch_output={torch_output}\")\nif triton.testing.allclose(triton_output, torch_output):\n print(\"✅ Triton and Torch match\")\nelse:\n print(\"❌ Triton and Torch differ\")\n\n# %%\n# Benchmark\n# --------------\n#\n# Square Matrix Performance\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~\n# We can now compare the performance of our kernel against that of cuBLAS. 
Here we focus on square matrices, but feel free to arrange this script as you wish to benchmark any other matrix shape.\n\n\[email protected]_report(\n triton.testing.Benchmark(\n x_names=['M', 'N', 'K'], # argument names to use as an x-axis for the plot\n x_vals=[\n 128 * i for i in range(2, 33)\n ], # different possible values for `x_name`\n line_arg='provider', # argument name whose value corresponds to a different line in the plot\n # possible values for `line_arg``\n line_vals=['cublas', 'cublas + relu', 'triton', 'triton + relu'],\n # label name for the lines\n line_names=[\"cuBLAS\", \"cuBLAS (+ torch.nn.LeakyReLU)\", \"Triton\", \"Triton (+ LeakyReLU)\"],\n # line styles\n styles=[('green', '-'), ('green', '--'), ('blue', '-'), ('blue', '--')],\n ylabel=\"TFLOPS\", # label name for the y-axis\n plot_name=\"matmul-performance\", # name for the plot. Used also as a file name for saving the plot.\n args={},\n )\n)\ndef benchmark(M, N, K, provider):\n a = torch.randn((M, K), device='cuda', dtype=torch.float16)\n b = torch.randn((K, N), device='cuda', dtype=torch.float16)\n if provider == 'cublas':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b))\n if provider == 'triton':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b))\n if provider == 'cublas + relu':\n torch_relu = torch.nn.ReLU(inplace=True)\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: torch_relu(torch.matmul(a, b))\n )\n if provider == 'triton + relu':\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: matmul(a, b, activation=leaky_relu)\n )\n perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)\n return perf(ms), perf(max_ms), perf(min_ms)\n\n\nbenchmark.run(show_plots=True, print_data=True)\n", "import pytest\nimport torch\n\nimport triton\n\n\[email protected](\"M, N, dtype, mode\",\n [\n (M, N, dtype, mode) for M in [1024, 821]\n for N in [512, 857, 1871, 2089, 8573, 31000]\n for dtype in ['float16', 'float32']\n for mode in ['forward', 'backward']\n ]\n )\ndef test_op(M, N, dtype, mode):\n dtype = {'float16': torch.float16, 'float32': torch.float32}[dtype]\n # create inputs\n x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)\n idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')\n # forward pass\n tt_y = triton.ops.cross_entropy(x, idx)\n th_y = torch.nn.CrossEntropyLoss(reduction=\"none\")(x, idx)\n if mode == 'forward':\n triton.testing.assert_almost_equal(th_y, tt_y)\n # backward pass\n elif mode == 'backward':\n dy = torch.randn_like(tt_y)\n # triton backward\n tt_y.backward(dy)\n tt_dx = x.grad.clone()\n # torch backward\n x.grad.zero_()\n th_y.backward(dy)\n th_dx = x.grad.clone()\n triton.testing.assert_almost_equal(th_dx, tt_dx)\n" ]
[ [ "torch.empty", "torch.manual_seed", "torch.randn", "torch.matmul", "torch.nn.ReLU" ], [ "torch.randn_like", "torch.randn", "torch.nn.CrossEntropyLoss", "torch.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jasonleeinf/ParlAI
[ "1f7f6d5b7481195b0214e835bb5d782db768d71c" ]
[ "tests/test_torch_agent.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\nfrom parlai.core.agents import Agent\n\nfrom collections import deque\n\nSKIP_TESTS = False\ntry:\n from parlai.core.torch_agent import TorchAgent, Output\n import torch\nexcept ImportError:\n SKIP_TESTS = True\n\n\nclass MockDict(Agent):\n \"\"\"Mock Dictionary Agent which just implements indexing and txt2vec.\"\"\"\n\n null_token = '__null__'\n NULL_IDX = 0\n start_token = '__start__'\n BEG_IDX = 1001\n end_token = '__end__'\n END_IDX = 1002\n p1_token = '__p1__'\n P1_IDX = 2001\n p2_token = '__p2__'\n P2_IDX = 2002\n\n def __init__(self, opt, shared=None):\n \"\"\"Initialize idx for incremental indexing.\"\"\"\n self.idx = 0\n\n def __getitem__(self, key):\n \"\"\"Return index of special token or return the token.\"\"\"\n if key == self.null_token:\n return self.NULL_IDX\n elif key == self.start_token:\n return self.BEG_IDX\n elif key == self.end_token:\n return self.END_IDX\n elif key == self.p1_token:\n return self.P1_IDX\n elif key == self.p2_token:\n return self.P2_IDX\n else:\n self.idx += 1\n return self.idx\n\n def __setitem__(self, key, value):\n pass\n\n def add_cmdline_args(self, *args, **kwargs):\n pass\n\n def txt2vec(self, txt):\n \"\"\"Return index of special tokens or range from 1 for each token.\"\"\"\n self.idx = 0\n return [self[tok] for tok in txt.split()]\n\n\nclass TorchAgent(TorchAgent):\n \"\"\"Use MockDict instead of regular DictionaryAgent.\"\"\"\n\n @staticmethod\n def dictionary_class():\n \"\"\"Replace normal dictionary class with mock one.\"\"\"\n return MockDict\n\n def train_step(self, batch):\n \"\"\"Return confirmation of training.\"\"\"\n return Output([f'Training {i}!' for i in range(len(batch.text_vec))])\n\n def eval_step(self, batch):\n \"\"\"Return confirmation of evaluation.\"\"\"\n return Output([f'Evaluating {i}!' 
for i in range(len(batch.text_vec))])\n\n\ndef get_agent(**kwargs):\n \"\"\"Return opt-initialized agent.\n\n :param kwargs: any kwargs you want to set using parser.set_params(**kwargs)\n \"\"\"\n if 'no_cuda' not in kwargs:\n kwargs['no_cuda'] = True\n from parlai.core.params import ParlaiParser\n parser = ParlaiParser()\n TorchAgent.add_cmdline_args(parser)\n parser.set_params(**kwargs)\n opt = parser.parse_args(print_args=False)\n return TorchAgent(opt)\n\n\nclass TestTorchAgent(unittest.TestCase):\n \"\"\"Basic tests on the util functions in TorchAgent.\"\"\"\n\n def test_mock(self):\n \"\"\"Just make sure we can instantiate a mock agent.\"\"\"\n agent = get_agent()\n self.assertTrue(isinstance(agent.dict, MockDict))\n\n def test_share(self):\n \"\"\"Make sure share works and shares dictionary.\"\"\"\n agent = get_agent()\n shared = agent.share()\n self.assertTrue('dict' in shared)\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test__vectorize_text(self):\n \"\"\"Test _vectorize_text and its different options.\"\"\"\n agent = get_agent()\n text = \"I'm sorry, Dave\"\n\n # test add_start and add_end\n vec = agent._vectorize_text(text, add_start=False, add_end=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False)\n self.assertEqual(len(vec), 4)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2, 3])\n vec = agent._vectorize_text(text, add_start=False, add_end=True)\n self.assertEqual(len(vec), 4)\n self.assertEqual(vec.tolist(), [1, 2, 3, MockDict.END_IDX])\n vec = agent._vectorize_text(text, add_start=True, add_end=True)\n self.assertEqual(len(vec), 5)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2, 3,\n MockDict.END_IDX])\n\n # now do it again with truncation=3\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [2, 3, MockDict.END_IDX])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [2, 3, MockDict.END_IDX])\n\n # now do it again with truncation=2\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [2, 3])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [3, MockDict.END_IDX])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [3, MockDict.END_IDX])\n\n # now do it again with truncation=2, don't truncate_left\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [1, 2])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n 
self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [1, 2])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1])\n\n # now do it again with truncation=3, don't truncate_left\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2])\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test__check_truncate(self):\n \"\"\"Make sure we are truncating when needed.\"\"\"\n agent = get_agent()\n inp = torch.LongTensor([1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, None).tolist(), [1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, 4).tolist(), [1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, 3).tolist(), [1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, 2).tolist(), [1, 2])\n self.assertEqual(agent._check_truncate(inp, 1).tolist(), [1])\n self.assertEqual(agent._check_truncate(inp, 0).tolist(), [])\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_vectorize(self):\n \"\"\"Test the vectorization of observations.\n\n Make sure they do not recompute results, and respect the different\n param options.\n \"\"\"\n agent = get_agent()\n obs_labs = {'text': 'No. Try not.', 'labels': ['Do.', 'Do not.']}\n obs_elabs = {'text': 'No. 
Try not.', 'eval_labels': ['Do.', 'Do not.']}\n\n for obs in (obs_labs, obs_elabs):\n lab_key = 'labels' if 'labels' in obs else 'eval_labels'\n lab_vec = lab_key + '_vec'\n lab_chc = lab_key + '_choice'\n\n inp = obs.copy()\n # test add_start=True, add_end=True\n agent.history.reset()\n agent.history.update_history(inp)\n out = agent.vectorize(inp, agent.history, add_start=True,\n add_end=True)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertEqual(out[lab_vec][1].item(), 1)\n self.assertEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test add_start=True, add_end=False\n inp = obs.copy()\n out = agent.vectorize(inp, agent.history, add_start=True,\n add_end=False)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertNotEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test add_start=False, add_end=True\n inp = obs.copy()\n out = agent.vectorize(inp, agent.history, add_start=False,\n add_end=True)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertNotEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test add_start=False, add_end=False\n inp = obs.copy()\n out = agent.vectorize(inp, agent.history, add_start=False,\n add_end=False)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertNotEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertNotEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test caching of tensors\n out_again = agent.vectorize(out, agent.history)\n # should have cached result from before\n self.assertIs(out['text_vec'], out_again['text_vec'])\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # next: should truncate cached result\n prev_vec = out['text_vec']\n out_again = agent.vectorize(out, agent.history,\n text_truncate=1)\n self.assertIsNot(prev_vec, out_again['text_vec'])\n self.assertEqual(out['text_vec'].tolist(), [3])\n\n # test split_lines\n agent = get_agent(split_lines=True)\n obs = {\n 'text': 'Hello.\\nMy name is Inogo Montoya.\\n'\n 'You killed my father.\\nPrepare to die.',\n }\n agent.history.update_history(obs)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(vecs,\n [[1], [1, 2, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3]])\n\n # check cache\n out_again = agent.vectorize(obs, agent.history)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(vecs,\n [[1], [1, 2, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3]])\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_batchify(self):\n \"\"\"Make sure the batchify function sets up the right fields.\"\"\"\n agent = get_agent(rank_candidates=True)\n obs_labs = [\n {'text': 'It\\'s only a flesh wound.',\n 'labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'labels': ['The needs of the few.']},\n {'text': 'Hello there.',\n 'labels': ['General Kenobi.']},\n ]\n obs_elabs = [\n {'text': 'It\\'s only a flesh wound.',\n 'eval_labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'eval_labels': ['The needs of the 
few.']},\n {'text': 'Hello there.',\n 'eval_labels': ['General Kenobi.']},\n ]\n for obs_batch in (obs_labs, obs_elabs):\n lab_key = 'labels' if 'labels' in obs_batch[0] else 'eval_labels'\n\n # nothing has been vectorized yet so should be empty\n batch = agent.batchify(obs_batch)\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNone(batch.label_vec)\n self.assertIsNone(batch.label_lengths)\n self.assertIsNone(batch.labels)\n self.assertIsNone(batch.valid_indices)\n self.assertIsNone(batch.candidates)\n self.assertIsNone(batch.candidate_vecs)\n self.assertIsNone(batch.image)\n\n obs_vecs = []\n for o in obs_batch:\n agent.history.reset()\n agent.history.update_history(o)\n obs_vecs.append(agent.vectorize(o, agent.history,\n add_start=False, add_end=False))\n\n # is_valid should map to nothing\n def is_valid(obs):\n return False\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_batch)\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNone(batch.label_vec)\n self.assertIsNone(batch.label_lengths)\n self.assertIsNone(batch.labels)\n self.assertIsNone(batch.valid_indices)\n self.assertIsNone(batch.candidates)\n self.assertIsNone(batch.candidate_vecs)\n self.assertIsNone(batch.image)\n\n # is_valid should check for text_vec\n def is_valid(obs):\n return 'text_vec' in obs\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_vecs)\n # which fields were filled vs should be empty?\n self.assertIsNotNone(batch.text_vec)\n self.assertIsNotNone(batch.text_lengths)\n self.assertIsNotNone(batch.label_vec)\n self.assertIsNotNone(batch.label_lengths)\n self.assertIsNotNone(batch.labels)\n self.assertIsNotNone(batch.valid_indices)\n self.assertIsNone(batch.candidates)\n self.assertIsNone(batch.candidate_vecs)\n self.assertIsNone(batch.image)\n\n # contents of certain fields:\n self.assertEqual(batch.text_vec.tolist(),\n [[1, 2, 3, 4, 5, 0],\n [1, 2, 3, 4, 5, 6],\n [1, 2, 0, 0, 0, 0]])\n self.assertEqual(batch.text_lengths, [5, 6, 2])\n self.assertEqual(batch.label_vec.tolist(),\n [[1, 0, 0, 0, 0],\n [1, 2, 3, 4, 5],\n [1, 2, 0, 0, 0]])\n self.assertEqual(batch.label_lengths, [1, 5, 2])\n self.assertEqual(batch.labels, [o[lab_key][0] for o in obs_batch])\n self.assertEqual(list(batch.valid_indices), [0, 1, 2])\n\n # now sort the batch, make sure fields are in sorted order\n batch = agent.batchify(obs_vecs, sort=True)\n self.assertEqual(batch.text_vec.tolist(),\n [[1, 2, 3, 4, 5, 6],\n [1, 2, 3, 4, 5, 0],\n [1, 2, 0, 0, 0, 0]])\n self.assertEqual(batch.text_lengths, [6, 5, 2])\n self.assertEqual(batch.label_vec.tolist(),\n [[1, 2, 3, 4, 5],\n [1, 0, 0, 0, 0],\n [1, 2, 0, 0, 0]])\n self.assertEqual(batch.label_lengths, [5, 1, 2])\n labs = [o[lab_key][0] for o in obs_batch]\n self.assertEqual(batch.labels, [labs[i] for i in [1, 0, 2]])\n self.assertEqual(list(batch.valid_indices), [1, 0, 2])\n\n # now sort just on ys\n new_vecs = [vecs.copy() for vecs in obs_vecs]\n for vec in new_vecs:\n vec.pop('text')\n vec.pop('text_vec')\n\n def is_valid(obs):\n return 'labels_vec' in obs or 'eval_labels_vec' in obs\n agent.is_valid = is_valid\n\n batch = agent.batchify(new_vecs, sort=True)\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNotNone(batch.label_vec)\n self.assertIsNotNone(batch.label_lengths)\n self.assertEqual(batch.label_vec.tolist(),\n [[1, 2, 3, 4, 5],\n [1, 2, 0, 0, 0],\n [1, 0, 0, 0, 0]])\n self.assertEqual(batch.label_lengths, [5, 2, 1])\n labs = [o[lab_key][0] 
for o in new_vecs]\n self.assertEqual(batch.labels, [labs[i] for i in [1, 2, 0]])\n self.assertEqual(list(batch.valid_indices), [1, 2, 0])\n\n # test is_valid\n def is_valid(obs):\n return 'text_vec' in obs and len(obs['text_vec']) < 3\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_vecs)\n self.assertEqual(batch.text_vec.tolist(), [[1, 2]])\n self.assertEqual(batch.text_lengths, [2])\n self.assertEqual(batch.label_vec.tolist(), [[1, 2]])\n self.assertEqual(batch.label_lengths, [2])\n self.assertEqual(batch.labels, obs_batch[2][lab_key])\n self.assertEqual(list(batch.valid_indices), [2])\n\n agent.history.reset()\n obs_cands = [\n agent.vectorize({'label_candidates': ['A', 'B', 'C']},\n agent.history),\n agent.vectorize({'label_candidates': ['1', '2', '5', '3', 'Sir']},\n agent.history),\n agent.vectorize({'label_candidates': ['Do', 'Re', 'Mi']},\n agent.history),\n agent.vectorize({'label_candidates': ['Fa', 'So', 'La', 'Ti']},\n agent.history),\n ]\n\n # is_valid should check for label candidates vecs\n def is_valid(obs):\n return 'label_candidates_vecs' in obs\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_cands)\n self.assertTrue(agent.rank_candidates, 'Agent not set up to rank.')\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNone(batch.label_vec)\n self.assertIsNone(batch.label_lengths)\n self.assertIsNone(batch.labels)\n self.assertIsNotNone(batch.valid_indices)\n self.assertIsNotNone(batch.candidates)\n self.assertIsNotNone(batch.candidate_vecs)\n self.assertEqual(list(batch.valid_indices), [0, 1, 2, 3])\n self.assertEqual(batch.candidates,\n [o['label_candidates'] for o in obs_cands])\n self.assertEqual(len(batch.candidate_vecs), len(obs_cands))\n for i, cs in enumerate(batch.candidate_vecs):\n self.assertEqual(len(cs), len(obs_cands[i]['label_candidates']))\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_match_batch(self):\n \"\"\"Make sure predictions are correctly aligned when available.\"\"\"\n agent = get_agent()\n\n # first try empty outputs\n reply = agent.match_batch([{}, {}, {}], [0, 1, 2], Output())\n self.assertEqual([{}, {}, {}], reply)\n reply = agent.match_batch([{}, {}, {}], [0, 1, 2], None)\n self.assertEqual([{}, {}, {}], reply)\n\n # try text in order\n reply = agent.match_batch([{}, {}, {}], [0, 1, 2],\n Output(['E.T.', 'Phone', 'Home']))\n self.assertEqual(\n [{'text': 'E.T.'}, {'text': 'Phone'}, {'text': 'Home'}], reply)\n\n # try text out of order\n reply = agent.match_batch([{}, {}, {}], [2, 0, 1],\n Output(['Home', 'E.T.', 'Phone']))\n self.assertEqual(\n [{'text': 'E.T.'}, {'text': 'Phone'}, {'text': 'Home'}], reply)\n\n # try text_candidates in order\n reply = agent.match_batch([{}, {}], [0, 1],\n Output(None, [['More human than human.',\n 'Less human than human'],\n ['Just walk into Mordor',\n 'Just QWOP into Mordor.']]))\n self.assertEqual(reply[0]['text_candidates'],\n ['More human than human.', 'Less human than human'])\n self.assertEqual(reply[1]['text_candidates'],\n ['Just walk into Mordor', 'Just QWOP into Mordor.'])\n # try text_candidates out of order\n reply = agent.match_batch([{}, {}], [1, 0],\n Output(None, [['More human than human.',\n 'Less human than human'],\n ['Just walk into Mordor',\n 'Just QWOP into Mordor.']]))\n self.assertEqual(reply[0]['text_candidates'],\n ['Just walk into Mordor', 'Just QWOP into Mordor.'])\n self.assertEqual(reply[1]['text_candidates'],\n ['More human than human.', 'Less human than human'])\n\n # try both text and 
text_candidates in order\n reply = agent.match_batch(\n [{}, {}], [0, 1],\n Output(['You shall be avenged...', 'Man creates dinosaurs...'],\n [['By Grabthar’s hammer.', 'By the suns of Worvan.'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.']]))\n self.assertEqual(reply[0]['text'], 'You shall be avenged...')\n self.assertEqual(reply[0]['text_candidates'],\n ['By Grabthar’s hammer.', 'By the suns of Worvan.'])\n self.assertEqual(reply[1]['text'], 'Man creates dinosaurs...')\n self.assertEqual(reply[1]['text_candidates'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.'])\n\n # try both text and text_candidates out of order\n reply = agent.match_batch(\n [{}, {}], [1, 0],\n Output(['You shall be avenged...', 'Man creates dinosaurs...'],\n [['By Grabthar’s hammer.', 'By the suns of Worvan.'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.']]))\n self.assertEqual(reply[0]['text'], 'Man creates dinosaurs...')\n self.assertEqual(reply[0]['text_candidates'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.'])\n self.assertEqual(reply[1]['text'], 'You shall be avenged...')\n self.assertEqual(reply[1]['text_candidates'],\n ['By Grabthar’s hammer.', 'By the suns of Worvan.'])\n\n def test__add_person_tokens(self):\n \"\"\"Make sure person tokens are added to the write place in text.\"\"\"\n agent = get_agent()\n text = (\n \"I've seen things you people wouldn't believe.\\n\"\n \"Attack ships on fire off the shoulder of Orion.\\n\"\n \"I watched C-beams glitter in the dark near the Tannhauser gate.\\n\"\n \"All those moments will be lost in time, like tears in rain.\")\n prefix = 'PRE'\n out = agent.history._add_person_tokens(text, prefix, add_after_newln=False)\n self.assertEqual(out, prefix + ' ' + text)\n out = agent.history._add_person_tokens(text, prefix, add_after_newln=True)\n idx = text.rfind('\\n') + 1\n self.assertEqual(out, text[:idx] + prefix + ' ' + text[idx:])\n\n def test_history(self):\n \"\"\"Test different dialog history settings.\"\"\"\n # try with unlimited history\n agent = get_agent(history_size=-1)\n obs = {'text': 'I am Groot.', 'labels': ['I am Groot?'],\n 'episode_done': False}\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange, no reply\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.\\nI am Groot.')\n\n # include reply and set episode_done to clear history after this one\n end_obs = obs.copy()\n end_obs['episode_done'] = True\n agent.history.update_history(end_obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text,\n 'I am Groot.\\nI am Groot.\\nI am Groot?\\nI am Groot.')\n\n # because of episode_done, should be same as first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # now try with history size = 1\n agent = get_agent(history_size=1)\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange should change nothing\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # third exchange with reply should change nothing\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # now try with history size = 2\n agent = 
get_agent(history_size=2)\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange with reply should contain reply\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot?\\nI am Groot.')\n\n # third exchange without reply should have two inputs\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.\\nI am Groot.')\n\n # now try with history size = 3\n agent = get_agent(history_size=3)\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange with reply should contain reply and input\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.\\nI am Groot?\\nI am Groot.')\n\n # now test add_person_tokens\n agent = get_agent(history_size=3, person_tokens=True)\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, f'{agent.P1_TOKEN} I am Groot.')\n\n # second exchange, history should still contain the tokens\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text,\n f'{agent.P1_TOKEN} I am Groot.\\n'\n f'{agent.P2_TOKEN} I am Groot?\\n'\n f'{agent.P1_TOKEN} I am Groot.')\n\n # now add add_p1_after_newln\n agent = get_agent(history_size=3, person_tokens=True,\n add_p1_after_newln=True)\n ctx_obs = obs.copy() # context then utterance in this text field\n ctx_obs['text'] = 'Groot is Groot.\\nI am Groot.'\n agent.history.update_history(ctx_obs)\n text = agent.history.get_history_str()\n self.assertEqual(text,\n f'Groot is Groot.\\n{agent.P1_TOKEN} I am Groot.')\n\n # second exchange, history should still contain context text\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text,\n 'Groot is Groot.\\n'\n f'{agent.P1_TOKEN} I am Groot.\\n'\n f'{agent.P2_TOKEN} I am Groot?\\n'\n f'{agent.P1_TOKEN} I am Groot.')\n\n # test history vecs\n agent.history.reset()\n agent.history.update_history(obs)\n vec = agent.history.get_history_vec()\n self.assertEqual(\n vec,\n deque([2001, 1, 2, 3])\n )\n\n # test history vec list\n agent.history.update_history(obs)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(\n vecs,\n [[2001, 1, 2, 3], [2001, 1, 2, 3]]\n )\n\n # test clearing history\n agent.history.reset()\n text = agent.history.get_history_str()\n self.assertIsNone(text)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(\n vecs,\n []\n )\n\n # test delimiter\n agent = get_agent(\n history_size=-1,\n delimiter=' Groot! ',\n )\n agent.history.update_history(obs)\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(\n text,\n 'I am Groot. Groot! I am Groot.'\n )\n\n def test_last_reply(self):\n \"\"\"Make sure last reply returns expected values.\"\"\"\n agent = get_agent()\n # nothing to retrieve\n self.assertIsNone(agent.last_reply())\n # set agent's generated replies\n agent.replies = {\n 'batch_reply': [{'text': 'It\\'s okay! 
I\\'m a leaf on the wind.'}]\n }\n # If the observation was previously an episode end, we shouldn't have any\n # older reply\n self.assertEqual(agent.last_reply(), None)\n # now agent should remember what it said\n agent.observation = {'episode_done': False}\n self.assertEqual(agent.last_reply(),\n 'It\\'s okay! I\\'m a leaf on the wind.')\n # now set true observation\n agent.observation = {\n 'text': 'Will that work?',\n 'labels': ['I\\'m a leaf on the wind. Watch how I soar.'],\n 'episode_done': False,\n }\n # now agent should remember true label\n self.assertEqual(agent.last_reply(),\n 'I\\'m a leaf on the wind. Watch how I soar.')\n # but not if we tell it not to\n self.assertEqual(agent.last_reply(use_label=False),\n 'It\\'s okay! I\\'m a leaf on the wind.')\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_observe(self):\n \"\"\"Make sure agent stores and returns observation.\"\"\"\n agent = get_agent()\n obs = {\n 'text': 'I\\'ll be back.',\n 'labels': ['I\\'m back.'],\n 'episode_done': True\n }\n out = agent.observe(obs.copy())\n self.assertIsNotNone(out)\n self.assertIsNotNone(agent.observation)\n self.assertEqual(out['text'], 'I\\'ll be back.')\n # episode was done so shouldn't remember history\n out = agent.observe(obs.copy())\n self.assertEqual(out['text'], 'I\\'ll be back.')\n self.assertTrue('text_vec' in out, 'Text should be vectorized.')\n\n # now try with episode not done\n obs['episode_done'] = False\n out = agent.observe(obs.copy())\n self.assertIsNotNone(out)\n self.assertIsNotNone(agent.observation)\n self.assertEqual(out['text'], 'I\\'ll be back.')\n # should remember history\n out = agent.observe(obs.copy())\n self.assertEqual(out['text'],\n 'I\\'ll be back.\\nI\\'m back.\\nI\\'ll be back.')\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_batch_act(self):\n \"\"\"Make sure batch act calls the right step.\"\"\"\n agent = get_agent()\n\n obs_labs = [\n {'text': 'It\\'s only a flesh wound.',\n 'labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'labels': ['The needs of the few.']},\n {'text': 'Hello there.',\n 'labels': ['General Kenobi.']},\n ]\n obs_labs_vecs = []\n for o in obs_labs:\n agent.history.reset()\n agent.history.update_history(o)\n obs_labs_vecs.append(agent.vectorize(o, agent.history))\n reply = agent.batch_act(obs_labs_vecs)\n for i in range(len(obs_labs_vecs)):\n self.assertEqual(reply[i]['text'], f'Training {i}!')\n\n obs_elabs = [\n {'text': 'It\\'s only a flesh wound.',\n 'eval_labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'eval_labels': ['The needs of the few.']},\n {'text': 'Hello there.',\n 'eval_labels': ['General Kenobi.']},\n ]\n obs_elabs_vecs = []\n for o in obs_elabs:\n agent.history.reset()\n agent.history.update_history(o)\n obs_elabs_vecs.append(agent.vectorize(o, agent.history))\n reply = agent.batch_act(obs_elabs_vecs)\n for i in range(len(obs_elabs_vecs)):\n self.assertEqual(reply[i]['text'], f'Evaluating {i}!')\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.LongTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CeasarLee/ncnn
[ "178825d14a16c4059820d9f054a8d857df671027", "178825d14a16c4059820d9f054a8d857df671027", "178825d14a16c4059820d9f054a8d857df671027", "178825d14a16c4059820d9f054a8d857df671027", "178825d14a16c4059820d9f054a8d857df671027" ]
[ "tools/pnnx/tests/test_F_avg_pool1d.py", "tools/pnnx/tests/test_F_tanh.py", "tools/pnnx/tests/ncnn/test_F_conv_transpose2d.py", "tools/pnnx/tests/test_nn_Conv2d.py", "tools/pnnx/tests/ncnn/test_F_hardtanh.py" ]
[ "# Tencent is pleased to support the open source community by making ncnn available.\r\n#\r\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\r\n#\r\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\r\n# in compliance with the License. You may obtain a copy of the License at\r\n#\r\n# https://opensource.org/licenses/BSD-3-Clause\r\n#\r\n# Unless required by applicable law or agreed to in writing, software distributed\r\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\r\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\r\n# specific language governing permissions and limitations under the License.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n\r\n def forward(self, x):\r\n x = F.avg_pool1d(x, kernel_size=3)\r\n x = F.avg_pool1d(x, kernel_size=4, stride=2, padding=2)\r\n x = F.avg_pool1d(x, kernel_size=3, stride=1, padding=(0), ceil_mode=False, count_include_pad=True)\r\n x = F.avg_pool1d(x, kernel_size=5, stride=2, padding=(2), ceil_mode=True, count_include_pad=False)\r\n x = F.avg_pool1d(x, kernel_size=3, stride=2, padding=1, ceil_mode=False, count_include_pad=True)\r\n x = F.avg_pool1d(x, kernel_size=2, stride=1, padding=0, ceil_mode=True, count_include_pad=True)\r\n x = F.avg_pool1d(x, kernel_size=4, stride=1, padding=2, ceil_mode=False, count_include_pad=False)\r\n return x\r\n\r\ndef test():\r\n net = Model()\r\n net.eval()\r\n\r\n torch.manual_seed(0)\r\n x = torch.rand(1, 12, 128)\r\n\r\n a = net(x)\r\n\r\n # export torchscript\r\n mod = torch.jit.trace(net, x)\r\n mod.save(\"test_F_avg_pool1d.pt\")\r\n\r\n # torchscript to pnnx\r\n import os\r\n os.system(\"../src/pnnx test_F_avg_pool1d.pt inputshape=[1,12,128]\")\r\n\r\n # pnnx inference\r\n import test_F_avg_pool1d_pnnx\r\n b = test_F_avg_pool1d_pnnx.test_inference()\r\n\r\n return torch.equal(a, b)\r\n\r\nif __name__ == \"__main__\":\r\n if test():\r\n exit(0)\r\n else:\r\n exit(1)\r\n", "# Tencent is pleased to support the open source community by making ncnn available.\r\n#\r\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\r\n#\r\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\r\n# in compliance with the License. You may obtain a copy of the License at\r\n#\r\n# https://opensource.org/licenses/BSD-3-Clause\r\n#\r\n# Unless required by applicable law or agreed to in writing, software distributed\r\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\r\n# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\r\n# specific language governing permissions and limitations under the License.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n\r\n def forward(self, x, y, z, w):\r\n x = F.tanh(x)\r\n y = F.tanh(y)\r\n z = F.tanh(z)\r\n w = F.tanh(w)\r\n return x, y, z, w\r\n\r\ndef test():\r\n net = Model()\r\n net.eval()\r\n\r\n torch.manual_seed(0)\r\n x = torch.rand(1, 16)\r\n y = torch.rand(12, 2, 16)\r\n z = torch.rand(1, 3, 12, 16)\r\n w = torch.rand(1, 5, 7, 9, 11)\r\n\r\n a0, a1, a2, a3 = net(x, y, z, w)\r\n\r\n # export torchscript\r\n mod = torch.jit.trace(net, (x, y, z, w))\r\n mod.save(\"test_F_tanh.pt\")\r\n\r\n # torchscript to pnnx\r\n import os\r\n os.system(\"../src/pnnx test_F_tanh.pt inputshape=[1,16],[12,2,16],[1,3,12,16],[1,5,7,9,11]\")\r\n\r\n # pnnx inference\r\n import test_F_tanh_pnnx\r\n b0, b1, b2, b3 = test_F_tanh_pnnx.test_inference()\r\n\r\n return torch.equal(a0, b0) and torch.equal(a1, b1) and torch.equal(a2, b2) and torch.equal(a3, b3)\r\n\r\nif __name__ == \"__main__\":\r\n if test():\r\n exit(0)\r\n else:\r\n exit(1)\r\n", "# Tencent is pleased to support the open source community by making ncnn available.\r\n#\r\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\r\n#\r\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\r\n# in compliance with the License. You may obtain a copy of the License at\r\n#\r\n# https://opensource.org/licenses/BSD-3-Clause\r\n#\r\n# Unless required by applicable law or agreed to in writing, software distributed\r\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\r\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\r\n# specific language governing permissions and limitations under the License.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n\r\n self.w2 = nn.Parameter(torch.rand(6, 12, 4, 4))\r\n self.b2 = nn.Parameter(torch.rand(12))\r\n self.w3 = nn.Parameter(torch.rand(12, 2, 3, 3))\r\n\r\n def forward(self, y):\r\n y = F.conv_transpose2d(y, self.w2, self.b2, stride=(2,2), padding=(1,1), output_padding=(1,1))\r\n y = F.conv_transpose2d(y, self.w3, None, stride=(1,2), padding=(2,1), dilation=(2,1), groups=3)\r\n return y\r\n\r\ndef test():\r\n net = Model()\r\n net.eval()\r\n\r\n torch.manual_seed(0)\r\n y = torch.rand(1, 6, 5, 6)\r\n\r\n a = net(y)\r\n\r\n # export torchscript\r\n mod = torch.jit.trace(net, y)\r\n mod.save(\"test_F_conv_transpose2d.pt\")\r\n\r\n # torchscript to pnnx\r\n import os\r\n os.system(\"../../src/pnnx test_F_conv_transpose2d.pt inputshape=[1,6,5,6]\")\r\n\r\n # ncnn inference\r\n import test_F_conv_transpose2d_ncnn\r\n b = test_F_conv_transpose2d_ncnn.test_inference()\r\n\r\n return torch.allclose(a, b, 1e-4, 1e-4)\r\n\r\nif __name__ == \"__main__\":\r\n if test():\r\n exit(0)\r\n else:\r\n exit(1)\r\n", "# Tencent is pleased to support the open source community by making ncnn available.\r\n#\r\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\r\n#\r\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\r\n# in compliance with the License. 
You may obtain a copy of the License at\r\n#\r\n# https://opensource.org/licenses/BSD-3-Clause\r\n#\r\n# Unless required by applicable law or agreed to in writing, software distributed\r\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\r\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\r\n# specific language governing permissions and limitations under the License.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n\r\n self.conv_0 = nn.Conv2d(in_channels=12, out_channels=16, kernel_size=3)\r\n self.conv_1 = nn.Conv2d(in_channels=16, out_channels=20, kernel_size=(2,4), stride=(2,1), padding=2, dilation=1)\r\n self.conv_2 = nn.Conv2d(in_channels=20, out_channels=24, kernel_size=(1,3), stride=1, padding=(2,4), dilation=1, groups=1, bias=False)\r\n if torch.__version__ < '1.9':\r\n self.conv_3 = nn.Conv2d(in_channels=24, out_channels=28, kernel_size=(5,4), stride=1, padding=0, dilation=1, groups=4, bias=True)\r\n self.conv_4 = nn.Conv2d(in_channels=28, out_channels=32, kernel_size=3, stride=1, padding=1, dilation=(1,2), groups=2, bias=False, padding_mode='zeros')\r\n else:\r\n self.conv_3 = nn.Conv2d(in_channels=24, out_channels=28, kernel_size=(5,4), stride=1, padding='valid', dilation=1, groups=4, bias=True)\r\n self.conv_4 = nn.Conv2d(in_channels=28, out_channels=32, kernel_size=3, stride=1, padding='same', dilation=(1,2), groups=2, bias=False, padding_mode='zeros')\r\n self.conv_5 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2, stride=2, padding=3, dilation=1, groups=32, bias=True, padding_mode='reflect')\r\n self.conv_6 = nn.Conv2d(in_channels=32, out_channels=28, kernel_size=2, stride=1, padding=2, dilation=1, groups=1, bias=False, padding_mode='replicate')\r\n #self.conv_7 = nn.Conv2d(in_channels=28, out_channels=24, kernel_size=3, stride=2, padding=(5,6), dilation=2, groups=1, bias=True, padding_mode='circular')\r\n\r\n def forward(self, x):\r\n x = self.conv_0(x)\r\n x = self.conv_1(x)\r\n x = self.conv_2(x)\r\n x = self.conv_3(x)\r\n x = self.conv_4(x)\r\n x = self.conv_5(x)\r\n x = self.conv_6(x)\r\n #x = self.conv_7(x)\r\n\r\n return x\r\n\r\ndef test():\r\n net = Model()\r\n net.eval()\r\n\r\n torch.manual_seed(0)\r\n x = torch.rand(1, 12, 64, 64)\r\n\r\n a = net(x)\r\n\r\n # export torchscript\r\n mod = torch.jit.trace(net, x)\r\n mod.save(\"test_nn_Conv2d.pt\")\r\n\r\n # torchscript to pnnx\r\n import os\r\n os.system(\"../src/pnnx test_nn_Conv2d.pt inputshape=[1,12,64,64]\")\r\n\r\n # pnnx inference\r\n import test_nn_Conv2d_pnnx\r\n b = test_nn_Conv2d_pnnx.test_inference()\r\n\r\n return torch.equal(a, b)\r\n\r\nif __name__ == \"__main__\":\r\n if test():\r\n exit(0)\r\n else:\r\n exit(1)\r\n", "# Tencent is pleased to support the open source community by making ncnn available.\r\n#\r\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\r\n#\r\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\r\n# in compliance with the License. You may obtain a copy of the License at\r\n#\r\n# https://opensource.org/licenses/BSD-3-Clause\r\n#\r\n# Unless required by applicable law or agreed to in writing, software distributed\r\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\r\n# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\r\n# specific language governing permissions and limitations under the License.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n\r\n def forward(self, x, y, z):\r\n x = F.hardtanh(x)\r\n y = F.hardtanh(y, -1, 1)\r\n z = F.hardtanh(z, -0.1, 0.1)\r\n return x, y, z\r\n\r\ndef test():\r\n net = Model()\r\n net.eval()\r\n\r\n torch.manual_seed(0)\r\n x = torch.rand(16)\r\n y = torch.rand(2, 16)\r\n z = torch.rand(3, 12, 16)\r\n\r\n a = net(x, y, z)\r\n\r\n # export torchscript\r\n mod = torch.jit.trace(net, (x, y, z))\r\n mod.save(\"test_F_hardtanh.pt\")\r\n\r\n # torchscript to pnnx\r\n import os\r\n os.system(\"../../src/pnnx test_F_hardtanh.pt inputshape=[16],[2,16],[3,12,16]\")\r\n\r\n # ncnn inference\r\n import test_F_hardtanh_ncnn\r\n b = test_F_hardtanh_ncnn.test_inference()\r\n\r\n for a0, b0 in zip(a, b):\r\n if not torch.allclose(a0, b0, 1e-4, 1e-4):\r\n return False\r\n return True\r\n\r\nif __name__ == \"__main__\":\r\n if test():\r\n exit(0)\r\n else:\r\n exit(1)\r\n" ]
[ [ "torch.jit.trace", "torch.manual_seed", "torch.equal", "torch.rand", "torch.nn.functional.avg_pool1d" ], [ "torch.jit.trace", "torch.manual_seed", "torch.equal", "torch.rand", "torch.nn.functional.tanh" ], [ "torch.nn.functional.conv_transpose2d", "torch.jit.trace", "torch.manual_seed", "torch.rand", "torch.allclose" ], [ "torch.jit.trace", "torch.manual_seed", "torch.nn.Conv2d", "torch.equal", "torch.rand" ], [ "torch.jit.trace", "torch.manual_seed", "torch.nn.functional.hardtanh", "torch.rand", "torch.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rmit-ir/al-ef
[ "abffe57ae171cd846ca29b5e1b5a9f337c948912" ]
[ "code/density.py" ]
[ "#!/usr/bin/env python\nimport numpy\n\ndef jaccquard_similarity(a, b):\n if len(b) == 0 or len(a) == 0: return 0.0\n return len(set(a).intersection(b))*1./len(set(a).union(set(b)))\n\ndef similarityMatrix(features):\n a = numpy.zeros((len(features), len(features)),\n dtype=numpy.float)\n ids = list(features.keys())\n id2row = {}\n for i, idi in enumerate(ids):\n id2row[idi] = i\n for j, idj in enumerate(ids):\n if i == j:\n a[i, j] = 1\n break\n a[i, j] = jaccquard_similarity(features[idi][0].keys(),\n features[idj][0].keys())\n a[j, i] = a[i, j]\n return a, id2row\n\ndef density(matrix, row):\n return numpy.mean(matrix[row,:])\n\ndef k_density(matrix, row, k=5):\n r = matrix[row,:]\n return numpy.mean(numpy.sort(r[1:k+1])[::-1])\n\ndef margin_density(distance, matrix, row):\n return (1-density(matrix, row)*(1-distance))\n\ndef margin_k_density(distance, matrix, row, k=5):\n return (1-k_density(matrix, row, k)*(1-distance))\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.mean", "numpy.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adeptflax/image2image
[ "8c7c531176d261789f90a27125b31d6241bc9c27" ]
[ "taming/modules/transformer/mingpt.py" ]
[ "\"\"\"\ntaken from: https://github.com/karpathy/minGPT/\nGPT model:\n- the initial stem consists of a combination of token encoding and a positional encoding\n- the meat of it is a uniform sequence of Transformer blocks\n - each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block\n - all blocks feed into a central residual pathway similar to resnets\n- the final decoder is a linear projection into a vanilla Softmax classifier\n\"\"\"\n\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nlogger = logging.getLogger(__name__)\n\n\nclass GPTConfig:\n \"\"\" base GPT config, params common to all GPT versions \"\"\"\n embd_pdrop = 0.1\n resid_pdrop = 0.1\n attn_pdrop = 0.1\n\n def __init__(self, vocab_size, block_size, **kwargs):\n self.vocab_size = vocab_size\n self.block_size = block_size\n for k,v in kwargs.items():\n setattr(self, k, v)\n\n\nclass GPT1Config(GPTConfig):\n \"\"\" GPT-1 like network roughly 125M params \"\"\"\n n_layer = 12\n n_head = 12\n n_embd = 768\n\n\nclass GPT2Config(GPTConfig):\n \"\"\" GPT-2 like network roughly 1.5B params \"\"\"\n # TODO\n\n\nclass CausalSelfAttention(nn.Module):\n \"\"\"\n A vanilla multi-head masked self-attention layer with a projection at the end.\n It is possible to use torch.nn.MultiheadAttention here but I am including an\n explicit implementation here to show that there is nothing too scary here.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n assert config.n_embd % config.n_head == 0\n # key, query, value projections for all heads\n self.key = nn.Linear(config.n_embd, config.n_embd)\n self.query = nn.Linear(config.n_embd, config.n_embd)\n self.value = nn.Linear(config.n_embd, config.n_embd)\n # regularization\n self.attn_drop = nn.Dropout(config.attn_pdrop)\n self.resid_drop = nn.Dropout(config.resid_pdrop)\n # output projection\n self.proj = nn.Linear(config.n_embd, config.n_embd)\n # causal mask to ensure that attention is only applied to the left in the input sequence\n mask = torch.tril(torch.ones(config.block_size,\n config.block_size))\n if hasattr(config, \"n_unmasked\"):\n mask[:config.n_unmasked, :config.n_unmasked] = 1\n self.register_buffer(\"mask\", mask.view(1, 1, config.block_size, config.block_size))\n self.n_head = config.n_head\n\n def forward(self, x, layer_past=None):\n B, T, C = x.size()\n\n # calculate query, key, values for all heads in batch and move head forward to be the batch dim\n k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))\n att = F.softmax(att, dim=-1)\n att = self.attn_drop(att)\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n\n # output projection\n y = self.resid_drop(self.proj(y))\n return y\n\n\nclass Block(nn.Module):\n \"\"\" an unassuming Transformer block \"\"\"\n def __init__(self, config):\n super().__init__()\n self.ln1 = nn.LayerNorm(config.n_embd)\n self.ln2 = nn.LayerNorm(config.n_embd)\n self.attn = 
CausalSelfAttention(config)\n self.mlp = nn.Sequential(\n nn.Linear(config.n_embd, 4 * config.n_embd),\n nn.GELU(), # nice\n nn.Linear(4 * config.n_embd, config.n_embd),\n nn.Dropout(config.resid_pdrop),\n )\n\n def forward(self, x):\n x = x + self.attn(self.ln1(x))\n x = x + self.mlp(self.ln2(x))\n return x\n\n\nclass GPT(nn.Module):\n \"\"\" the full GPT language model, with a context size of block_size \"\"\"\n def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256,\n embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):\n super().__init__()\n config = GPTConfig(vocab_size=vocab_size, block_size=block_size,\n embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,\n n_layer=n_layer, n_head=n_head, n_embd=n_embd,\n n_unmasked=n_unmasked)\n # input embedding stem\n self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)\n self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])\n # decoder head\n self.ln_f = nn.LayerNorm(config.n_embd)\n self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.block_size = config.block_size\n self.apply(self._init_weights)\n self.config = config\n logger.info(\"number of parameters: %e\", sum(p.numel() for p in self.parameters()))\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def forward(self, idx, embeddings=None, targets=None):\n # forward the GPT model\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n\n if embeddings is not None: # prepend explicit embeddings\n token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)\n \n t = token_embeddings.shape[1]\n assert t <= self.block_size, \"Cannot forward, model block size is exhausted.\"\n position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector\n x = self.drop(token_embeddings + position_embeddings)\n x = self.blocks(x)\n x = self.ln_f(x)\n logits = self.head(x)\n\n # if we are given some desired targets also calculate the loss\n loss = None\n if targets is not None:\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n\n return logits, loss\n\n\nclass DummyGPT(nn.Module):\n # for debugging\n def __init__(self, add_value=1):\n super().__init__()\n self.add_value = add_value\n\n def forward(self, idx):\n return idx + self.add_value, None\n\n\nclass CodeGPT(nn.Module):\n \"\"\"Takes in semi-embeddings\"\"\"\n def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256,\n embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):\n super().__init__()\n config = GPTConfig(vocab_size=vocab_size, block_size=block_size,\n embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,\n n_layer=n_layer, n_head=n_head, n_embd=n_embd,\n n_unmasked=n_unmasked)\n # input embedding stem\n self.tok_emb = nn.Linear(in_channels, config.n_embd)\n self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.Sequential(*[Block(config) 
for _ in range(config.n_layer)])\n # decoder head\n self.ln_f = nn.LayerNorm(config.n_embd)\n self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.block_size = config.block_size\n self.apply(self._init_weights)\n self.config = config\n logger.info(\"number of parameters: %e\", sum(p.numel() for p in self.parameters()))\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def forward(self, idx, embeddings=None, targets=None):\n # forward the GPT model\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n\n if embeddings is not None: # prepend explicit embeddings\n token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)\n\n t = token_embeddings.shape[1]\n assert t <= self.block_size, \"Cannot forward, model block size is exhausted.\"\n position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector\n x = self.drop(token_embeddings + position_embeddings)\n x = self.blocks(x)\n x = self.ln_f(x)\n logits = self.head(x)\n\n # if we are given some desired targets also calculate the loss\n loss = None\n if targets is not None:\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n\n return logits, loss\n\n\n\n#### sampling utils\n\ndef top_k_logits(logits, k):\n v, ix = torch.topk(logits, k)\n out = logits.clone()\n out[out < v[:, [-1]]] = -float('Inf')\n return out\n\[email protected]_grad()\ndef sample(model, x, steps, temperature=1.0, sample=False, top_k=None):\n \"\"\"\n take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in\n the sequence, feeding the predictions back into the model each time. 
Clearly the sampling\n has quadratic complexity unlike an RNN that is only linear, and has a finite context window\n of block_size, unlike an RNN that has an infinite context window.\n \"\"\"\n block_size = model.get_block_size()\n model.eval()\n for k in range(steps):\n x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed\n logits, _ = model(x_cond)\n # pluck the logits at the final step and scale by temperature\n logits = logits[:, -1, :] / temperature\n # optionally crop probabilities to only the top k options\n if top_k is not None:\n logits = top_k_logits(logits, top_k)\n # apply softmax to convert to probabilities\n probs = F.softmax(logits, dim=-1)\n # sample from the distribution or take the most likely\n if sample:\n ix = torch.multinomial(probs, num_samples=1)\n else:\n _, ix = torch.topk(probs, k=1, dim=-1)\n # append to the sequence and continue\n x = torch.cat((x, ix), dim=1)\n\n return x\n\n\n\n#### clustering utils\n\nclass KMeans(nn.Module):\n def __init__(self, ncluster=512, nc=3, niter=10):\n super().__init__()\n self.ncluster = ncluster\n self.nc = nc\n self.niter = niter\n self.shape = (3,32,32)\n self.register_buffer(\"C\", torch.zeros(self.ncluster,nc))\n self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))\n\n def is_initialized(self):\n return self.initialized.item() == 1\n\n @torch.no_grad()\n def initialize(self, x):\n N, D = x.shape\n assert D == self.nc, D\n c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random\n for i in range(self.niter):\n # assign all pixels to the closest codebook element\n a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1)\n # move each codebook element to be the mean of the pixels that assigned to it\n c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)])\n # re-assign any poorly positioned codebook elements\n nanix = torch.any(torch.isnan(c), dim=1)\n ndead = nanix.sum().item()\n print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead))\n c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters\n\n self.C.copy_(c)\n self.initialized.fill_(1)\n\n\n def forward(self, x, reverse=False, shape=None):\n if not reverse:\n # flatten\n bs,c,h,w = x.shape\n assert c == self.nc\n x = x.reshape(bs,c,h*w,1)\n C = self.C.permute(1,0)\n C = C.reshape(1,c,1,self.ncluster)\n a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices\n return a\n else:\n # flatten\n bs, HW = x.shape\n \"\"\"\n c = self.C.reshape( 1, self.nc, 1, self.ncluster)\n c = c[bs*[0],:,:,:]\n c = c[:,:,HW*[0],:]\n x = x.reshape(bs, 1, HW, 1)\n x = x[:,3*[0],:,:]\n x = torch.gather(c, dim=3, index=x)\n \"\"\"\n x = self.C[x]\n x = x.permute(0,2,1)\n shape = shape if shape is not None else self.shape\n x = x.reshape(bs, *shape)\n\n return x\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.ones", "torch.nn.GELU", "torch.isnan", "torch.cat", "torch.zeros", "torch.randperm", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.multinomial", "torch.nn.Linear", "torch.tensor", "torch.no_grad", "torch.topk" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hee9joon/Face-Generation
[ "caa9b4e0bb61e77ee6d32fc8687bad63f998ec9c" ]
[ "3. BEGAN (Boundary Equilibrium GAN)/celeba.py" ]
[ "from torch.utils.data import DataLoader\nfrom torchvision.datasets import ImageFolder\nimport torchvision.transforms as transforms\n\nfrom config import *\n\n\ndef get_celeba_loader(path, batch_size):\n \"\"\"CelebA Loader\"\"\"\n transform = transforms.Compose([\n transforms.Resize((config.crop_size, config.crop_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n\n celeba_dataset = ImageFolder(root=path, transform=transform)\n celeba_loader = DataLoader(celeba_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n\n return celeba_loader" ]
[ [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
raydouglass/cugraph
[ "228a4e1abc95b9b15ab211d9e397cc61913275e5", "228a4e1abc95b9b15ab211d9e397cc61913275e5" ]
[ "python/cugraph/graph/test_graph.py", "python/cugraph/pagerank/test_pagerank.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nfrom scipy.io import mmread\n\nimport cugraph\nimport cudf\n\n\ndef read_mtx_file(mm_file):\n print('Reading ' + str(mm_file) + '...')\n return mmread(mm_file).asfptype()\n\n\ndef compare_series(series_1, series_2):\n if (len(series_1) != len(series_2)):\n print(\"Series do not match in length\")\n return 0\n for i in range(len(series_1)):\n if(series_1[i] != series_2[i]):\n print(\"Series[\" + str(i) + \"] does not match, \" + str(series_1[i])\n + \", \" + str(series_2[i]))\n return 0\n return 1\n\n\ndef compare_offsets(offset0, offset1):\n if not (len(offset0) <= len(offset1)):\n print(\"Mismatched length: \" + str(len(offset0)) + \" != \"\n + str(len(offset1)))\n return False\n for i in range(len(offset0)):\n if offset0[i] != offset1[i]:\n print(\"Series[\" + str(i) + \"]: \" + str(offset0[i]) + \" != \"\n + str(offset1[i]))\n return False\n return True\n\n\nDATASETS = ['/datasets/networks/karate.mtx',\n '/datasets/networks/dolphins.mtx',\n '/datasets/networks/netscience.mtx']\n\n\[email protected]('graph_file', DATASETS)\ndef test_add_edge_list_to_adj_list(graph_file):\n\n M = read_mtx_file(graph_file)\n sources = cudf.Series(M.row)\n destinations = cudf.Series(M.col)\n\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n offsets_exp = M.indptr\n indices_exp = M.indices\n\n # cugraph add_egde_list to_adj_list call\n G = cugraph.Graph()\n G.add_edge_list(sources, destinations, None)\n offsets_cu, indices_cu = G.view_adj_list()\n assert compare_offsets(offsets_cu, offsets_exp)\n assert compare_series(indices_cu, indices_exp)\n\n\[email protected]('graph_file', DATASETS)\ndef test_add_adj_list_to_edge_list(graph_file):\n M = read_mtx_file(graph_file)\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n\n M = M.tocoo()\n sources_exp = cudf.Series(M.row)\n destinations_exp = cudf.Series(M.col)\n\n # cugraph add_adj_list to_edge_list call\n G = cugraph.Graph()\n G.add_adj_list(offsets, indices, None)\n sources, destinations = G.view_edge_list()\n sources_cu = np.array(sources)\n destinations_cu = np.array(destinations)\n assert compare_series(sources_cu, sources_exp)\n assert compare_series(destinations_cu, destinations_exp)\n\n\[email protected]('graph_file', DATASETS)\ndef test_transpose_from_adj_list(graph_file):\n M = read_mtx_file(graph_file)\n M = M.tocsr()\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n G = cugraph.Graph()\n G.add_adj_list(offsets, indices, None)\n G.add_transpose()\n Mt = M.transpose().tocsr()\n toff, tind = G.view_transpose_adj_list()\n assert compare_series(tind, Mt.indices)\n assert compare_offsets(toff, Mt.indptr)\n\n\[email 
protected]('graph_file', DATASETS)\ndef test_view_edge_list_from_adj_list(graph_file):\n M = read_mtx_file(graph_file)\n M = M.tocsr()\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n G = cugraph.Graph()\n G.add_adj_list(offsets, indices, None)\n src2, dst2 = G.view_edge_list()\n M = M.tocoo()\n src1 = M.row\n dst1 = M.col\n assert compare_series(src1, src2)\n assert compare_series(dst1, dst2)\n\n\[email protected]('graph_file', DATASETS)\ndef test_delete_edge_list_delete_adj_list(graph_file):\n M = read_mtx_file(graph_file)\n sources = cudf.Series(M.row)\n destinations = cudf.Series(M.col)\n\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n\n # cugraph delete_adj_list delete_edge_list call\n G = cugraph.Graph()\n G.add_edge_list(sources, destinations, None)\n G.delete_edge_list()\n with pytest.raises(cudf.bindings.GDFError.GDFError) as excinfo:\n G.view_adj_list()\n assert excinfo.value.errcode.decode() == 'GDF_INVALID_API_CALL'\n\n G.add_adj_list(offsets, indices, None)\n G.delete_adj_list()\n with pytest.raises(cudf.bindings.GDFError.GDFError) as excinfo:\n G.view_edge_list()\n assert excinfo.value.errcode.decode() == 'GDF_INVALID_API_CALL'\n", "# Copyright (c) 2019, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\n\nimport pytest\nfrom scipy.io import mmread\n\nimport cudf\nimport cugraph\n\n# Temporarily suppress warnings till networkX fixes deprecation warnings\n# (Using or importing the ABCs from 'collections' instead of from\n# 'collections.abc' is deprecated, and in 3.8 it will stop working) for\n# python 3.7. 
Also, this import networkx needs to be relocated in the\n# third-party group once this gets fixed.\nimport warnings\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n import networkx as nx\n\n\nprint('Networkx version : {} '.format(nx.__version__))\n\n\ndef read_mtx_file(mm_file):\n print('Reading ' + str(mm_file) + '...')\n return mmread(mm_file).asfptype()\n\n\ndef cugraph_call(M, max_iter, tol, alpha):\n # Device data\n sources = cudf.Series(M.row)\n destinations = cudf.Series(M.col)\n # values = cudf.Series(np.ones(len(sources), dtype = np.float64))\n\n # cugraph Pagerank Call\n G = cugraph.Graph()\n G.add_edge_list(sources, destinations, None)\n t1 = time.time()\n df = cugraph.pagerank(G, alpha=alpha, max_iter=max_iter, tol=tol)\n t2 = time.time() - t1\n print('Time : '+str(t2))\n\n # Sort Pagerank values\n sorted_pr = []\n pr_scores = df['pagerank'].to_array()\n for i, rank in enumerate(pr_scores):\n sorted_pr.append((i, rank))\n\n return sorted(sorted_pr, key=lambda x: x[1], reverse=True)\n\n\ndef networkx_call(M, max_iter, tol, alpha):\n nnz_per_row = {r: 0 for r in range(M.get_shape()[0])}\n for nnz in range(M.getnnz()):\n nnz_per_row[M.row[nnz]] = 1 + nnz_per_row[M.row[nnz]]\n for nnz in range(M.getnnz()):\n M.data[nnz] = 1.0/float(nnz_per_row[M.row[nnz]])\n\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n # should be autosorted, but check just to make sure\n if not M.has_sorted_indices:\n print('sort_indices ... ')\n M.sort_indices()\n\n # in NVGRAPH tests we read as CSR and feed as CSC,\n # so here we do this explicitly\n print('Format conversion ... ')\n\n # Directed NetworkX graph\n Gnx = nx.DiGraph(M)\n\n z = {k: 1.0/M.shape[0] for k in range(M.shape[0])}\n\n # Networkx Pagerank Call\n print('Solving... ')\n t1 = time.time()\n\n # same parameters as in NVGRAPH\n pr = nx.pagerank(Gnx, alpha=alpha, nstart=z, max_iter=max_iter*2,\n tol=tol*0.01)\n t2 = time.time() - t1\n\n print('Time : ' + str(t2))\n\n # return Sorted Pagerank values\n return sorted(pr.items(), key=lambda x: x[1], reverse=True)\n\n\nDATASETS = ['/datasets/networks/dolphins.mtx',\n '/datasets/networks/karate.mtx',\n '/datasets/networks/netscience.mtx']\n\nMAX_ITERATIONS = [500]\nTOLERANCE = [1.0e-06]\nALPHA = [0.85]\n\n\n@pytest.mark.parametrize('graph_file', DATASETS)\n@pytest.mark.parametrize('max_iter', MAX_ITERATIONS)\n@pytest.mark.parametrize('tol', TOLERANCE)\n@pytest.mark.parametrize('alpha', ALPHA)\ndef test_pagerank(graph_file, max_iter, tol, alpha):\n M = read_mtx_file(graph_file)\n\n networkx_pr = networkx_call(M, max_iter, tol, alpha)\n cugraph_pr = cugraph_call(M, max_iter, tol, alpha)\n\n # Calculating mismatch\n\n err = 0\n assert len(cugraph_pr) == len(networkx_pr)\n for i in range(len(cugraph_pr)):\n if(abs(cugraph_pr[i][1]-networkx_pr[i][1]) > tol*1.1\n and cugraph_pr[i][0] == networkx_pr[i][0]):\n err = err + 1\n print(err)\n assert err < (0.01*len(cugraph_pr))\n" ]
[ [ "scipy.io.mmread", "numpy.array" ], [ "scipy.io.mmread" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
54hanxiucao/gym-electric-motor
[ "911432388b00675e8a93f4a7937fdc575f106f22" ]
[ "gym_electric_motor/visualization/motor_dashboard_plots/state_plot.py" ]
[ "import numpy as np\n\nfrom .base_plots import TimePlot\n\n\nclass StatePlot(TimePlot):\n \"\"\"Plot to display the environments states and their references.\"\"\"\n\n _default_limit_line_cfg = {\n 'color': 'red',\n 'linestyle': '--',\n 'linewidth': 1\n }\n\n # Labels for each state variable.\n state_labels = {\n 'omega': r'$\\omega$/(1/s)',\n 'torque': '$T$/Nm',\n 'i': '$i$/A',\n 'i_a': '$i_{a}$/A',\n 'i_e': '$i_{e}$/A',\n 'i_b': '$i_{b}$/A',\n 'i_c': '$i_{c}$/A',\n 'i_sq': '$i_{sq}$/A',\n 'i_sd': '$i_{sd}$/A',\n 'u': '$u$/V',\n 'u_a': '$u_{a}$/V',\n 'u_b': '$u_{b}$/V',\n 'u_c': '$u_{c}$/V',\n 'u_sq': '$u_{sq}$/V',\n 'u_sd': '$u_{sd}$/V',\n 'u_e': '$u_{e}$/V',\n 'u_sup': '$u_{sup}$/V',\n 'epsilon': r'$\\epsilon$/rad'\n }\n\n def __init__(self, state):\n \"\"\"\n Args:\n state(str): Name of the state to plot\n \"\"\"\n super().__init__()\n\n self._state_line_config = self._default_time_line_cfg.copy()\n self._ref_line_config = self._default_time_line_cfg.copy()\n self._limit_line_config = self._default_limit_line_cfg.copy()\n\n #: State space of the plotted variable\n self._state_space = None\n #: State name of the plotted variable\n self._state = state\n #: Index in the state array of the plotted variable\n self._state_idx = None\n #: Maximal value of the plotted variable\n self._limits = None\n # Bool: Flag if the plotted variable is referenced.\n self._referenced = None\n\n # matplotlib-Lines for the state and reference\n self._state_line = None\n self._reference_line = None\n\n # Data containers\n self._state_data = []\n self._ref_data = []\n\n # Flag, if the passed data is normalized\n self._normalized = True\n\n def set_env(self, env):\n # Docstring of superclass\n super().set_env(env)\n ps = env.physical_system\n rg = env.reference_generator\n # Save the index of the state.\n self._state_idx = ps.state_positions[self._state]\n # The maximal values of the state.\n self._limits = ps.limits[self._state_idx]\n self._state_space = ps.state_space.low[self._state_idx], ps.state_space.high[self._state_idx]\n # Bool: if the state is referenced.\n self._referenced = rg.referenced_states[self._state_idx]\n # Bool: if the data is already normalized to an interval of [-1, 1]\n self._normalized = self._limits != self._state_space[1]\n # Initialize the data containers\n self._state_data = np.ones(self._x_width) * np.nan\n self._ref_data = np.ones(self._x_width) * np.nan\n\n min_limit = self._limits * self._state_space[0] if self._normalized else self._state_space[0]\n max_limit = self._limits * self._state_space[1] if self._normalized else self._state_space[1]\n spacing = 0.1 * (max_limit - min_limit)\n\n # Set the y-axis limits to fixed initital values\n self._y_lim = (min_limit - spacing, max_limit + spacing)\n\n # Set the y-axis label\n self._label = self.state_labels.get(self._state, self._state)\n\n def initialize(self, axis):\n # Docstring of superclass\n super().initialize(axis)\n\n # Line to plot the state data\n self._state_line, = self._axis.plot(self._x_data, self._state_data, **self._state_line_config)\n self._lines = [self._state_line]\n\n # If the state is referenced plot also the reference line\n if self._referenced:\n self._reference_line, = self._axis.plot(self._x_data, self._ref_data, **self._ref_line_config)\n # Plot state line in front\n axis.lines = axis.lines[::-1]\n self._lines.append(self._reference_line)\n min_limit = self._limits * self._state_space[0] if self._normalized else self._state_space[0]\n max_limit = self._limits * self._state_space[1] if self._normalized 
else self._state_space[1]\n if self._state_space[0] < 0:\n self._axis.axhline(min_limit, **self._limit_line_config)\n lim = self._axis.axhline(max_limit, **self._limit_line_config)\n\n y_label = self._label\n unit_split = y_label.find('/')\n if unit_split == -1:\n unit_split = len(y_label)\n limit_label = y_label[:unit_split] + r'$_{\\mathrm{max}}$' + y_label[unit_split:]\n\n if self._referenced:\n ref_label = y_label[:unit_split] + r'$^*$' + y_label[unit_split:]\n self._axis.legend(\n (self._state_line, self._reference_line, lim), (y_label, ref_label, limit_label), loc='upper left',\n numpoints=20\n )\n else:\n self._axis.legend((self._state_line, lim), (y_label, limit_label), loc='upper left', numpoints=20)\n\n self._y_data = [self._state_data, self._ref_data]\n\n def on_step_end(self, k, state, reference, reward, done):\n super().on_step_end(k, state, reference, reward, done)\n # Write the data to the data containers\n state_ = state[self._state_idx]\n ref = reference[self._state_idx]\n idx = self.data_idx\n self._x_data[idx] = self._t\n self._state_data[idx] = state_ * self._limits\n if self._referenced:\n self._ref_data[idx] = ref * self._limits\n" ]
[ [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
belivem/Study
[ "7e4633b988985735100f2ddd17ae62b8348dbb8e", "7e4633b988985735100f2ddd17ae62b8348dbb8e" ]
[ "src/mnist_fully_network/mnist_practice/mnist_data_info.py", "src/alexNet/inference.py" ]
[ "import os\nimport sys\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist_path = \"/Users/liyanan/Documents/Test/Tensorflow/data/mnist_data/\"\n\n#print mnist data\ndef mnistInfo():\n \n batch_size = 100\n\n #read mnist data\n mnist = input_data.read_data_sets(mnist_path,one_hot = True)\n\n #training data size\n print(\"Training data size ==> \"+str(mnist.train.num_examples))\n\n #validating data size\n print(\"Validating data size ==> \"+str(mnist.validation.num_examples))\n\n #testing data size\n print(\"Testing data size ==> \"+str(mnist.test.num_examples))\n\n #traing data shape\n print(\"Shape of training images ==> \"+str(mnist.train.images.shape))\n print(\"Shape of training labels ==> \"+str(mnist.train.labels.shape))\n\n #print image\n print(\"Image ==> \")\n print(mnist.train.images[0])\n\n #print lable\n print(\"Lable ==> \")\n print(mnist.train.labels[0])\n\n #next batch size \n xs,ys = mnist.train.next_batch(batch_size)\n print(\"X shape ==> \"+str(xs.shape))\n print(\"Y shape ==> \"+str(ys.shape))\n\ndef getmnist():\n mnist = input_data.read_data_sets(mnist_path,one_hot = True)\n return mnist\n\n#Get current dir and execute file\ndef getcwd():\n print(\"Get current working dir ==> \"+os.getcwd())\n print(\"Get current execute file ==> \"+sys.argv[0])\n\ndef get_minst_class_num():\n #read mnist data\n mnist = input_data.read_data_sets(mnist_path,one_hot = True)\n \n labels = tf.placeholder(tf.float32,shape=[None,10],name=\"labels\")\n class_tensor = tf.argmax(labels,axis=1)\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n print(\"class and num ==>\")\n class_def,idx,count = sess.run(tf.unique_with_counts(class_tensor),feed_dict={labels:mnist.train.labels})\n print(class_def)\n print(count)\n\n\nif __name__ == \"__main__\":\n #getcwd()\n mnistInfo()\n #get_minst_class_num()", "import tensorflow as tf\nimport numpy as np\n\n\n\nif __name__ == \"__main__\":\n a = 2*np.ones([2,2,2,3])\n b = tf.nn.local_response_normalization(a,depth_radius=1,bias=0,alpha=1,beta=1)\n\n\n with tf.Session() as sess:\n print(\"Orinal data ==> \")\n print(a)\n print(\"After LRN data ==> \")\n print(sess.run(b))" ]
[ [ "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.unique_with_counts", "tensorflow.Session", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets" ], [ "tensorflow.Session", "tensorflow.nn.local_response_normalization", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
filippovitale/tensorflow
[ "fe9b5008ff63a70e4092cdc7968b1327a9470f77" ]
[ "tensorflow/python/kernel_tests/cwise_ops_test.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functional tests for coefficient-wise operations.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\nimport tensorflow as tf\n\n\n_ADD = lambda x, y: x + y\n_SUB = lambda x, y: x - y\n_MUL = lambda x, y: x * y\n_POW = lambda x, y: x ** y\n_TRUEDIV = lambda x, y: x / y\n_FLOORDIV = lambda x, y: x // y\n_MOD = lambda x, y: x % y\n_NEG = lambda x: -x\n_ABS = abs\n\n_LT = lambda x, y: x < y\n_LE = lambda x, y: x <= y\n_GT = lambda x, y: x > y\n_GE = lambda x, y: x >= y\n\n_AND = lambda x, y: x & y\n_OR = lambda x, y: x | y\n_XOR = lambda x, y: x ^ y\n_INV = lambda x: ~x\n\n\nclass UnaryOpTest(tf.test.TestCase):\n\n def _compareCpu(self, x, np_func, tf_func):\n np_ans = np_func(x)\n with self.test_session(use_gpu=False):\n inx = tf.convert_to_tensor(x)\n if x.dtype in (np.float32, np.float64):\n y = 1.1 * tf_func(inx)\n np_ans *= 1.1\n else:\n y = tf_func(inx)\n tf_cpu = y.eval()\n self.assertShapeEqual(np_ans, y)\n if x.dtype == np.float16:\n self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)\n else:\n self.assertAllClose(np_ans, tf_cpu)\n\n if x.dtype == np.complex64 and tf_func in (\n tf.sign, tf.sqrt, tf.rsqrt, tf.log):\n return # Return early\n\n if x.dtype == np.float16:\n s = list(np.shape(x))\n jacob_t, _ = tf.test.compute_gradient(inx,\n s,\n y,\n s,\n x_init_value=x)\n xf = x.astype(np.float)\n inxf = tf.convert_to_tensor(xf)\n yf = tf_func(inxf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n s,\n yf,\n s,\n x_init_value=xf)\n jacob_n = jacob_n.astype(np.float16)\n self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)\n elif x.dtype == np.float32 or x.dtype == np.complex64:\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n y,\n s,\n x_init_value=x)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n y,\n s,\n x_init_value=x)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGpu(self, x, np_func, tf_func):\n np_ans = np_func(x)\n with self.test_session(use_gpu=True):\n result = tf_func(tf.convert_to_tensor(x))\n tf_gpu = result.eval()\n if x.dtype == np.float16:\n self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)\n else:\n self.assertAllClose(np_ans, tf_gpu)\n # TODO(zhifengc/ke): make gradient checker work on GPU.\n\n def _compareBoth(self, x, np_func, tf_func):\n self._compareCpu(x, np_func, tf_func)\n self._compareGpu(x, np_func, tf_func)\n\n def _inv(self, x):\n return 1.0 / x\n\n def _rsqrt(self, x):\n return self._inv(np.sqrt(x))\n\n def _sigmoid(self, x):\n return 1.0 / (1.0 + np.exp(-x))\n\n def _replace_domain_error_with_inf(self, fn):\n def func(x):\n try:\n return fn(x)\n except 
ValueError as e:\n if \"domain error\" in str(e):\n return np.inf * np.ones_like(x)\n else:\n raise e\n return func\n\n def testFloatBasic(self):\n x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)\n y = (x + .5).astype(np.float32) # no zero\n z = (x + 15.5).astype(np.float32) # all positive\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(y, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(z, np.sqrt, tf.sqrt)\n self._compareBoth(z, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(z, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(y, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n self._compareBoth(\n y,\n np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),\n tf.lgamma)\n self._compareBoth(x, np.vectorize(math.erf), tf.erf)\n self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)\n\n def testFloatTanhEdge(self):\n x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)\n self._compareBoth(x, np.tanh, tf.tanh)\n x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)\n self._compareBoth(x, np.tanh, tf.tanh)\n\n def testFloatEmpty(self):\n x = np.empty((2, 0, 5), dtype=np.float32)\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(x, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(x, np.sqrt, tf.sqrt)\n self._compareBoth(x, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(x, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(x, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n # Can't use vectorize below, so just use some arbitrary function\n self._compareBoth(x, np.sign, tf.lgamma)\n self._compareBoth(x, np.sign, tf.erf)\n self._compareBoth(x, np.sign, tf.erfc)\n\n def testDoubleBasic(self):\n x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)\n y = (x + .5).astype(np.float64) # no zero\n z = (x + 15.5).astype(np.float64) # all positive\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(y, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(z, np.sqrt, tf.sqrt)\n self._compareBoth(z, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(z, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(y, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n self._compareBoth(\n y,\n np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),\n tf.lgamma)\n self._compareBoth(x, np.vectorize(math.erf), tf.erf)\n self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)\n\n def testHalfBasic(self):\n x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)\n y = (x + .5).astype(np.float16) # no zero\n z = (x + 15.5).astype(np.float16) # all positive\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n 
self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(y, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(z, np.sqrt, tf.sqrt)\n self._compareBoth(z, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(z, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(y, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n self._compareBoth(\n y,\n np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),\n tf.lgamma)\n self._compareBoth(x, np.vectorize(math.erf), tf.erf)\n self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)\n\n def testInt32Basic(self):\n x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)\n self._compareCpu(x, np.abs, tf.abs)\n self._compareCpu(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(x, np.square, tf.square)\n self._compareCpu(x, np.sign, tf.sign)\n\n def testInt64Basic(self):\n x = np.arange(\n -6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)\n self._compareCpu(x, np.abs, tf.abs)\n self._compareCpu(x, np.abs, _ABS)\n self._compareCpu(x, np.negative, tf.neg)\n self._compareCpu(x, np.negative, _NEG)\n self._compareCpu(x, np.square, tf.square)\n self._compareCpu(x, np.sign, tf.sign)\n\n def testComplex64Basic(self):\n x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(\n np.complex64)\n y = x + 0.5 # no zeros\n self._compareCpu(x, np.abs, tf.complex_abs)\n self._compareCpu(x, np.abs, _ABS)\n self._compareCpu(x, np.negative, tf.neg)\n self._compareCpu(x, np.negative, _NEG)\n self._compareCpu(y, self._inv, tf.inv)\n self._compareCpu(x, np.square, tf.square)\n self._compareCpu(x, np.sqrt, tf.sqrt)\n self._compareCpu(y, self._rsqrt, tf.rsqrt)\n self._compareCpu(x, np.exp, tf.exp)\n self._compareCpu(y, np.log, tf.log)\n self._compareCpu(x, np.tanh, tf.tanh)\n self._compareCpu(x, self._sigmoid, tf.sigmoid)\n self._compareCpu(x, np.sin, tf.sin)\n self._compareCpu(x, np.cos, tf.cos)\n\n # Numpy uses an incorrect definition of sign; use the right one instead.\n def complex_sign(x):\n return x / np.abs(x)\n self._compareCpu(y, complex_sign, tf.sign)\n\n\nclass BinaryOpTest(tf.test.TestCase):\n\n def _compareCpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=False):\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_cpu = out.eval()\n # Test that the op takes precedence over numpy operators.\n np_left = tf_func(x, iny).eval()\n np_right = tf_func(inx, y).eval()\n\n if np_ans.dtype != np.object:\n self.assertAllClose(np_ans, tf_cpu)\n self.assertAllClose(np_ans, np_left)\n self.assertAllClose(np_ans, np_right)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, x, y, np_func, tf_func,\n numeric_gradient_type=None):\n z = np_func(x, y)\n zs = list(z.shape)\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n if x.dtype in (np.float32, np.float64):\n out = 1.1 * tf_func(inx, iny)\n else:\n out = tf_func(inx, iny)\n xs = list(x.shape)\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n xs,\n out,\n zs,\n x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n 
outf = tf_func(inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n xs,\n outf,\n zs,\n x_init_value=xf,\n delta=1e-3)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, x, y, np_func, tf_func,\n numeric_gradient_type=None):\n z = np_func(x, y)\n zs = list(z.shape)\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n if x.dtype in (np.float32, np.float64):\n out = 1.1 * tf_func(inx, iny)\n else:\n out = tf_func(inx, iny)\n ys = list(np.shape(y))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n ys,\n out,\n zs,\n x_init_value=y)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf_func(inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inyf,\n ys,\n outf,\n zs,\n x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=True):\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_gpu = out.eval()\n self.assertAllClose(np_ans, tf_gpu)\n self.assertShapeEqual(np_ans, out)\n # TODO(zhifengc/ke): make gradient checker work on GPU.\n\n def _compareBoth(self, x, y, np_func, tf_func):\n self._compareCpu(x, y, np_func, tf_func)\n if x.dtype in (np.float16, np.float32, np.float64):\n if tf_func not in (_FLOORDIV, tf.floordiv, tf.igamma, tf.igammac, tf.zeta, tf.polygamma):\n self._compareGradientX(x, y, np_func, tf_func)\n self._compareGradientY(x, y, np_func, tf_func)\n if tf_func in (tf.igamma, tf.igammac, tf.zeta, tf.polygamma):\n # These methods only support gradients in the second parameter\n self._compareGradientY(x, y, np_func, tf_func)\n self._compareGpu(x, y, np_func, tf_func)\n\n def testFloatBasic(self):\n x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)\n y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(x, y, np.add, tf.add)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)\n self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.add, _ADD)\n self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)\n try:\n from scipy import special # pylint: disable=g-import-not-at-top\n a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)\n x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)\n self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)\n # Need x > 1\n self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)\n n_small = 
np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)\n except ImportError as e:\n tf.logging.warn(\"Cannot test special functions: %s\" % str(e))\n\n def testFloatDifferentShapes(self):\n x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)\n y = np.array([1, 2]).reshape(2, 1).astype(np.float32)\n with self.test_session() as sess:\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n s = tf.reduce_sum(inx * iny)\n gx, gy = sess.run(tf.gradients(s, [inx, iny]))\n # gx is simply the broadcasted y\n self.assertAllEqual(gx, np.array([1, 1, 2, 2])\n .reshape(2, 2).astype(np.float32))\n # gy is x's column summed up\n self.assertAllEqual(gy, np.array([3, 7]).\n reshape(2, 1).astype(np.float32))\n\n def testDoubleBasic(self):\n x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)\n y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)\n self._compareBoth(x, y, np.add, tf.add)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)\n self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.add, _ADD)\n self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)\n try:\n from scipy import special # pylint: disable=g-import-not-at-top\n a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)\n x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)\n self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)\n except ImportError as e:\n tf.logging.warn(\"Cannot test special functions: %s\" % str(e))\n\n def testInt8Basic(self):\n x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.multiply, _MUL)\n\n def testInt16Basic(self):\n x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.multiply, _MUL)\n\n def testInt32Basic(self):\n x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)\n self._compareBoth(x, y, np.add, tf.add)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.true_divide, tf.truediv)\n self._compareBoth(x, y, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.mod, tf.mod)\n self._compareBoth(x, y, np.add, _ADD)\n self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y, np.floor_divide, _FLOORDIV)\n self._compareBoth(x, y, np.mod, _MOD)\n\n def testInt64Basic(self):\n x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.true_divide, tf.truediv)\n self._compareBoth(x, y, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.mod, tf.mod)\n 
self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y, np.floor_divide, _FLOORDIV)\n self._compareBoth(x, y, np.mod, _MOD)\n\n def testComplex64Basic(self):\n x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(\n np.complex64)\n y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(\n np.complex64)\n self._compareCpu(x, y, np.add, tf.add)\n self._compareCpu(x, y, np.subtract, tf.sub)\n self._compareCpu(x, y, np.multiply, tf.mul)\n self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)\n self._compareCpu(x, y, np.add, _ADD)\n self._compareCpu(x, y, np.subtract, _SUB)\n self._compareCpu(x, y, np.multiply, _MUL)\n self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)\n\n def testStringComparison(self):\n x = np.array([[\"abc\", \"bh\"], [\"c\", \"\"]])\n y = np.array([[\"abc\", \"bh\"], [\"def\", \"hi\"]])\n with self.test_session(use_gpu=False) as sess:\n cmp_eq = tf.equal(x, y)\n cmp_not_eq = tf.not_equal(x, y)\n values = sess.run([cmp_eq, cmp_not_eq])\n self.assertAllEqual([[True, True], [False, False]], values[0])\n self.assertAllEqual([[False, False], [True, True]], values[1])\n\n def testString(self):\n x = np.array([[\"x_0_0\", \"x_0_1\", \"x_0_2\"],\n [\"x_1_0\", \"x_1_1\", \"x_1_2\"],\n [\"x_2_0\", \"x_2_1\", \"x_2_2\"]], dtype=np.object)\n y = np.array([[\"y_0_0\", \"y_0_1\", \"y_0_2\"],\n [\"y_1_0\", \"y_1_1\", \"y_1_2\"],\n [\"y_2_0\", \"y_2_1\", \"y_2_2\"]], dtype=np.object)\n z = np.array([[\"z_0\", \"z_1\", \"z_2\"]], dtype=np.object)\n w = np.array(\"w\", dtype=np.object)\n self._compareCpu(x, y, _ADD, _ADD)\n self._compareCpu(x, z, _ADD, _ADD)\n self._compareCpu(x, w, _ADD, _ADD)\n self._compareCpu(z, w, _ADD, _ADD)\n\n def _compareBCast(self, xs, ys, dtype, np_func, tf_func):\n x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)\n y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)\n self._compareCpu(x, y, np_func, tf_func)\n if x.dtype in (np.float16, np.float32, np.float64):\n if tf_func not in (_FLOORDIV, tf.floordiv):\n if x.dtype == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. 
This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(x, y, np_func, tf_func, np.float)\n self._compareGradientY(x, y, np_func, tf_func, np.float)\n else:\n self._compareGradientX(x, y, np_func, tf_func)\n self._compareGradientY(x, y, np_func, tf_func)\n self._compareGpu(x, y, np_func, tf_func)\n\n # TODO(josh11b,vrv): Refactor this to use parameterized tests.\n def _testBCastByFunc(self, funcs, xs, ys):\n dtypes = [\n np.float16,\n np.float32,\n np.float64,\n np.int32,\n np.int64,\n np.complex64\n ]\n for dtype in dtypes:\n for (np_func, tf_func) in funcs:\n if dtype == np.complex64 and tf_func in (_FLOORDIV, tf.floordiv):\n continue # floordiv makes no sense for complex numbers\n self._compareBCast(xs, ys, dtype, np_func, tf_func)\n self._compareBCast(ys, xs, dtype, np_func, tf_func)\n\n def _testBCastA(self, xs, ys):\n funcs = [\n (np.add, tf.add),\n (np.add, _ADD),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def _testBCastB(self, xs, ys):\n funcs = [\n (np.subtract, tf.sub),\n (np.subtract, _SUB),\n (np.power, tf.pow),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def _testBCastC(self, xs, ys):\n funcs = [\n (np.multiply, tf.mul),\n (np.multiply, _MUL),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def _testBCastD(self, xs, ys):\n funcs = [\n (np.true_divide, tf.truediv),\n (np.floor_divide, tf.floordiv),\n (np.true_divide, _TRUEDIV),\n (np.floor_divide, _FLOORDIV),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def testBCast_0A(self):\n self._testBCastA([1, 3, 2], [1])\n\n def testBCast_0B(self):\n self._testBCastB([1, 3, 2], [1])\n\n def testBCast_0C(self):\n self._testBCastC([1, 3, 2], [1])\n\n def testBCast_0D(self):\n self._testBCastD([1, 3, 2], [1])\n\n def testBCast_1A(self):\n self._testBCastA([1, 3, 2], [2])\n\n def testBCast_1B(self):\n self._testBCastB([1, 3, 2], [2])\n\n def testBCast_1C(self):\n self._testBCastC([1, 3, 2], [2])\n\n def testBCast_1D(self):\n self._testBCastD([1, 3, 2], [2])\n\n def testBCast_2A(self):\n self._testBCastA([1, 3, 2], [3, 2])\n\n def testBCast_2B(self):\n self._testBCastB([1, 3, 2], [3, 2])\n\n def testBCast_2C(self):\n self._testBCastC([1, 3, 2], [3, 2])\n\n def testBCast_2D(self):\n self._testBCastD([1, 3, 2], [3, 2])\n\n def testBCast_3A(self):\n self._testBCastA([1, 3, 2], [3, 1])\n\n def testBCast_3B(self):\n self._testBCastB([1, 3, 2], [3, 1])\n\n def testBCast_3C(self):\n self._testBCastC([1, 3, 2], [3, 1])\n\n def testBCast_3D(self):\n self._testBCastD([1, 3, 2], [3, 1])\n\n def testBCast_4A(self):\n self._testBCastA([1, 3, 2], [1, 3, 2])\n\n def testBCast_4B(self):\n self._testBCastB([1, 3, 2], [1, 3, 2])\n\n def testBCast_4C(self):\n self._testBCastC([1, 3, 2], [1, 3, 2])\n\n def testBCast_4D(self):\n self._testBCastD([1, 3, 2], [1, 3, 2])\n\n def testBCast_5A(self):\n self._testBCastA([1, 3, 2], [2, 3, 1])\n\n def testBCast_5B(self):\n self._testBCastB([1, 3, 2], [2, 3, 1])\n\n def testBCast_5C(self):\n self._testBCastC([1, 3, 2], [2, 3, 1])\n\n def testBCast_5D(self):\n self._testBCastD([1, 3, 2], [2, 3, 1])\n\n def testBCast_6A(self):\n self._testBCastA([1, 3, 2], [2, 1, 1])\n\n def testBCast_6B(self):\n self._testBCastB([1, 3, 2], [2, 1, 1])\n\n def testBCast_6C(self):\n self._testBCastC([1, 3, 2], [2, 1, 1])\n\n def testBCast_6D(self):\n self._testBCastD([1, 3, 2], [2, 1, 1])\n\n def testBCast_7A(self):\n self._testBCastA([1, 3, 2], [1, 3, 1])\n\n def testBCast_7B(self):\n self._testBCastB([1, 3, 2], [1, 3, 1])\n\n def 
testBCast_7C(self):\n self._testBCastC([1, 3, 2], [1, 3, 1])\n\n def testBCast_7D(self):\n self._testBCastD([1, 3, 2], [1, 3, 1])\n\n def testBCast_8A(self):\n self._testBCastA([2, 1, 5], [2, 3, 1])\n\n def testBCast_8B(self):\n self._testBCastB([2, 1, 5], [2, 3, 1])\n\n def testBCast_8C(self):\n self._testBCastC([2, 1, 5], [2, 3, 1])\n\n def testBCast_8D(self):\n self._testBCastD([2, 1, 5], [2, 3, 1])\n\n def testBCast_9A(self):\n self._testBCastA([2, 0, 5], [2, 0, 1])\n\n def testBCast_9B(self):\n self._testBCastB([2, 0, 5], [2, 0, 1])\n\n def testBCast_9C(self):\n self._testBCastC([2, 0, 5], [2, 0, 1])\n\n def testBCast_9D(self):\n self._testBCastD([2, 0, 5], [2, 0, 1])\n\n def testBCast_10A(self):\n self._testBCastA([2, 3, 0], [2, 3, 1])\n\n def testBCast_10B(self):\n self._testBCastB([2, 3, 0], [2, 3, 1])\n\n def testBCast_10C(self):\n self._testBCastC([2, 3, 0], [2, 3, 1])\n\n def testBCast_10D(self):\n self._testBCastD([2, 3, 0], [2, 3, 1])\n\n def testBCast_11A(self):\n self._testBCastA([1, 3, 2], [1, 3, 2])\n\n def testBCast_11B(self):\n self._testBCastB([1, 3, 2], [1, 3, 2])\n\n def testBCast_11C(self):\n self._testBCastC([1, 3, 2], [1, 3, 2])\n\n def testBCast_11D(self):\n self._testBCastD([1, 3, 2], [1, 3, 2])\n\n def testBCast_12A(self):\n self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_12B(self):\n self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_12C(self):\n self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_12D(self):\n self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_13A(self):\n self._testBCastA([1, 3, 2, 1, 1], [1])\n\n def testBCast_13B(self):\n self._testBCastB([1, 3, 2, 1, 1], [1])\n\n def testBCast_13C(self):\n self._testBCastC([1, 3, 2, 1, 1], [1])\n\n def testBCast_13D(self):\n self._testBCastD([1, 3, 2, 1, 1], [1])\n\n def testBCast_14A(self):\n self._testBCastA([2, 3, 1, 1, 5], [1])\n\n def testBCast_14B(self):\n self._testBCastB([2, 3, 1, 1, 5], [1])\n\n def testBCast_14C(self):\n self._testBCastC([2, 3, 1, 1, 5], [1])\n\n def testBCast_14D(self):\n self._testBCastD([2, 3, 1, 1, 5], [1])\n\n def testBCast_15A(self):\n self._testBCastA([10, 3, 1, 2], [3, 1, 2])\n\n def testBCast_15B(self):\n self._testBCastB([10, 3, 1, 2], [3, 1, 2])\n\n def testBCast_15C(self):\n self._testBCastC([10, 3, 1, 2], [3, 1, 2])\n\n def testBCast_15D(self):\n self._testBCastD([10, 3, 1, 2], [3, 1, 2])\n\n def testMismatchedDimensions(self):\n for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,\n _FLOORDIV]:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Incompatible shapes\" in str(e)):\n func(tf.convert_to_tensor([10.0, 20.0, 30.0]),\n tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))\n\n\nclass ComparisonOpTest(tf.test.TestCase):\n\n def _compare(self, func, x, y, dtype):\n with self.test_session(use_gpu=False):\n out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),\n tf.convert_to_tensor(np.array([y]).astype(dtype)))\n ret = out.eval()\n return ret[0]\n\n def testScalarCompareScalar(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n data = [-1, 0, 1]\n for t in dtypes:\n for x in data:\n for y in data:\n self.assertEqual(self._compare(tf.less, x, y, t),\n x < y)\n self.assertEqual(self._compare(tf.less_equal, x, y, t),\n x <= y)\n self.assertEqual(self._compare(tf.greater, x, y, t),\n x > y)\n self.assertEqual(self._compare(tf.greater_equal, x, y, t),\n x >= y)\n self.assertEqual(self._compare(tf.equal, x, y, t),\n x == y)\n 
self.assertEqual(self._compare(tf.not_equal, x, y, t),\n x != y)\n\n def _compareCpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=False):\n out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))\n tf_cpu = out.eval()\n self.assertAllEqual(np_ans, tf_cpu)\n\n def _compareGpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=True):\n out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))\n tf_gpu = out.eval()\n self.assertAllEqual(np_ans, tf_gpu)\n\n def _compareBoth(self, x, y, np_func, tf_func):\n self._compareCpu(x, y, np_func, tf_func)\n if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:\n self._compareGpu(x, y, np_func, tf_func)\n\n def testTensorCompareTensor(self):\n x = np.linspace(-15, 15, 6).reshape(1, 3, 2)\n y = np.linspace(20, -10, 6).reshape(1, 3, 2)\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compareBoth(xt, yt, np.less, tf.less)\n self._compareBoth(xt, yt, np.less_equal, tf.less_equal)\n self._compareBoth(xt, yt, np.greater, tf.greater)\n self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)\n self._compareBoth(xt, yt, np.equal, tf.equal)\n self._compareBoth(xt, yt, np.not_equal, tf.not_equal)\n # TODO(zhifengc): complex64 doesn't work on GPU yet.\n self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),\n np.equal, tf.equal)\n self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),\n np.not_equal, tf.not_equal)\n\n def _compareBCast(self, xs, ys, dtype, np_func, tf_func):\n x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)\n y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)\n self._compareCpu(x, y, np_func, tf_func)\n self._compareCpu(y, x, np_func, tf_func)\n if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:\n self._compareGpu(x, y, np_func, tf_func)\n self._compareGpu(y, x, np_func, tf_func)\n\n def _testBCastByFunc(self, np_func, tf_func):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n dtypes = [\n np.float16,\n np.float32,\n np.float64,\n np.int32,\n np.int64,\n ]\n for (xs, ys) in shapes:\n for dtype in dtypes:\n self._compareBCast(xs, ys, dtype, np_func, tf_func)\n\n def testBCastLess(self):\n self._testBCastByFunc(np.less, tf.less)\n\n def testBCastLessEqual(self):\n self._testBCastByFunc(np.less_equal, tf.less_equal)\n\n def testBCastGreater(self):\n self._testBCastByFunc(np.greater, tf.greater)\n\n def testBCastGreaterEqual(self):\n self._testBCastByFunc(np.greater_equal, tf.greater_equal)\n\n def testBCastEqual(self):\n self._testBCastByFunc(np.equal, tf.equal)\n\n def testBCastNotEqual(self):\n self._testBCastByFunc(np.not_equal, tf.not_equal)\n\n def testShapeMismatch(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n funcs = [tf.less, tf.less_equal, tf.greater,\n tf.greater_equal, tf.equal, tf.not_equal]\n x = np.arange(0, 10).reshape([2, 5])\n y = np.arange(0, 10).reshape([5, 2])\n for t in dtypes:\n for f in funcs:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Incompatible shapes\" in str(e)):\n f(x.astype(t), y.astype(t))\n\n\nclass LogicalOpTest(tf.test.TestCase):\n\n def _compareBinary(self, x, y, 
np_func, tf_func, use_gpu=False):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=use_gpu):\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_val = out.eval()\n self.assertEqual(out.dtype, tf.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def _not(self, x, use_gpu=False):\n np_ans = np.logical_not(x)\n with self.test_session(use_gpu=use_gpu):\n out = tf.logical_not(tf.convert_to_tensor(x))\n tf_val = out.eval()\n self.assertEqual(out.dtype, tf.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def testScalar(self):\n data = [np.array([True]), np.array([False])]\n for use_gpu in [True, False]:\n for x in data:\n self._not(x, use_gpu)\n for x in data:\n for y in data:\n self._compareBinary(\n x, y, np.logical_and, tf.logical_and, use_gpu)\n self._compareBinary(\n x, y, np.logical_or, tf.logical_or, use_gpu)\n self._compareBinary(\n x, y, np.logical_xor, tf.logical_xor, use_gpu)\n\n def testTensor(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n for use_gpu in [True, False]:\n self._not(x, use_gpu)\n self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)\n\n def testBCast(self):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n for (xs, ys) in shapes:\n x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)\n y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)\n for use_gpu in [True, False]:\n self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)\n\n def testShapeMismatch(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)\n for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Incompatible shapes\" in str(e)):\n f(x, y)\n\n def testUsingAsPythonValueFails(self):\n # Ensure that we raise an error when the user attempts to treat a\n # `Tensor` as a Python `bool`.\n b = tf.constant(False)\n with self.assertRaises(TypeError):\n if b:\n pass\n\n x = tf.constant(3)\n y = tf.constant(4)\n with self.assertRaises(TypeError):\n if x > y:\n pass\n\n z = tf.constant(7)\n\n # The chained comparison should fail because Python computes `x <\n # y` and short-circuits the comparison with `z` if it is `False`.\n with self.assertRaises(TypeError):\n _ = x < y < z\n\n\nclass SelectOpTest(tf.test.TestCase):\n\n def _compare(self, c, x, y, use_gpu):\n np_ans = np.where(c, x, y)\n with self.test_session(use_gpu=use_gpu):\n out = tf.select(c, x, y)\n tf_ans = out.eval()\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(c))\n jacob_t, jacob_n = 
tf.test.compute_gradient(inx,\n s,\n out,\n s,\n x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n s,\n outf,\n s,\n x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(c))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n s,\n out,\n s,\n x_init_value=y,\n delta=1.0)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inyf,\n s,\n outf,\n s,\n x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testBasic(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(c, xt, yt, use_gpu=True)\n\n def testGradients(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. 
This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(c, xt, yt, np.float)\n self._compareGradientY(c, xt, yt, np.float)\n else:\n self._compareGradientX(c, xt, yt)\n self._compareGradientY(c, xt, yt)\n\n def testShapeMismatch(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(2, 5, 3) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n tf.select(c, xt, yt)\n\n def testEmptyTensor(self):\n c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)\n x = np.random.rand(1, 3, 0) * 100\n y = np.random.rand(1, 3, 0) * 100\n z_expected = np.zeros((1, 3, 0), dtype=np.float32)\n with self.test_session():\n xt = x.astype(np.float32)\n yt = y.astype(np.float32)\n z = tf.select(c, xt, yt).eval()\n self.assertAllEqual(z_expected, z)\n\n\nclass BatchSelectOpTest(tf.test.TestCase):\n \"\"\"Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+.\"\"\"\n\n def _compare(self, c, x, y, use_gpu):\n np_ans = np.dstack(\n [x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(\n [2, 0, 1])\n with self.test_session(use_gpu=use_gpu):\n out = tf.select(c, x, y)\n tf_ans = out.eval()\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n out,\n s,\n x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n s,\n outf,\n s,\n x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n s,\n out,\n s,\n x_init_value=y)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inyf,\n s,\n outf,\n s,\n x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testBasic(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n 
xt = x.astype(t)\n yt = y.astype(t)\n self._compare(c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(c, xt, yt, use_gpu=True)\n\n def testGradients(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(c, xt, yt, np.float)\n self._compareGradientY(c, xt, yt, np.float)\n else:\n self._compareGradientX(c, xt, yt)\n self._compareGradientY(c, xt, yt)\n\n def testShapeMismatch(self):\n c = np.random.randint(0, 2, 8).astype(np.bool)\n x = np.random.rand(16, 3, 2) * 100\n y = np.random.rand(16, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n tf.select(c, xt, yt)\n\n\nclass MinMaxOpTest(tf.test.TestCase):\n\n def _compare(self, x, y, use_gpu):\n np_min, np_max = np.minimum(x, y), np.maximum(x, y)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)\n tf_min, tf_max = sess.run([omin, omax])\n self.assertAllEqual(np_min, tf_min)\n self.assertAllEqual(np_max, tf_max)\n\n def testBasic(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(1, 3, 2) * 100.\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testDifferentShapes(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(2) * 100. # should broadcast\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testScalar(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.asscalar(np.random.rand(1) * 100.) 
# should broadcast\n # dropped np.float64, int64 because TF automatically converts to 32 bit\n for t in [np.float32, np.int32]:\n self._compare(x.astype(t), t(y), use_gpu=False)\n self._compare(x.astype(t), t(y), use_gpu=True)\n\n def _compareGradientX(self, func, x, y):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n out,\n s,\n x_init_value=x)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, func, x, y):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n s,\n out,\n s,\n x_init_value=y)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testGradients(self):\n x = np.random.rand(1, 3, 2) * 100.\n # ensure x != y\n y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1\n self._compareGradientX(tf.maximum, x, y)\n self._compareGradientY(tf.maximum, x, y)\n self._compareGradientX(tf.minimum, x, y)\n self._compareGradientY(tf.minimum, x, y)\n\n\nclass MathOpsOverloadTest(tf.test.TestCase):\n\n def _computeTensorAndLiteral(self, x, y, dtype, func):\n with self.test_session(use_gpu=False):\n inx = tf.convert_to_tensor(x, dtype=dtype)\n z = func(inx, y) # Should use __add__, __sub__, etc.\n return z.eval()\n\n def _computeLiteralAndTensor(self, x, y, dtype, func):\n with self.test_session(use_gpu=False):\n iny = tf.convert_to_tensor(y, dtype=dtype)\n z = func(x, iny) # Should use __radd__, __rsub__, etc.\n return z.eval()\n\n def _compareBinary(self, x, y, dtype, np_func, tf_func):\n np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)\n self.assertAllClose(np_ans, self._computeTensorAndLiteral(\n x, y, dtype, tf_func))\n self.assertAllClose(np_ans, self._computeLiteralAndTensor(\n x, y, dtype, tf_func))\n\n def _compareUnary(self, x, dtype, np_func, tf_func):\n np_ans = np_func(x).astype(dtype.as_numpy_dtype)\n with self.test_session(use_gpu=False):\n self.assertAllClose(np_ans, tf_func(tf.convert_to_tensor(x, dtype=dtype)).eval())\n\n def testOverload(self):\n dtypes = [\n tf.float16,\n tf.float32,\n tf.float64,\n tf.int32,\n tf.int64,\n tf.complex64,\n ]\n funcs = [\n (np.add, _ADD),\n (np.subtract, _SUB),\n (np.multiply, _MUL),\n (np.power, _POW),\n (np.true_divide, _TRUEDIV),\n (np.floor_divide, _FLOORDIV),\n ]\n for dtype in dtypes:\n for np_func, tf_func in funcs:\n if dtype == tf.complex64 and tf_func == _FLOORDIV:\n continue # floordiv makes no sense for complex\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n # Mod only works for int32 and int64.\n for dtype in [tf.int32, tf.int64]:\n self._compareBinary(10, 3, dtype, np.mod, _MOD)\n\n def testOverloadComparisons(self):\n dtypes = [\n tf.float16,\n tf.float32,\n tf.float64,\n tf.int32,\n tf.int64,\n ]\n funcs = [\n (np.less, _LT),\n (np.less_equal, _LE),\n (np.greater, _GT),\n (np.greater_equal, _GE),\n ]\n for dtype in dtypes:\n for np_func, tf_func in 
funcs:\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n logical_funcs = [\n (np.logical_and, _AND),\n (np.logical_or, _OR),\n (np.logical_xor, _XOR),\n (np.equal, tf.equal),\n (np.not_equal, tf.not_equal)\n ]\n for np_func, tf_func in logical_funcs:\n self._compareBinary(True, False, tf.bool, np_func, tf_func)\n self._compareBinary(True, True, tf.bool, np_func, tf_func)\n self._compareBinary(False, False, tf.bool, np_func, tf_func)\n self._compareBinary(False, True, tf.bool, np_func, tf_func)\n self._compareBinary([True, True, False, False],\n [True, False, True, False],\n tf.bool, np_func, tf_func)\n self._compareUnary(True, tf.bool, np.logical_not, _INV)\n self._compareUnary(False, tf.bool, np.logical_not, _INV)\n self._compareUnary([True, False], tf.bool, np.logical_not, _INV)\n\n\nclass IsFiniteInfNanTest(tf.test.TestCase):\n\n def _compare(self, x, use_gpu):\n np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(x)\n ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(\n inx), tf.is_nan(inx)\n tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])\n self.assertAllEqual(np_inf, tf_inf)\n self.assertAllEqual(np_nan, tf_nan)\n self.assertAllEqual(np_finite, tf_finite)\n self.assertShapeEqual(np_inf, oinf)\n self.assertShapeEqual(np_nan, onan)\n self.assertShapeEqual(np_finite, ofinite)\n\n def _testDtype(self, dtype):\n fi = np.finfo(dtype)\n data = np.array([0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max,\n -np.inf, np.inf, np.nan]).astype(dtype)\n self._compare(data, use_gpu=False)\n self._compare(data, use_gpu=True)\n\n def testHalf(self):\n self._testDtype(np.float16)\n\n def testFloat(self):\n self._testDtype(np.float32)\n\n def testDouble(self):\n self._testDtype(np.float64)\n\n\nclass RoundingTest(tf.test.TestCase):\n\n def _compare(self, x, use_gpu):\n np_floor, np_ceil = np.floor(x), np.ceil(x)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(x)\n ofloor, oceil = tf.floor(inx), tf.ceil(inx)\n tf_floor, tf_ceil = sess.run([ofloor, oceil])\n self.assertAllEqual(np_floor, tf_floor)\n self.assertAllEqual(np_ceil, tf_ceil)\n self.assertShapeEqual(np_floor, ofloor)\n self.assertShapeEqual(np_ceil, oceil)\n\n def _testDtype(self, dtype):\n data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)\n self._compare(data, use_gpu=True)\n self._compare(data, use_gpu=True)\n\n def testTypes(self):\n for dtype in [np.float16, np.float32, np.float64]:\n self._testDtype(dtype)\n\n\nclass ComplexMakeRealImagTest(tf.test.TestCase):\n\n def _compareMake(self, real, imag, use_gpu):\n np_ans = real + (1j) * imag\n with self.test_session(use_gpu=use_gpu):\n real = tf.convert_to_tensor(real)\n imag = tf.convert_to_tensor(imag)\n tf_ans = tf.complex(real, imag)\n out = tf_ans.eval()\n self.assertAllEqual(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def testMake(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n for use_gpu in [False, True]:\n self._compareMake(real, imag, use_gpu)\n self._compareMake(real, 12.0, use_gpu)\n self._compareMake(23.0, imag, use_gpu)\n\n def _compareRealImag(self, cplx, use_gpu):\n np_real, np_imag = np.real(cplx), np.imag(cplx)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(cplx)\n tf_real = tf.real(inx)\n tf_imag = tf.imag(inx)\n tf_real_val, tf_imag_val = sess.run([tf_real, 
tf_imag])\n self.assertAllEqual(np_real, tf_real_val)\n self.assertAllEqual(np_imag, tf_imag_val)\n self.assertShapeEqual(np_real, tf_real)\n self.assertShapeEqual(np_imag, tf_imag)\n\n def testRealImag(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + (1j) * imag\n self._compareRealImag(cplx, use_gpu=False)\n self._compareRealImag(cplx, use_gpu=True)\n\n def _compareConj(self, cplx, use_gpu):\n np_ans = np.conj(cplx)\n with self.test_session(use_gpu=use_gpu):\n inx = tf.convert_to_tensor(cplx)\n tf_conj = tf.conj(inx)\n tf_ans = tf_conj.eval()\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, tf_conj)\n\n def testConj(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + (1j) * imag\n self._compareConj(cplx, use_gpu=False)\n self._compareConj(cplx, use_gpu=True)\n\n def _compareGradient(self, x):\n # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into\n # complex numbers. Then, we extract real and imag parts and\n # computes the squared sum. This is obviously the same as sum(real\n # * real) + sum(imag * imag). We just want to make sure the\n # gradient function is checked.\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n real, imag = tf.split(1, 2, inx)\n real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])\n cplx = tf.complex(real, imag)\n cplx = tf.conj(cplx)\n loss = tf.reduce_sum(\n tf.square(tf.real(cplx))) + tf.reduce_sum(\n tf.square(tf.imag(cplx)))\n epsilon = 1e-3\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n list(x.shape),\n loss,\n [1],\n x_init_value=x,\n delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n def testGradient(self):\n data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)\n self._compareGradient(data)\n\n def _compareMulGradient(self, data):\n # data is a float matrix of shape [n, 4]. 
data[:, 0], data[:, 1],\n # data[:, 2], data[:, 3] are real parts of x, imaginary parts of\n # x, real parts of y and imaginary parts of y.\n with self.test_session():\n inp = tf.convert_to_tensor(data)\n xr, xi, yr, yi = tf.split(1, 4, inp)\n\n def vec(x): # Reshape to a vector\n return tf.reshape(x, [-1])\n xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)\n\n def cplx(r, i): # Combine to a complex vector\n return tf.complex(r, i)\n x, y = cplx(xr, xi), cplx(yr, yi)\n # z is x times y in complex plane.\n z = x * y\n # Defines the loss function as the sum of all coefficients of z.\n loss = tf.reduce_sum(tf.real(z) + tf.imag(z))\n epsilon = 0.005\n jacob_t, jacob_n = tf.test.compute_gradient(inp,\n list(data.shape),\n loss,\n [1],\n x_init_value=data,\n delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n def testMulGradient(self):\n data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)\n self._compareMulGradient(data)\n\n\nclass AccumulateTest(tf.test.TestCase):\n\n def testSimple(self):\n with self.test_session():\n random_arrays = [np.random.rand(16, 16, 16, 16).astype(np.float32)\n for _ in range(20)]\n random_tensors = [tf.convert_to_tensor(x, dtype=tf.float32)\n for x in random_arrays]\n tf_val = tf.accumulate_n(random_tensors)\n np_val = random_arrays[0]\n for random_array in random_arrays[1:]:\n np_val += random_array\n self.assertAllClose(np_val, tf_val.eval())\n\n def testZeroArgs(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n tf_val = tf.accumulate_n([])\n tf_val.eval()\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.is_nan", "numpy.imag", "numpy.minimum", "numpy.sqrt", "numpy.linspace", "tensorflow.is_finite", "tensorflow.reduce_sum", "tensorflow.equal", "tensorflow.minimum", "tensorflow.accumulate_n", "tensorflow.test.compute_gradient", "numpy.exp", "numpy.where", "numpy.random.randint", "numpy.ones_like", "tensorflow.real", "numpy.arange", "tensorflow.floor", "tensorflow.gradients", "tensorflow.test.main", "numpy.finfo", "numpy.ceil", "numpy.real", "tensorflow.select", "tensorflow.ceil", "tensorflow.complex", "tensorflow.imag", "numpy.zeros", "numpy.logical_not", "numpy.isnan", "tensorflow.conj", "tensorflow.is_inf", "numpy.random.rand", "numpy.floor", "tensorflow.split", "numpy.array", "tensorflow.not_equal", "tensorflow.constant", "numpy.conj", "numpy.maximum", "numpy.isfinite", "numpy.abs", "tensorflow.maximum", "tensorflow.reshape", "numpy.complex", "numpy.vectorize", "numpy.shape", "numpy.prod", "numpy.isinf", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "0.12", "1.7" ] } ]
carboncoo/UNITER
[ "dfe007c2cea55430a847fd1cf318e88ae8ffe88f" ]
[ "data/data.py" ]
[ "\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\nDataset interfaces\n\"\"\"\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nimport io\nimport json\nfrom os.path import exists\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, ConcatDataset\nimport horovod.torch as hvd\nfrom tqdm import tqdm\nimport lmdb\nfrom lz4.frame import compress, decompress\n\nimport msgpack\nimport msgpack_numpy\nmsgpack_numpy.patch()\n\n\ndef _fp16_to_fp32(feat_dict):\n out = {k: arr.astype(np.float32)\n if arr.dtype == np.float16 else arr\n for k, arr in feat_dict.items()}\n return out\n\n\ndef compute_num_bb(confs, conf_th, min_bb, max_bb):\n num_bb = max(min_bb, (confs > conf_th).sum())\n num_bb = min(max_bb, num_bb)\n return num_bb\n\n\ndef _check_distributed():\n try:\n dist = hvd.size() != hvd.local_size()\n except ValueError:\n # not using horovod\n dist = False\n return dist\n\n\nclass DetectFeatLmdb(object):\n def __init__(self, img_dir, conf_th=0.2, max_bb=100, min_bb=10, num_bb=36,\n compress=True):\n self.img_dir = img_dir\n if conf_th == -1:\n db_name = f'feat_numbb{num_bb}'\n self.name2nbb = defaultdict(lambda: num_bb)\n else:\n db_name = f'feat_th{conf_th}_max{max_bb}_min{min_bb}'\n nbb = f'nbb_th{conf_th}_max{max_bb}_min{min_bb}.json'\n if not exists(f'{img_dir}/{nbb}'):\n # nbb is not pre-computed\n self.name2nbb = None\n else:\n self.name2nbb = json.load(open(f'{img_dir}/{nbb}'))\n self.compress = compress\n if compress:\n db_name += '_compressed'\n\n if self.name2nbb is None:\n if compress:\n db_name = 'all_compressed'\n else:\n db_name = 'all'\n # only read ahead on single node training\n self.env = lmdb.open(f'{img_dir}/{db_name}',\n readonly=True, create=False,\n readahead=not _check_distributed())\n self.txn = self.env.begin(buffers=True)\n if self.name2nbb is None:\n self.name2nbb = self._compute_nbb()\n\n def _compute_nbb(self):\n name2nbb = {}\n fnames = json.loads(self.txn.get(key=b'__keys__').decode('utf-8'))\n for fname in tqdm(fnames, desc='reading images'):\n dump = self.txn.get(fname.encode('utf-8'))\n if self.compress:\n with io.BytesIO(dump) as reader:\n img_dump = np.load(reader, allow_pickle=True)\n confs = img_dump['conf']\n else:\n img_dump = msgpack.loads(dump, raw=False)\n confs = img_dump['conf']\n name2nbb[fname] = compute_num_bb(confs, self.conf_th,\n self.min_bb, self.max_bb)\n\n return name2nbb\n\n def __del__(self):\n self.env.close()\n\n def get_dump(self, file_name):\n # hack for MRC\n dump = self.txn.get(file_name.encode('utf-8'))\n nbb = self.name2nbb[file_name]\n if self.compress:\n with io.BytesIO(dump) as reader:\n img_dump = np.load(reader, allow_pickle=True)\n img_dump = _fp16_to_fp32(img_dump)\n else:\n img_dump = msgpack.loads(dump, raw=False)\n img_dump = _fp16_to_fp32(img_dump)\n img_dump = {k: arr[:nbb, ...] 
for k, arr in img_dump.items()}\n return img_dump\n\n def __getitem__(self, file_name):\n dump = self.txn.get(file_name.encode('utf-8'))\n nbb = self.name2nbb[file_name]\n if self.compress:\n with io.BytesIO(dump) as reader:\n img_dump = np.load(reader, allow_pickle=True)\n img_dump = {'features': img_dump['features'],\n 'norm_bb': img_dump['norm_bb']}\n else:\n img_dump = msgpack.loads(dump, raw=False)\n img_feat = torch.tensor(img_dump['features'][:nbb, :]).float()\n img_bb = torch.tensor(img_dump['norm_bb'][:nbb, :]).float()\n return img_feat, img_bb\n\n\n@contextmanager\ndef open_lmdb(db_dir, readonly=False):\n db = TxtLmdb(db_dir, readonly)\n try:\n yield db\n finally:\n del db\n\n\nclass TxtLmdb(object):\n def __init__(self, db_dir, readonly=True):\n self.readonly = readonly\n if readonly:\n # training\n self.env = lmdb.open(db_dir,\n readonly=True, create=False,\n readahead=not _check_distributed())\n self.txn = self.env.begin(buffers=True)\n self.write_cnt = None\n else:\n # prepro\n self.env = lmdb.open(db_dir, readonly=False, create=True,\n map_size=4 * 1024**4)\n self.txn = self.env.begin(write=True)\n self.write_cnt = 0\n\n def __del__(self):\n if self.write_cnt:\n self.txn.commit()\n self.env.close()\n\n def __getitem__(self, key):\n return msgpack.loads(decompress(self.txn.get(key.encode('utf-8'))),\n raw=False)\n\n def __setitem__(self, key, value):\n # NOTE: not thread safe\n if self.readonly:\n raise ValueError('readonly text DB')\n ret = self.txn.put(key.encode('utf-8'),\n compress(msgpack.dumps(value, use_bin_type=True)))\n self.write_cnt += 1\n if self.write_cnt % 1000 == 0:\n self.txn.commit()\n self.txn = self.env.begin(write=True)\n self.write_cnt = 0\n return ret\n\n\nclass TxtTokLmdb(object):\n def __init__(self, db_dir, max_txt_len=60):\n if max_txt_len == -1:\n self.id2len = json.load(open(f'{db_dir}/id2len.json'))\n else:\n self.id2len = {\n id_: len_\n for id_, len_ in json.load(open(f'{db_dir}/id2len.json')\n ).items()\n if len_ <= max_txt_len\n }\n self.db_dir = db_dir\n self.db = TxtLmdb(db_dir, readonly=True)\n meta = json.load(open(f'{db_dir}/meta.json', 'r'))\n self.cls_ = meta['CLS']\n self.sep = meta['SEP']\n self.mask = meta['MASK']\n self.v_range = meta['v_range']\n\n def __getitem__(self, id_):\n txt_dump = self.db[id_]\n return txt_dump\n\n def combine_inputs(self, *inputs):\n input_ids = [self.cls_]\n for ids in inputs:\n input_ids.extend(ids + [self.sep])\n return torch.tensor(input_ids)\n\n @property\n def txt2img(self):\n txt2img = json.load(open(f'{self.db_dir}/txt2img.json'))\n return txt2img\n\n @property\n def img2txts(self):\n img2txts = json.load(open(f'{self.db_dir}/img2txts.json'))\n return img2txts\n\n\ndef get_ids_and_lens(db):\n assert isinstance(db, TxtTokLmdb)\n lens = []\n ids = []\n for id_ in list(db.id2len.keys())[hvd.rank()::hvd.size()]:\n lens.append(db.id2len[id_])\n ids.append(id_)\n return lens, ids\n\n\nclass DetectFeatTxtTokDataset(Dataset):\n def __init__(self, txt_db, img_db):\n assert isinstance(txt_db, TxtTokLmdb)\n assert isinstance(img_db, DetectFeatLmdb)\n self.txt_db = txt_db\n self.img_db = img_db\n txt_lens, self.ids = get_ids_and_lens(txt_db)\n\n txt2img = txt_db.txt2img\n self.lens = [tl + self.img_db.name2nbb[txt2img[id_]]\n for tl, id_ in zip(txt_lens, self.ids)]\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, i):\n id_ = self.ids[i]\n example = self.txt_db[id_]\n return example\n\n def _get_img_feat(self, fname):\n img_feat, bb = self.img_db[fname]\n img_bb = torch.cat([bb, bb[:, 
4:5]*bb[:, 5:]], dim=-1)\n num_bb = img_feat.size(0)\n return img_feat, img_bb, num_bb\n\n\ndef pad_tensors(tensors, lens=None, pad=0):\n \"\"\"B x [T, ...]\"\"\"\n if lens is None:\n lens = [t.size(0) for t in tensors]\n max_len = max(lens)\n bs = len(tensors)\n hid = tensors[0].size(-1)\n dtype = tensors[0].dtype\n output = torch.zeros(bs, max_len, hid, dtype=dtype)\n if pad:\n output.data.fill_(pad)\n for i, (t, l) in enumerate(zip(tensors, lens)):\n output.data[i, :l, ...] = t.data\n return output\n\n\ndef get_gather_index(txt_lens, num_bbs, batch_size, max_len, out_size):\n assert len(txt_lens) == len(num_bbs) == batch_size\n gather_index = torch.arange(0, out_size, dtype=torch.long,\n ).unsqueeze(0).repeat(batch_size, 1)\n\n for i, (tl, nbb) in enumerate(zip(txt_lens, num_bbs)):\n gather_index.data[i, tl:tl+nbb] = torch.arange(max_len, max_len+nbb,\n dtype=torch.long).data\n return gather_index\n\n\nclass ConcatDatasetWithLens(ConcatDataset):\n \"\"\" A thin wrapper on pytorch concat dataset for lens batching \"\"\"\n def __init__(self, datasets):\n super().__init__(datasets)\n self.lens = [l for dset in datasets for l in dset.lens]\n\n def __getattr__(self, name):\n return self._run_method_on_all_dsets(name)\n\n def _run_method_on_all_dsets(self, name):\n def run_all(*args, **kwargs):\n return [dset.__getattribute__(name)(*args, **kwargs)\n for dset in self.datasets]\n return run_all\n\n\nclass ImageLmdbGroup(object):\n def __init__(self, conf_th, max_bb, min_bb, num_bb, compress):\n self.path2imgdb = {}\n self.conf_th = conf_th\n self.max_bb = max_bb\n self.min_bb = min_bb\n self.num_bb = num_bb\n self.compress = compress\n\n def __getitem__(self, path):\n img_db = self.path2imgdb.get(path, None)\n if img_db is None:\n img_db = DetectFeatLmdb(path, self.conf_th, self.max_bb,\n self.min_bb, self.num_bb, self.compress)\n return img_db\n" ]
[ [ "torch.cat", "torch.zeros", "torch.tensor", "torch.arange", "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jojonki/AttentionNetworks-for-QA
[ "16f469c0719bf30c42695a1b4df6bbd84db8ea49" ]
[ "process_data.py" ]
[ "import os\nimport numpy as np\nimport json\nimport pickle\nfrom nltk.tokenize import word_tokenize\nimport random\nimport torch\nfrom torch.autograd import Variable\n\n# TODO global\nNULL = \"-NULL-\"\nUNK = \"-UNK-\"\nENT = \"-ENT-\"\n\n\ndef save_pickle(d, path):\n print('save pickle to', path)\n with open(path, mode='wb') as f:\n pickle.dump(d, f)\n\n\ndef load_pickle(path):\n print('load', path)\n with open(path, mode='rb') as f:\n return pickle.load(f)\n\n\ndef lower_list(str_list):\n return [str_var.lower() for str_var in str_list]\n\n\ndef load_task(dataset_path):\n ret_data = []\n ctx_max_len = 0 # character level length\n with open(dataset_path) as f:\n data = json.load(f)\n ver = data['version']\n print('dataset version:', ver)\n data = data['data']\n for i, d in enumerate(data):\n if i % 100 == 0:\n print('load_task:', i, '/', len(data))\n # print('load', d['title'], i, '/', len(data))\n for p in d['paragraphs']:\n if len(p['context']) > ctx_max_len:\n ctx_max_len = len(p['context'])\n c = word_tokenize(p['context'])\n cc = [list(w) for w in c]\n q, a = [], []\n for qa in p['qas']:\n q = word_tokenize(qa['question'])\n qc = [list(w) for w in q]\n a = [ans['text'] for ans in qa['answers']]\n a_beg = [ans['answer_start'] for ans in qa['answers']]\n a_end = [ans['answer_start'] + len(ans['text']) for ans in qa['answers']]\n ret_data.append((c, cc, qa['id'], q, qc, a, a_beg, a_end)) # TODO context redandancy\n return ret_data, ctx_max_len\n\n\ndef load_processed_data(fpath):\n ctx_max_len = 0 # character level length\n with open(fpath) as f:\n lines = f.readlines()\n data = []\n for l in lines:\n c_label, c, q, a, a_txt = l.rstrip().split('\\t')\n if len(c) > ctx_max_len:\n ctx_max_len = len(c)\n c, q, a = c.split(' '), q.split(' '), a.split(' ')\n # if len(c) > 30: continue # TMP\n c, q = lower_list(c), lower_list(q)\n cc = [list(w) for w in c]\n qc = [list(w) for w in q]\n a = [int(aa) for aa in a]\n a = [a[0], a[-1]]\n data.append((c_label, c, cc, q, qc, a, a_txt))\n return data, ctx_max_len\n\n\ndef load_processed_json(fpath_data, fpath_shared):\n # shared ------------\n # x: word level context list\n # cx: chara level context list\n # p: raw str level context list\n # word_counter: word to index\n # char_coun0ter: char to index\n # lower_word_counter: low word counter\n # word2vec: word2vec pretrained weights\n # lower_word2vec: lowered word2vec pretrained weights\n # data ------------\n # q: word level question\n # cq: char-word level question\n # y: word level id\n # *x: [article_id, paragraph_id]\n # *cx: same as *x\n # cy: ?\n # idxs: nothing meaning\n # ids: question id\n # answers: original answer text\n # *p: same as *x\n data = json.load(open(fpath_data))\n shared = json.load(open(fpath_shared))\n return data, shared\n\n\ndef load_glove_weights(glove_dir, embd_dim, vocab_size, word_index):\n embeddings_index = {}\n with open(os.path.join(glove_dir, 'glove.6B.' + str(embd_dim) + 'd.txt')) as f:\n for line in f:\n values = line.split()\n word = values[0]\n vector = np.array(values[1:], dtype='float32')\n embeddings_index[word] = vector\n\n print('Found %s word vectors in glove.' 
% len(embeddings_index))\n embedding_matrix = np.zeros((vocab_size, embd_dim))\n print('embed_matrix.shape', embedding_matrix.shape)\n found_ct = 0\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n # words not found in embedding index will be all-zeros.\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n found_ct += 1\n print(found_ct, 'words are found in glove')\n\n return embedding_matrix\n\n\ndef to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)\n\n\ndef to_np(x):\n return x.data.cpu().numpy()\n\n\ndef _make_word_vector(sentence, w2i, seq_len):\n index_vec = [w2i[w] if w in w2i else w2i[UNK] for w in sentence]\n pad_len = max(0, seq_len - len(index_vec))\n index_vec += [w2i[NULL]] * pad_len\n index_vec = index_vec[:seq_len]\n return index_vec\n\n\ndef _make_char_vector(data, c2i, sent_len, word_len):\n tmp = torch.ones(sent_len, word_len).type(torch.LongTensor) # TODO use fills\n for i, word in enumerate(data):\n for j, ch in enumerate(word):\n tmp[i][j] = c2i[ch] if ch in c2i else c2i[UNK]\n return tmp\n\n\ndef make_vector(batch, w2i, c2i, ctx_sent_len, ctx_word_len, query_sent_len, query_word_len):\n c, cc, q, cq, ans = [], [], [], [], []\n # c, cc, q, cq, a in batch\n for d in batch:\n c.append(_make_word_vector(d[0], w2i, ctx_sent_len))\n cc.append(_make_char_vector(d[1], c2i, ctx_sent_len, ctx_word_len))\n q.append(_make_word_vector(d[2], w2i, query_sent_len))\n cq.append(_make_char_vector(d[3], c2i, query_sent_len, query_word_len))\n ans.append(d[-1])\n c = to_var(torch.LongTensor(c))\n cc = to_var(torch.stack(cc, 0))\n q = to_var(torch.LongTensor(q))\n cq = to_var(torch.stack(cq, 0))\n a = to_var(torch.LongTensor(ans))\n return c, cc, q, cq, a\n\n\nclass DataSet(object):\n def __init__(self, data, shared):\n self.data = data\n self.shared = shared\n\n def size(self):\n return len(self.data['q'])\n\n def get_batches(self, batch_size, shuffle=False):\n batches = []\n batch = []\n for i in range(self.size()): # TODO shuffle, last elms\n rx = self.data['*x'][i] # [article_id, paragraph_id]\n c = lower_list(self.shared['x'][rx[0]][rx[1]][0])\n # if len(c) > 150: continue\n cc = self.shared['cx'][rx[0]][rx[1]][0]\n q = lower_list(self.data['q'][i])\n # if len(q) < 5 or len(q) > 15: continue\n cq = self.data['cq'][i]\n a = self.data['y'][i][0] # [[0, 80], [0, 82]] TODO only use 1-best\n a = (a[0][1], a[1][1]) # (80, 82) <= [[0, 80], [0, 82]]\n batch.append((c, cc, q, cq, a))\n if len(batch) == batch_size:\n batches.append(batch)\n batch = []\n if shuffle:\n random.shuffle(batches)\n return batches\n\n def get_ctx_maxlen(self):\n # char level context maxlen\n return max([len(p) for pp in self.shared['p'] for p in pp])\n\n def get_sent_maxlen(self):\n # word level sentence maxlen\n return max([len(articles[0]) for xx in self.shared['x'] for articles in xx]), max([len(q) for q in self.data['q']])\n\n def get_word_maxlen(self):\n # max word len\n return max([len(w) for xx in self.shared['x'] for articles in xx for w in articles[0]]), max([len(w) for q in self.data['q'] for w in q])\n\n def get_word_index(self, word_count_th=10, char_count_th=100):\n\n word2vec_dict = self.get_word2vec()\n word_counter = self.get_word_counter()\n char_counter = self.get_char_counter()\n w2i = {w: i for i, w in enumerate(w for w, ct in word_counter.items()\n if ct > word_count_th or (w in word2vec_dict))}\n c2i = {c: i for i, c in\n enumerate(c for c, ct in char_counter.items()\n if ct > char_count_th)}\n # w2i[NULL] = 
0\n # w2i[UNK] = 1\n # w2i[ENT] = 2\n # c2i[NULL] = 0\n # c2i[UNK] = 1\n # c2i[ENT] = 2\n\n return w2i, c2i\n\n def get_word2vec(self):\n return self.shared['lower_word2vec']\n\n def get_word_counter(self):\n return self.shared['lower_word_counter']\n\n def get_char_counter(self):\n return self.shared['char_counter']\n" ]
[ [ "torch.LongTensor", "torch.ones", "torch.cuda.is_available", "torch.stack", "numpy.array", "numpy.zeros", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WangWenjun559/MITS
[ "8d7ace2b3b2a58fb33af225c2997106d9402aaf5", "8d7ace2b3b2a58fb33af225c2997106d9402aaf5", "8d7ace2b3b2a58fb33af225c2997106d9402aaf5", "8d7ace2b3b2a58fb33af225c2997106d9402aaf5", "8d7ace2b3b2a58fb33af225c2997106d9402aaf5", "8d7ace2b3b2a58fb33af225c2997106d9402aaf5", "8d7ace2b3b2a58fb33af225c2997106d9402aaf5" ]
[ "summary/sumy/sklearn/decomposition/pca.py", "summary/sumy/sklearn/gaussian_process/tests/test_gaussian_process.py", "summary/sumy/sklearn/datasets/tests/test_base.py", "summary/sumy/sklearn/learning_curve.py", "summary/sumy/sklearn/ensemble/tests/test_weight_boosting.py", "summary/sumy/sklearn/metrics/tests/test_ranking.py", "summary/sumy/sklearn/tests/test_calibration.py" ]
[ "\"\"\" Principal Component Analysis\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Olivier Grisel <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Denis A. Engemann <[email protected]>\n# Michael Eickenberg <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom math import log, sqrt\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.special import gammaln\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils import check_random_state, as_float_array\nfrom ..utils import check_array\nfrom ..utils.extmath import fast_dot, fast_logdet, randomized_svd\nfrom ..utils.validation import check_is_fitted\n\n\ndef _assess_dimension_(spectrum, rank, n_samples, n_features):\n \"\"\"Compute the likelihood of a rank ``rank`` dataset\n\n The dataset is assumed to be embedded in gaussian noise of shape(n,\n dimf) having spectrum ``spectrum``.\n\n Parameters\n ----------\n spectrum: array of shape (n)\n Data spectrum.\n rank: int\n Tested rank value.\n n_samples: int\n Number of samples.\n n_features: int\n Number of features.\n\n Returns\n -------\n ll: float,\n The log-likelihood\n\n Notes\n -----\n This implements the method of `Thomas P. Minka:\n Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`\n \"\"\"\n if rank > len(spectrum):\n raise ValueError(\"The tested rank cannot exceed the rank of the\"\n \" dataset\")\n\n pu = -rank * log(2.)\n for i in range(rank):\n pu += (gammaln((n_features - i) / 2.)\n - log(np.pi) * (n_features - i) / 2.)\n\n pl = np.sum(np.log(spectrum[:rank]))\n pl = -pl * n_samples / 2.\n\n if rank == n_features:\n pv = 0\n v = 1\n else:\n v = np.sum(spectrum[rank:]) / (n_features - rank)\n pv = -np.log(v) * n_samples * (n_features - rank) / 2.\n\n m = n_features * rank - rank * (rank + 1.) / 2.\n pp = log(2. * np.pi) * (m + rank + 1.) / 2.\n\n pa = 0.\n spectrum_ = spectrum.copy()\n spectrum_[rank:n_features] = v\n for i in range(rank):\n for j in range(i + 1, len(spectrum)):\n pa += log((spectrum[i] - spectrum[j]) *\n (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)\n\n ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.\n\n return ll\n\n\ndef _infer_dimension_(spectrum, n_samples, n_features):\n \"\"\"Infers the dimension of a dataset of shape (n_samples, n_features)\n\n The dataset is described by its spectrum `spectrum`.\n \"\"\"\n n_spectrum = len(spectrum)\n ll = np.empty(n_spectrum)\n for rank in range(n_spectrum):\n ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)\n return ll.argmax()\n\n\nclass PCA(BaseEstimator, TransformerMixin):\n \"\"\"Principal component analysis (PCA)\n\n Linear dimensionality reduction using Singular Value Decomposition of the\n data and keeping only the most significant singular vectors to project the\n data to a lower dimensional space.\n\n This implementation uses the scipy.linalg implementation of the singular\n value decomposition. 
It only works for dense arrays and is not scalable to\n large dimensional data.\n\n The time complexity of this implementation is ``O(n ** 3)`` assuming\n n ~ n_samples ~ n_features.\n\n Read more in the :ref:`User Guide <PCA>`.\n\n Parameters\n ----------\n n_components : int, None or string\n Number of components to keep.\n if n_components is not set all components are kept::\n\n n_components == min(n_samples, n_features)\n\n if n_components == 'mle', Minka\\'s MLE is used to guess the dimension\n if ``0 < n_components < 1``, select the number of components such that\n the amount of variance that needs to be explained is greater than the\n percentage specified by n_components\n\n copy : bool\n If False, data passed to fit are overwritten and running\n fit(X).transform(X) will not yield the expected results,\n use fit_transform(X) instead.\n\n whiten : bool, optional\n When True (False by default) the `components_` vectors are divided\n by n_samples times singular values to ensure uncorrelated outputs\n with unit component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making there data respect some hard-wired assumptions.\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n Components with maximum variance.\n\n explained_variance_ratio_ : array, [n_components]\n Percentage of variance explained by each of the selected components. \\\n k is not set then all components are stored and the sum of explained \\\n variances is equal to 1.0\n\n mean_ : array, [n_features]\n Per-feature empirical mean, estimated from the training set.\n\n n_components_ : int\n The estimated number of components. Relevant when n_components is set\n to 'mle' or a number between 0 and 1 to select using explained\n variance.\n\n noise_variance_ : float\n The estimated noise covariance following the Probabilistic PCA model\n from Tipping and Bishop 1999. See \"Pattern Recognition and\n Machine Learning\" by C. Bishop, 12.2.1 p. 574 or\n http://www.miketipping.com/papers/met-mppca.pdf. It is required to\n computed the estimated data covariance and score samples.\n\n Notes\n -----\n For n_components='mle', this class uses the method of `Thomas P. Minka:\n Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`\n\n Implements the probabilistic PCA model from:\n M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,\n Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622\n via the score and score_samples methods.\n See http://www.miketipping.com/papers/met-mppca.pdf\n\n Due to implementation subtleties of the Singular Value Decomposition (SVD),\n which is used in this implementation, running fit twice on the same matrix\n can lead to principal components with signs flipped (change in direction).\n For this reason, it is important to always use the same estimator object to\n transform data in a consistent fashion.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from sklearn.decomposition import PCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> pca = PCA(n_components=2)\n >>> pca.fit(X)\n PCA(copy=True, n_components=2, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [ 0.99244... 
0.00755...]\n\n See also\n --------\n RandomizedPCA\n KernelPCA\n SparsePCA\n TruncatedSVD\n \"\"\"\n def __init__(self, n_components=None, copy=True, whiten=False):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n self._fit(X)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n U, S, V = self._fit(X)\n U = U[:, :self.n_components_]\n\n if self.whiten:\n # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)\n U *= sqrt(X.shape[0])\n else:\n # X_new = X * V = U * S * V^T * V = U * S\n U *= S[:self.n_components_]\n\n return U\n\n def _fit(self, X):\n \"\"\"Fit the model on X\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n\n Returns\n -------\n U, s, V : ndarrays\n The SVD of the input data, copied and centered when\n requested.\n \"\"\"\n X = check_array(X)\n n_samples, n_features = X.shape\n X = as_float_array(X, copy=self.copy)\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n U, S, V = linalg.svd(X, full_matrices=False)\n explained_variance_ = (S ** 2) / n_samples\n explained_variance_ratio_ = (explained_variance_ /\n explained_variance_.sum())\n\n components_ = V\n\n n_components = self.n_components\n if n_components is None:\n n_components = n_features\n elif n_components == 'mle':\n if n_samples < n_features:\n raise ValueError(\"n_components='mle' is only supported \"\n \"if n_samples >= n_features\")\n\n n_components = _infer_dimension_(explained_variance_,\n n_samples, n_features)\n elif not 0 <= n_components <= n_features:\n raise ValueError(\"n_components=%r invalid for n_features=%d\"\n % (n_components, n_features))\n\n if 0 < n_components < 1.0:\n # number of components for which the cumulated explained variance\n # percentage is superior to the desired threshold\n ratio_cumsum = explained_variance_ratio_.cumsum()\n n_components = np.sum(ratio_cumsum < n_components) + 1\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 
12.46)\n if n_components < n_features:\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n # store n_samples to revert whitening when getting covariance\n self.n_samples_ = n_samples\n\n self.components_ = components_[:n_components]\n self.explained_variance_ = explained_variance_[:n_components]\n explained_variance_ratio_ = explained_variance_ratio_[:n_components]\n self.explained_variance_ratio_ = explained_variance_ratio_\n self.n_components_ = n_components\n\n return (U, S, V)\n\n def get_covariance(self):\n \"\"\"Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances.\n\n Returns\n -------\n cov : array, shape=(n_features, n_features)\n Estimated covariance of data.\n \"\"\"\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)\n cov = np.dot(components_.T * exp_var_diff, components_)\n cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace\n return cov\n\n def get_precision(self):\n \"\"\"Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n \"\"\"\n n_features = self.components_.shape[1]\n\n # handle corner cases first\n if self.n_components_ == 0:\n return np.eye(n_features) / self.noise_variance_\n if self.n_components_ == n_features:\n return linalg.inv(self.get_covariance())\n\n # Get precision using matrix inversion lemma\n components_ = self.components_\n exp_var = self.explained_variance_\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)\n precision = np.dot(components_, components_.T) / self.noise_variance_\n precision.flat[::len(precision) + 1] += 1. / exp_var_diff\n precision = np.dot(components_.T,\n np.dot(linalg.inv(precision), components_))\n precision /= -(self.noise_variance_ ** 2)\n precision.flat[::len(precision) + 1] += 1. 
/ self.noise_variance_\n return precision\n\n def transform(self, X):\n \"\"\"Apply the dimensionality reduction on X.\n\n X is projected on the first principal components previous extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples is the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n if self.mean_ is not None:\n X = X - self.mean_\n X_transformed = fast_dot(X, self.components_.T)\n if self.whiten:\n X_transformed /= np.sqrt(self.explained_variance_)\n return X_transformed\n\n def inverse_transform(self, X):\n \"\"\"Transform data back to its original space, i.e.,\n return an input X_original whose transform would be X\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_components)\n New data, where n_samples is the number of samples\n and n_components is the number of components.\n\n Returns\n -------\n X_original array-like, shape (n_samples, n_features)\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n if self.whiten:\n return fast_dot(\n X,\n np.sqrt(self.explained_variance_[:, np.newaxis]) *\n self.components_) + self.mean_\n else:\n return fast_dot(X, self.components_) + self.mean_\n\n def score_samples(self, X):\n \"\"\"Return the log-likelihood of each sample\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X: array, shape(n_samples, n_features)\n The data.\n\n Returns\n -------\n ll: array, shape (n_samples,)\n Log-likelihood of each sample under the current model\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n Xr = X - self.mean_\n n_features = X.shape[1]\n log_like = np.zeros(X.shape[0])\n precision = self.get_precision()\n log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)\n log_like -= .5 * (n_features * log(2. * np.pi)\n - fast_logdet(precision))\n return log_like\n\n def score(self, X, y=None):\n \"\"\"Return the average log-likelihood of all samples\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X: array, shape(n_samples, n_features)\n The data.\n\n Returns\n -------\n ll: float\n Average log-likelihood of the samples under the current model\n \"\"\"\n return np.mean(self.score_samples(X))\n\n\nclass RandomizedPCA(BaseEstimator, TransformerMixin):\n \"\"\"Principal component analysis (PCA) using randomized SVD\n\n Linear dimensionality reduction using approximated Singular Value\n Decomposition of the data and keeping only the most significant\n singular vectors to project the data to a lower dimensional space.\n\n Read more in the :ref:`User Guide <RandomizedPCA>`.\n\n Parameters\n ----------\n n_components : int, optional\n Maximum number of components to keep. When not given or None, this\n is set to n_features (the second dimension of the training data).\n\n copy : bool\n If False, data passed to fit are overwritten and running\n fit(X).transform(X) will not yield the expected results,\n use fit_transform(X) instead.\n\n iterated_power : int, optional\n Number of iterations for the power method. 
3 by default.\n\n whiten : bool, optional\n When True (False by default) the `components_` vectors are divided\n by the singular values to ensure uncorrelated outputs with unit\n component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making their data respect some hard-wired assumptions.\n\n random_state : int or RandomState instance or None (default)\n Pseudo Random Number generator seed control. If None, use the\n numpy.random singleton.\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n Components with maximum variance.\n\n explained_variance_ratio_ : array, [n_components]\n Percentage of variance explained by each of the selected components. \\\n k is not set then all components are stored and the sum of explained \\\n variances is equal to 1.0\n\n mean_ : array, [n_features]\n Per-feature empirical mean, estimated from the training set.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.decomposition import RandomizedPCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> pca = RandomizedPCA(n_components=2)\n >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n RandomizedPCA(copy=True, iterated_power=3, n_components=2,\n random_state=None, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [ 0.99244... 0.00755...]\n\n See also\n --------\n PCA\n TruncatedSVD\n\n References\n ----------\n\n .. [Halko2009] `Finding structure with randomness: Stochastic algorithms\n for constructing approximate matrix decompositions Halko, et al., 2009\n (arXiv:909)`\n\n .. [MRT] `A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`\n\n \"\"\"\n\n def __init__(self, n_components=None, copy=True, iterated_power=3,\n whiten=False, random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.iterated_power = iterated_power\n self.whiten = whiten\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X by extracting the first principal components.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n self._fit(check_array(X))\n return self\n\n def _fit(self, X):\n \"\"\"Fit the model to the data X.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n\n Returns\n -------\n X : ndarray, shape (n_samples, n_features)\n The input data, copied, centered and whitened when requested.\n \"\"\"\n random_state = check_random_state(self.random_state)\n X = np.atleast_2d(as_float_array(X, copy=self.copy))\n\n n_samples = X.shape[0]\n\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n if self.n_components is None:\n n_components = X.shape[1]\n else:\n n_components = self.n_components\n\n U, S, V = randomized_svd(X, n_components,\n n_iter=self.iterated_power,\n random_state=random_state)\n\n self.explained_variance_ = exp_var = (S ** 2) / n_samples\n full_var = np.var(X, axis=0).sum()\n self.explained_variance_ratio_ = exp_var / full_var\n\n if self.whiten:\n self.components_ = 
V / S[:, np.newaxis] * sqrt(n_samples)\n else:\n self.components_ = V\n\n return X\n\n def transform(self, X, y=None):\n \"\"\"Apply dimensionality reduction on X.\n\n X is projected on the first principal components previous extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n if self.mean_ is not None:\n X = X - self.mean_\n\n X = fast_dot(X, self.components_.T)\n return X\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n X = check_array(X)\n X = self._fit(X)\n return fast_dot(X, self.components_.T)\n\n def inverse_transform(self, X, y=None):\n \"\"\"Transform data back to its original space.\n\n Returns an array X_original whose transform would be X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_components)\n New data, where n_samples in the number of samples\n and n_components is the number of components.\n\n Returns\n -------\n X_original array-like, shape (n_samples, n_features)\n\n Notes\n -----\n If whitening is enabled, inverse_transform does not compute the\n exact inverse operation of transform.\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X_original = fast_dot(X, self.components_)\n if self.mean_ is not None:\n X_original = X_original + self.mean_\n return X_original\n", "\"\"\"\nTesting for Gaussian Process module (sklearn.gaussian_process)\n\"\"\"\n\n# Author: Vincent Dubourg <[email protected]>\n# Licence: BSD 3 clause\n\nfrom nose.tools import raises\nfrom nose.tools import assert_true\n\nimport numpy as np\n\nfrom sklearn.gaussian_process import GaussianProcess\nfrom sklearn.gaussian_process import regression_models as regression\nfrom sklearn.gaussian_process import correlation_models as correlation\nfrom sklearn.datasets import make_regression\nfrom sklearn.utils.testing import assert_greater\n\n\nf = lambda x: x * np.sin(x)\nX = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T\nX2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T\ny = f(X).ravel()\n\n\ndef test_1d(regr=regression.constant, corr=correlation.squared_exponential,\n random_start=10, beta0=None):\n # MLE estimation of a one-dimensional Gaussian Process model.\n # Check random start optimization.\n # Test the interpolating property.\n gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,\n theta0=1e-2, thetaL=1e-4, thetaU=1e-1,\n random_start=random_start, verbose=False).fit(X, y)\n y_pred, MSE = gp.predict(X, eval_MSE=True)\n y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)\n\n assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)\n and np.allclose(MSE2, 0., atol=10))\n\n\ndef test_2d(regr=regression.constant, corr=correlation.squared_exponential,\n random_start=10, beta0=None):\n # MLE estimation of a two-dimensional Gaussian Process model accounting for\n # anisotropy. 
Check random start optimization.\n # Test the interpolating property.\n b, kappa, e = 5., .5, .1\n g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.\n X = np.array([[-4.61611719, -6.00099547],\n [4.10469096, 5.32782448],\n [0.00000000, -0.50000000],\n [-6.17289014, -4.6984743],\n [1.3109306, -6.93271427],\n [-5.03823144, 3.10584743],\n [-2.87600388, 6.74310541],\n [5.21301203, 4.26386883]])\n y = g(X).ravel()\n\n thetaL = [1e-4] * 2\n thetaU = [1e-1] * 2\n gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,\n theta0=[1e-2] * 2, thetaL=thetaL,\n thetaU=thetaU,\n random_start=random_start, verbose=False)\n gp.fit(X, y)\n y_pred, MSE = gp.predict(X, eval_MSE=True)\n\n assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))\n\n eps = np.finfo(gp.theta_.dtype).eps\n assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters\n assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters\n\n\ndef test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,\n random_start=10, beta0=None):\n # MLE estimation of a two-dimensional Gaussian Process model accounting for\n # anisotropy. Check random start optimization.\n # Test the GP interpolation for 2D output\n b, kappa, e = 5., .5, .1\n g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.\n f = lambda x: np.vstack((g(x), g(x))).T\n X = np.array([[-4.61611719, -6.00099547],\n [4.10469096, 5.32782448],\n [0.00000000, -0.50000000],\n [-6.17289014, -4.6984743],\n [1.3109306, -6.93271427],\n [-5.03823144, 3.10584743],\n [-2.87600388, 6.74310541],\n [5.21301203, 4.26386883]])\n y = f(X)\n gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,\n theta0=[1e-2] * 2, thetaL=[1e-4] * 2,\n thetaU=[1e-1] * 2,\n random_start=random_start, verbose=False)\n gp.fit(X, y)\n y_pred, MSE = gp.predict(X, eval_MSE=True)\n\n assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))\n\n\n@raises(ValueError)\ndef test_wrong_number_of_outputs():\n gp = GaussianProcess()\n gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])\n\n\ndef test_more_builtin_correlation_models(random_start=1):\n # Repeat test_1d and test_2d for several built-in correlation\n # models specified as strings.\n all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',\n 'linear']\n\n for corr in all_corr:\n test_1d(regr='constant', corr=corr, random_start=random_start)\n test_2d(regr='constant', corr=corr, random_start=random_start)\n test_2d_2d(regr='constant', corr=corr, random_start=random_start)\n\n\ndef test_ordinary_kriging():\n # Repeat test_1d and test_2d with given regression weights (beta0) for\n # different regression models (Ordinary Kriging).\n test_1d(regr='linear', beta0=[0., 0.5])\n test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])\n test_2d(regr='linear', beta0=[0., 0.5, 0.5])\n test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])\n test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])\n test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])\n\n\ndef test_no_normalize():\n gp = GaussianProcess(normalize=False).fit(X, y)\n y_pred = gp.predict(X)\n assert_true(np.allclose(y_pred, y))\n\n\ndef test_random_starts():\n # Test that an increasing number of random-starts of GP fitting only\n # increases the reduced likelihood function of the optimal theta.\n n_samples, n_features = 50, 3\n np.random.seed(0)\n rng = np.random.RandomState(0)\n X = rng.randn(n_samples, n_features) * 2 - 1\n y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)\n best_likelihood = -np.inf\n for random_start in range(1, 5):\n 
gp = GaussianProcess(regr=\"constant\", corr=\"squared_exponential\",\n theta0=[1e-0] * n_features,\n thetaL=[1e-4] * n_features,\n thetaU=[1e+1] * n_features,\n random_start=random_start, random_state=0,\n verbose=False).fit(X, y)\n rlf = gp.reduced_likelihood_function()[0]\n assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)\n best_likelihood = rlf\n\n\ndef test_mse_solving():\n # test the MSE estimate to be sane.\n # non-regression test for ignoring off-diagonals of feature covariance,\n # testing with nugget that renders covariance useless, only\n # using the mean function, with low effective rank of data\n gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,\n thetaL=1e-12, thetaU=1e-2, nugget=1e-2,\n optimizer='Welch', regr=\"linear\", random_state=0)\n\n X, y = make_regression(n_informative=3, n_features=60, noise=50,\n random_state=0, effective_rank=1)\n\n gp.fit(X, y)\n assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())\n", "import os\nimport shutil\nimport tempfile\nimport warnings\nimport nose\nimport numpy\nfrom pickle import loads\nfrom pickle import dumps\n\nfrom sklearn.datasets import get_data_home\nfrom sklearn.datasets import clear_data_home\nfrom sklearn.datasets import load_files\nfrom sklearn.datasets import load_sample_images\nfrom sklearn.datasets import load_sample_image\nfrom sklearn.datasets import load_digits\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.datasets import load_linnerud\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import load_boston\nfrom sklearn.datasets.base import Bunch\n\nfrom sklearn.externals.six import b, u\n\nfrom sklearn.utils.testing import assert_false\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_raises\n\n\nDATA_HOME = tempfile.mkdtemp(prefix=\"scikit_learn_data_home_test_\")\nLOAD_FILES_ROOT = tempfile.mkdtemp(prefix=\"scikit_learn_load_files_test_\")\nTEST_CATEGORY_DIR1 = \"\"\nTEST_CATEGORY_DIR2 = \"\"\n\n\ndef _remove_dir(path):\n if os.path.isdir(path):\n shutil.rmtree(path)\n\n\ndef teardown_module():\n \"\"\"Test fixture (clean up) run once after all tests of this module\"\"\"\n for path in [DATA_HOME, LOAD_FILES_ROOT]:\n _remove_dir(path)\n\n\ndef setup_load_files():\n global TEST_CATEGORY_DIR1\n global TEST_CATEGORY_DIR2\n TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)\n TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)\n sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,\n delete=False)\n sample_file.write(b(\"Hello World!\\n\"))\n sample_file.close()\n\n\ndef teardown_load_files():\n _remove_dir(TEST_CATEGORY_DIR1)\n _remove_dir(TEST_CATEGORY_DIR2)\n\n\ndef test_data_home():\n # get_data_home will point to a pre-existing folder\n data_home = get_data_home(data_home=DATA_HOME)\n assert_equal(data_home, DATA_HOME)\n assert_true(os.path.exists(data_home))\n\n # clear_data_home will delete both the content and the folder it-self\n clear_data_home(data_home=data_home)\n assert_false(os.path.exists(data_home))\n\n # if the folder is missing it will be created again\n data_home = get_data_home(data_home=DATA_HOME)\n assert_true(os.path.exists(data_home))\n\n\ndef test_default_empty_load_files():\n res = load_files(LOAD_FILES_ROOT)\n assert_equal(len(res.filenames), 0)\n assert_equal(len(res.target_names), 0)\n assert_equal(res.DESCR, None)\n\n\[email protected]_setup(setup_load_files, teardown_load_files)\ndef test_default_load_files():\n res = 
load_files(LOAD_FILES_ROOT)\n assert_equal(len(res.filenames), 1)\n assert_equal(len(res.target_names), 2)\n assert_equal(res.DESCR, None)\n assert_equal(res.data, [b(\"Hello World!\\n\")])\n\n\[email protected]_setup(setup_load_files, teardown_load_files)\ndef test_load_files_w_categories_desc_and_encoding():\n category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()\n res = load_files(LOAD_FILES_ROOT, description=\"test\",\n categories=category, encoding=\"utf-8\")\n assert_equal(len(res.filenames), 1)\n assert_equal(len(res.target_names), 1)\n assert_equal(res.DESCR, \"test\")\n assert_equal(res.data, [u(\"Hello World!\\n\")])\n\n\[email protected]_setup(setup_load_files, teardown_load_files)\ndef test_load_files_wo_load_content():\n res = load_files(LOAD_FILES_ROOT, load_content=False)\n assert_equal(len(res.filenames), 1)\n assert_equal(len(res.target_names), 2)\n assert_equal(res.DESCR, None)\n assert_equal(res.get('data'), None)\n\n\ndef test_load_sample_images():\n try:\n res = load_sample_images()\n assert_equal(len(res.images), 2)\n assert_equal(len(res.filenames), 2)\n assert_true(res.DESCR)\n except ImportError:\n warnings.warn(\"Could not load sample images, PIL is not available.\")\n\n\ndef test_load_digits():\n digits = load_digits()\n assert_equal(digits.data.shape, (1797, 64))\n assert_equal(numpy.unique(digits.target).size, 10)\n\n\ndef test_load_digits_n_class_lt_10():\n digits = load_digits(9)\n assert_equal(digits.data.shape, (1617, 64))\n assert_equal(numpy.unique(digits.target).size, 9)\n\n\ndef test_load_sample_image():\n try:\n china = load_sample_image('china.jpg')\n assert_equal(china.dtype, 'uint8')\n assert_equal(china.shape, (427, 640, 3))\n except ImportError:\n warnings.warn(\"Could not load sample images, PIL is not available.\")\n\n\ndef test_load_missing_sample_image_error():\n have_PIL = True\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n have_PIL = False\n if have_PIL:\n assert_raises(AttributeError, load_sample_image,\n 'blop.jpg')\n else:\n warnings.warn(\"Could not load sample images, PIL is not available.\")\n\n\ndef test_load_diabetes():\n res = load_diabetes()\n assert_equal(res.data.shape, (442, 10))\n assert_true(res.target.size, 442)\n\n\ndef test_load_linnerud():\n res = load_linnerud()\n assert_equal(res.data.shape, (20, 3))\n assert_equal(res.target.shape, (20, 3))\n assert_equal(len(res.target_names), 3)\n assert_true(res.DESCR)\n\n\ndef test_load_iris():\n res = load_iris()\n assert_equal(res.data.shape, (150, 4))\n assert_equal(res.target.size, 150)\n assert_equal(res.target_names.size, 3)\n assert_true(res.DESCR)\n\n\ndef test_load_boston():\n res = load_boston()\n assert_equal(res.data.shape, (506, 13))\n assert_equal(res.target.size, 506)\n assert_equal(res.feature_names.size, 13)\n assert_true(res.DESCR)\n\n\ndef test_loads_dumps_bunch():\n bunch = Bunch(x=\"x\")\n bunch_from_pkl = loads(dumps(bunch))\n bunch_from_pkl.x = \"y\"\n assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)\n", "\"\"\"Utilities to evaluate models with respect to a variable\n\"\"\"\n# Author: Alexander Fabisch <[email protected]>\n#\n# License: BSD 3 clause\n\nimport warnings\n\nimport numpy as np\n\nfrom .base import is_classifier, clone\nfrom .cross_validation import _check_cv\nfrom .externals.joblib import Parallel, delayed\nfrom .cross_validation import _safe_split, _score, _fit_and_score\nfrom .metrics.scorer import check_scoring\nfrom .utils import indexable\nfrom 
.utils.fixes import astype\n\n\n__all__ = ['learning_curve', 'validation_curve']\n\n\ndef learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),\n cv=None, scoring=None, exploit_incremental_learning=False,\n n_jobs=1, pre_dispatch=\"all\", verbose=0):\n \"\"\"Learning curve.\n\n Determines cross-validated training and test scores for different training\n set sizes.\n\n A cross-validation generator splits the whole dataset k times in training\n and test data. Subsets of the training set with varying sizes will be used\n to train the estimator and a score for each training subset size and the\n test set will be computed. Afterwards, the scores will be averaged over\n all k runs for each training subset size.\n\n Read more in the :ref:`User Guide <learning_curves>`.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n\n cv : integer, cross-validation generator, optional\n If an integer is passed, it is the number of folds (defaults to 3).\n Specific cross-validation objects can be passed, see\n sklearn.cross_validation module for the list of possible objects\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n exploit_incremental_learning : boolean, optional, default: False\n If the estimator supports incremental learning, this will be\n used to speed up fitting for different training set sizes.\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n\n pre_dispatch : integer or string, optional\n Number of predispatched jobs for parallel execution (default is\n all). The option can reduce the allocated memory. The string can\n be an expression like '2*n_jobs'.\n\n verbose : integer, optional\n Controls the verbosity: the higher, the more messages.\n\n Returns\n -------\n train_sizes_abs : array, shape = (n_unique_ticks,), dtype int\n Numbers of training examples that has been used to generate the\n learning curve. 
Note that the number of ticks might be less\n than n_ticks because duplicate entries will be removed.\n\n train_scores : array, shape (n_ticks, n_cv_folds)\n Scores on training sets.\n\n test_scores : array, shape (n_ticks, n_cv_folds)\n Scores on test set.\n\n Notes\n -----\n See :ref:`examples/model_selection/plot_learning_curve.py\n <example_model_selection_plot_learning_curve.py>`\n \"\"\"\n if exploit_incremental_learning and not hasattr(estimator, \"partial_fit\"):\n raise ValueError(\"An estimator must support the partial_fit interface \"\n \"to exploit incremental learning\")\n\n X, y = indexable(X, y)\n # Make a list since we will be iterating multiple times over the folds\n cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))\n scorer = check_scoring(estimator, scoring=scoring)\n\n # HACK as long as boolean indices are allowed in cv generators\n if cv[0][0].dtype == bool:\n new_cv = []\n for i in range(len(cv)):\n new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))\n cv = new_cv\n\n n_max_training_samples = len(cv[0][0])\n # Because the lengths of folds can be significantly different, it is\n # not guaranteed that we use all of the available training data when we\n # use the first 'n_max_training_samples' samples.\n train_sizes_abs = _translate_train_sizes(train_sizes,\n n_max_training_samples)\n n_unique_ticks = train_sizes_abs.shape[0]\n if verbose > 0:\n print(\"[learning_curve] Training set sizes: \" + str(train_sizes_abs))\n\n parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,\n verbose=verbose)\n if exploit_incremental_learning:\n classes = np.unique(y) if is_classifier(estimator) else None\n out = parallel(delayed(_incremental_fit_estimator)(\n clone(estimator), X, y, classes, train, test, train_sizes_abs,\n scorer, verbose) for train, test in cv)\n else:\n out = parallel(delayed(_fit_and_score)(\n clone(estimator), X, y, scorer, train[:n_train_samples], test,\n verbose, parameters=None, fit_params=None, return_train_score=True)\n for train, test in cv for n_train_samples in train_sizes_abs)\n out = np.array(out)[:, :2]\n n_cv_folds = out.shape[0] // n_unique_ticks\n out = out.reshape(n_cv_folds, n_unique_ticks, 2)\n\n out = np.asarray(out).transpose((2, 1, 0))\n\n return train_sizes_abs, out[0], out[1]\n\n\ndef _translate_train_sizes(train_sizes, n_max_training_samples):\n \"\"\"Determine absolute sizes of training subsets and validate 'train_sizes'.\n\n Examples:\n _translate_train_sizes([0.5, 1.0], 10) -> [5, 10]\n _translate_train_sizes([5, 10], 10) -> [5, 10]\n\n Parameters\n ----------\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Numbers of training examples that will be used to generate the\n learning curve. If the dtype is float, it is regarded as a\n fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].\n\n n_max_training_samples : int\n Maximum number of training samples (upper bound of 'train_sizes').\n\n Returns\n -------\n train_sizes_abs : array, shape (n_unique_ticks,), dtype int\n Numbers of training examples that will be used to generate the\n learning curve. 
Note that the number of ticks might be less\n than n_ticks because duplicate entries will be removed.\n \"\"\"\n train_sizes_abs = np.asarray(train_sizes)\n n_ticks = train_sizes_abs.shape[0]\n n_min_required_samples = np.min(train_sizes_abs)\n n_max_required_samples = np.max(train_sizes_abs)\n if np.issubdtype(train_sizes_abs.dtype, np.float):\n if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:\n raise ValueError(\"train_sizes has been interpreted as fractions \"\n \"of the maximum number of training samples and \"\n \"must be within (0, 1], but is within [%f, %f].\"\n % (n_min_required_samples,\n n_max_required_samples))\n train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,\n dtype=np.int, copy=False)\n train_sizes_abs = np.clip(train_sizes_abs, 1,\n n_max_training_samples)\n else:\n if (n_min_required_samples <= 0 or\n n_max_required_samples > n_max_training_samples):\n raise ValueError(\"train_sizes has been interpreted as absolute \"\n \"numbers of training samples and must be within \"\n \"(0, %d], but is within [%d, %d].\"\n % (n_max_training_samples,\n n_min_required_samples,\n n_max_required_samples))\n\n train_sizes_abs = np.unique(train_sizes_abs)\n if n_ticks > train_sizes_abs.shape[0]:\n warnings.warn(\"Removed duplicate entries from 'train_sizes'. Number \"\n \"of ticks will be less than than the size of \"\n \"'train_sizes' %d instead of %d).\"\n % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)\n\n return train_sizes_abs\n\n\ndef _incremental_fit_estimator(estimator, X, y, classes, train, test,\n train_sizes, scorer, verbose):\n \"\"\"Train estimator on training subsets incrementally and compute scores.\"\"\"\n train_scores, test_scores = [], []\n partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])\n for n_train_samples, partial_train in partitions:\n train_subset = train[:n_train_samples]\n X_train, y_train = _safe_split(estimator, X, y, train_subset)\n X_partial_train, y_partial_train = _safe_split(estimator, X, y,\n partial_train)\n X_test, y_test = _safe_split(estimator, X, y, test, train_subset)\n if y_partial_train is None:\n estimator.partial_fit(X_partial_train, classes=classes)\n else:\n estimator.partial_fit(X_partial_train, y_partial_train,\n classes=classes)\n train_scores.append(_score(estimator, X_train, y_train, scorer))\n test_scores.append(_score(estimator, X_test, y_test, scorer))\n return np.array((train_scores, test_scores)).T\n\n\ndef validation_curve(estimator, X, y, param_name, param_range, cv=None,\n scoring=None, n_jobs=1, pre_dispatch=\"all\", verbose=0):\n \"\"\"Validation curve.\n\n Determine training and test scores for varying parameter values.\n\n Compute scores for an estimator with different values of a specified\n parameter. This is similar to grid search with one parameter. 
However, this\n will also compute training scores and is merely a utility for plotting the\n results.\n\n Read more in the :ref:`User Guide <validation_curve>`.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n param_name : string\n Name of the parameter that will be varied.\n\n param_range : array-like, shape (n_values,)\n The values of the parameter that will be evaluated.\n\n cv : integer, cross-validation generator, optional\n If an integer is passed, it is the number of folds (defaults to 3).\n Specific cross-validation objects can be passed, see\n sklearn.cross_validation module for the list of possible objects\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n\n pre_dispatch : integer or string, optional\n Number of predispatched jobs for parallel execution (default is\n all). The option can reduce the allocated memory. The string can\n be an expression like '2*n_jobs'.\n\n verbose : integer, optional\n Controls the verbosity: the higher, the more messages.\n\n Returns\n -------\n train_scores : array, shape (n_ticks, n_cv_folds)\n Scores on training sets.\n\n test_scores : array, shape (n_ticks, n_cv_folds)\n Scores on test set.\n\n Notes\n -----\n See\n :ref:`examples/model_selection/plot_validation_curve.py\n <example_model_selection_plot_validation_curve.py>`\n \"\"\"\n X, y = indexable(X, y)\n cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))\n scorer = check_scoring(estimator, scoring=scoring)\n\n parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,\n verbose=verbose)\n out = parallel(delayed(_fit_and_score)(\n estimator, X, y, scorer, train, test, verbose,\n parameters={param_name: v}, fit_params=None, return_train_score=True)\n for train, test in cv for v in param_range)\n\n out = np.asarray(out)[:, :2]\n n_params = len(param_range)\n n_cv_folds = out.shape[0] // n_params\n out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))\n\n return out[0], out[1]\n", "\"\"\"Testing for the boost module (sklearn.ensemble.boost).\"\"\"\n\nimport numpy as np\nfrom sklearn.utils.testing import assert_array_equal, assert_array_less\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_raises, assert_raises_regexp\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom scipy.sparse import csc_matrix\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import coo_matrix\nfrom scipy.sparse import dok_matrix\nfrom scipy.sparse import lil_matrix\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.utils import shuffle\nfrom sklearn import datasets\n\n\n# Common random state\nrng = 
np.random.RandomState(0)\n\n# Toy sample\nX = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]\ny_class = [\"foo\", \"foo\", \"foo\", 1, 1, 1] # test string class labels\ny_regr = [-1, -1, -1, 1, 1, 1]\nT = [[-1, -1], [2, 2], [3, 2]]\ny_t_class = [\"foo\", 1, 1]\ny_t_regr = [-1, 1, 1]\n\n# Load the iris dataset and randomly permute it\niris = datasets.load_iris()\nperm = rng.permutation(iris.target.size)\niris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)\n\n# Load the boston dataset and randomly permute it\nboston = datasets.load_boston()\nboston.data, boston.target = shuffle(boston.data, boston.target,\n random_state=rng)\n\n\ndef test_classification_toy():\n # Check classification on a toy dataset.\n for alg in ['SAMME', 'SAMME.R']:\n clf = AdaBoostClassifier(algorithm=alg, random_state=0)\n clf.fit(X, y_class)\n assert_array_equal(clf.predict(T), y_t_class)\n assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)\n assert_equal(clf.predict_proba(T).shape, (len(T), 2))\n assert_equal(clf.decision_function(T).shape, (len(T),))\n\n\ndef test_regression_toy():\n # Check classification on a toy dataset.\n clf = AdaBoostRegressor(random_state=0)\n clf.fit(X, y_regr)\n assert_array_equal(clf.predict(T), y_t_regr)\n\n\ndef test_iris():\n # Check consistency on dataset iris.\n classes = np.unique(iris.target)\n clf_samme = prob_samme = None\n\n for alg in ['SAMME', 'SAMME.R']:\n clf = AdaBoostClassifier(algorithm=alg)\n clf.fit(iris.data, iris.target)\n\n assert_array_equal(classes, clf.classes_)\n proba = clf.predict_proba(iris.data)\n if alg == \"SAMME\":\n clf_samme = clf\n prob_samme = proba\n assert_equal(proba.shape[1], len(classes))\n assert_equal(clf.decision_function(iris.data).shape[1], len(classes))\n\n score = clf.score(iris.data, iris.target)\n assert score > 0.9, \"Failed with algorithm %s and score = %f\" % \\\n (alg, score)\n\n # Somewhat hacky regression test: prior to\n # ae7adc880d624615a34bafdb1d75ef67051b8200,\n # predict_proba returned SAMME.R values for SAMME.\n clf_samme.algorithm = \"SAMME.R\"\n assert_array_less(0,\n np.abs(clf_samme.predict_proba(iris.data) - prob_samme))\n\n\ndef test_boston():\n # Check consistency on dataset boston house prices.\n clf = AdaBoostRegressor(random_state=0)\n clf.fit(boston.data, boston.target)\n score = clf.score(boston.data, boston.target)\n assert score > 0.85\n\n\ndef test_staged_predict():\n # Check staged predictions.\n rng = np.random.RandomState(0)\n iris_weights = rng.randint(10, size=iris.target.shape)\n boston_weights = rng.randint(10, size=boston.target.shape)\n\n # AdaBoost classification\n for alg in ['SAMME', 'SAMME.R']:\n clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)\n clf.fit(iris.data, iris.target, sample_weight=iris_weights)\n\n predictions = clf.predict(iris.data)\n staged_predictions = [p for p in clf.staged_predict(iris.data)]\n proba = clf.predict_proba(iris.data)\n staged_probas = [p for p in clf.staged_predict_proba(iris.data)]\n score = clf.score(iris.data, iris.target, sample_weight=iris_weights)\n staged_scores = [\n s for s in clf.staged_score(\n iris.data, iris.target, sample_weight=iris_weights)]\n\n assert_equal(len(staged_predictions), 10)\n assert_array_almost_equal(predictions, staged_predictions[-1])\n assert_equal(len(staged_probas), 10)\n assert_array_almost_equal(proba, staged_probas[-1])\n assert_equal(len(staged_scores), 10)\n assert_array_almost_equal(score, staged_scores[-1])\n\n # AdaBoost regression\n clf = 
AdaBoostRegressor(n_estimators=10, random_state=0)\n clf.fit(boston.data, boston.target, sample_weight=boston_weights)\n\n predictions = clf.predict(boston.data)\n staged_predictions = [p for p in clf.staged_predict(boston.data)]\n score = clf.score(boston.data, boston.target, sample_weight=boston_weights)\n staged_scores = [\n s for s in clf.staged_score(\n boston.data, boston.target, sample_weight=boston_weights)]\n\n assert_equal(len(staged_predictions), 10)\n assert_array_almost_equal(predictions, staged_predictions[-1])\n assert_equal(len(staged_scores), 10)\n assert_array_almost_equal(score, staged_scores[-1])\n\n\ndef test_gridsearch():\n # Check that base trees can be grid-searched.\n # AdaBoost classification\n boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())\n parameters = {'n_estimators': (1, 2),\n 'base_estimator__max_depth': (1, 2),\n 'algorithm': ('SAMME', 'SAMME.R')}\n clf = GridSearchCV(boost, parameters)\n clf.fit(iris.data, iris.target)\n\n # AdaBoost regression\n boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),\n random_state=0)\n parameters = {'n_estimators': (1, 2),\n 'base_estimator__max_depth': (1, 2)}\n clf = GridSearchCV(boost, parameters)\n clf.fit(boston.data, boston.target)\n\n\ndef test_pickle():\n # Check pickability.\n import pickle\n\n # Adaboost classifier\n for alg in ['SAMME', 'SAMME.R']:\n obj = AdaBoostClassifier(algorithm=alg)\n obj.fit(iris.data, iris.target)\n score = obj.score(iris.data, iris.target)\n s = pickle.dumps(obj)\n\n obj2 = pickle.loads(s)\n assert_equal(type(obj2), obj.__class__)\n score2 = obj2.score(iris.data, iris.target)\n assert_equal(score, score2)\n\n # Adaboost regressor\n obj = AdaBoostRegressor(random_state=0)\n obj.fit(boston.data, boston.target)\n score = obj.score(boston.data, boston.target)\n s = pickle.dumps(obj)\n\n obj2 = pickle.loads(s)\n assert_equal(type(obj2), obj.__class__)\n score2 = obj2.score(boston.data, boston.target)\n assert_equal(score, score2)\n\n\ndef test_importances():\n # Check variable importances.\n X, y = datasets.make_classification(n_samples=2000,\n n_features=10,\n n_informative=3,\n n_redundant=0,\n n_repeated=0,\n shuffle=False,\n random_state=1)\n\n for alg in ['SAMME', 'SAMME.R']:\n clf = AdaBoostClassifier(algorithm=alg)\n\n clf.fit(X, y)\n importances = clf.feature_importances_\n\n assert_equal(importances.shape[0], 10)\n assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),\n True)\n\n\ndef test_error():\n # Test that it gives proper exception on deficient input.\n assert_raises(ValueError,\n AdaBoostClassifier(learning_rate=-1).fit,\n X, y_class)\n\n assert_raises(ValueError,\n AdaBoostClassifier(algorithm=\"foo\").fit,\n X, y_class)\n\n assert_raises(ValueError,\n AdaBoostClassifier().fit,\n X, y_class, sample_weight=np.asarray([-1]))\n\n\ndef test_base_estimator():\n # Test different base estimators.\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.svm import SVC\n\n # XXX doesn't work with y_class because RF doesn't support classes_\n # Shouldn't AdaBoost run a LabelBinarizer?\n clf = AdaBoostClassifier(RandomForestClassifier())\n clf.fit(X, y_regr)\n\n clf = AdaBoostClassifier(SVC(), algorithm=\"SAMME\")\n clf.fit(X, y_class)\n\n from sklearn.ensemble import RandomForestRegressor\n from sklearn.svm import SVR\n\n clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)\n clf.fit(X, y_regr)\n\n clf = AdaBoostRegressor(SVR(), random_state=0)\n clf.fit(X, y_regr)\n\n # Check that an empty discrete ensemble 
fails in fit, not predict.\n X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]\n y_fail = [\"foo\", \"bar\", 1, 2]\n clf = AdaBoostClassifier(SVC(), algorithm=\"SAMME\")\n assert_raises_regexp(ValueError, \"worse than random\",\n clf.fit, X_fail, y_fail)\n\n\ndef test_sample_weight_missing():\n from sklearn.linear_model import LinearRegression\n from sklearn.cluster import KMeans\n\n clf = AdaBoostClassifier(LinearRegression(), algorithm=\"SAMME\")\n assert_raises(ValueError, clf.fit, X, y_regr)\n\n clf = AdaBoostRegressor(LinearRegression())\n assert_raises(ValueError, clf.fit, X, y_regr)\n\n clf = AdaBoostClassifier(KMeans(), algorithm=\"SAMME\")\n assert_raises(ValueError, clf.fit, X, y_regr)\n\n clf = AdaBoostRegressor(KMeans())\n assert_raises(ValueError, clf.fit, X, y_regr)\n\n\ndef test_sparse_classification():\n # Check classification with sparse input.\n\n class CustomSVC(SVC):\n \"\"\"SVC variant that records the nature of the training set.\"\"\"\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Modification on fit caries data type for later verification.\"\"\"\n super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)\n self.data_type_ = type(X)\n return self\n\n X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,\n n_features=5,\n return_indicator=True,\n random_state=42)\n # Flatten y to a 1d array\n y = np.ravel(y)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,\n dok_matrix]:\n X_train_sparse = sparse_format(X_train)\n X_test_sparse = sparse_format(X_test)\n\n # Trained on sparse format\n sparse_classifier = AdaBoostClassifier(\n base_estimator=CustomSVC(probability=True),\n random_state=1,\n algorithm=\"SAMME\"\n ).fit(X_train_sparse, y_train)\n\n # Trained on dense format\n dense_classifier = AdaBoostClassifier(\n base_estimator=CustomSVC(probability=True),\n random_state=1,\n algorithm=\"SAMME\"\n ).fit(X_train, y_train)\n\n # predict\n sparse_results = sparse_classifier.predict(X_test_sparse)\n dense_results = dense_classifier.predict(X_test)\n assert_array_equal(sparse_results, dense_results)\n\n # decision_function\n sparse_results = sparse_classifier.decision_function(X_test_sparse)\n dense_results = dense_classifier.decision_function(X_test)\n assert_array_equal(sparse_results, dense_results)\n\n # predict_log_proba\n sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)\n dense_results = dense_classifier.predict_log_proba(X_test)\n assert_array_equal(sparse_results, dense_results)\n\n # predict_proba\n sparse_results = sparse_classifier.predict_proba(X_test_sparse)\n dense_results = dense_classifier.predict_proba(X_test)\n assert_array_equal(sparse_results, dense_results)\n\n # score\n sparse_results = sparse_classifier.score(X_test_sparse, y_test)\n dense_results = dense_classifier.score(X_test, y_test)\n assert_array_equal(sparse_results, dense_results)\n\n # staged_decision_function\n sparse_results = sparse_classifier.staged_decision_function(\n X_test_sparse)\n dense_results = dense_classifier.staged_decision_function(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_equal(sprase_res, dense_res)\n\n # staged_predict\n sparse_results = sparse_classifier.staged_predict(X_test_sparse)\n dense_results = dense_classifier.staged_predict(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_equal(sprase_res, dense_res)\n\n # staged_predict_proba\n 
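# Each staged_* method yields one result per boosting stage; the zip()
# pattern used throughout this test pairs the sparse-input and dense-input
# generators so equality is checked at every stage rather than only on the
# final ensemble. Minimal sketch of the pattern (names as defined earlier
# in this test):
#   for s, d in zip(sparse_classifier.staged_predict(X_test_sparse),
#                   dense_classifier.staged_predict(X_test)):
#       assert_array_equal(s, d)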
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)\n dense_results = dense_classifier.staged_predict_proba(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_equal(sprase_res, dense_res)\n\n # staged_score\n sparse_results = sparse_classifier.staged_score(X_test_sparse,\n y_test)\n dense_results = dense_classifier.staged_score(X_test, y_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_equal(sprase_res, dense_res)\n\n # Verify sparsity of data is maintained during training\n types = [i.data_type_ for i in sparse_classifier.estimators_]\n\n assert all([(t == csc_matrix or t == csr_matrix)\n for t in types])\n\n\ndef test_sparse_regression():\n # Check regression with sparse input.\n\n class CustomSVR(SVR):\n \"\"\"SVR variant that records the nature of the training set.\"\"\"\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Modification on fit caries data type for later verification.\"\"\"\n super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)\n self.data_type_ = type(X)\n return self\n\n X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,\n random_state=42)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,\n dok_matrix]:\n X_train_sparse = sparse_format(X_train)\n X_test_sparse = sparse_format(X_test)\n\n # Trained on sparse format\n sparse_classifier = AdaBoostRegressor(\n base_estimator=CustomSVR(),\n random_state=1\n ).fit(X_train_sparse, y_train)\n\n # Trained on dense format\n dense_classifier = dense_results = AdaBoostRegressor(\n base_estimator=CustomSVR(),\n random_state=1\n ).fit(X_train, y_train)\n\n # predict\n sparse_results = sparse_classifier.predict(X_test_sparse)\n dense_results = dense_classifier.predict(X_test)\n assert_array_equal(sparse_results, dense_results)\n\n # staged_predict\n sparse_results = sparse_classifier.staged_predict(X_test_sparse)\n dense_results = dense_classifier.staged_predict(X_test)\n for sprase_res, dense_res in zip(sparse_results, dense_results):\n assert_array_equal(sprase_res, dense_res)\n\n types = [i.data_type_ for i in sparse_classifier.estimators_]\n\n assert all([(t == csc_matrix or t == csr_matrix)\n for t in types])\n", "from __future__ import division, print_function\n\nimport numpy as np\nfrom itertools import product\nimport warnings\nfrom scipy.sparse import csr_matrix\n\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn import ensemble\n\nfrom sklearn.datasets import make_multilabel_classification\nfrom sklearn.random_projection import sparse_random_matrix\nfrom sklearn.utils.validation import check_array, check_consistent_length\nfrom sklearn.utils.validation import check_random_state\n\nfrom sklearn.utils.testing import assert_raises, clean_warning_registry\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_warns\n\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import coverage_error\nfrom sklearn.metrics import label_ranking_average_precision_score\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import label_ranking_loss\nfrom 
sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\n\nfrom sklearn.metrics.base import UndefinedMetricWarning\n\n\n###############################################################################\n# Utilities for testing\n\ndef make_prediction(dataset=None, binary=False):\n \"\"\"Make some classification predictions on a toy dataset using a SVC\n\n If binary is True restrict to a binary classification problem instead of a\n multiclass classification problem\n \"\"\"\n\n if dataset is None:\n # import some data to play with\n dataset = datasets.load_iris()\n\n X = dataset.data\n y = dataset.target\n\n if binary:\n # restrict to a binary classification task\n X, y = X[y < 2], y[y < 2]\n\n n_samples, n_features = X.shape\n p = np.arange(n_samples)\n\n rng = check_random_state(37)\n rng.shuffle(p)\n X, y = X[p], y[p]\n half = int(n_samples / 2)\n\n # add noisy features to make the problem harder and avoid perfect results\n rng = np.random.RandomState(0)\n X = np.c_[X, rng.randn(n_samples, 200 * n_features)]\n\n # run classifier, get class probabilities and label predictions\n clf = svm.SVC(kernel='linear', probability=True, random_state=0)\n probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])\n\n if binary:\n # only interested in probabilities of the positive case\n # XXX: do we really want a special API for the binary case?\n probas_pred = probas_pred[:, 1]\n\n y_pred = clf.predict(X[half:])\n y_true = y[half:]\n return y_true, y_pred, probas_pred\n\n\n###############################################################################\n# Tests\n\ndef _auc(y_true, y_score):\n \"\"\"Alternative implementation to check for correctness of\n `roc_auc_score`.\"\"\"\n pos_label = np.unique(y_true)[1]\n\n # Count the number of times positive samples are correctly ranked above\n # negative samples.\n pos = y_score[y_true == pos_label]\n neg = y_score[y_true != pos_label]\n diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)\n n_correct = np.sum(diff_matrix > 0)\n\n return n_correct / float(len(pos) * len(neg))\n\n\ndef _average_precision(y_true, y_score):\n \"\"\"Alternative implementation to check for correctness of\n `average_precision_score`.\"\"\"\n pos_label = np.unique(y_true)[1]\n n_pos = np.sum(y_true == pos_label)\n order = np.argsort(y_score)[::-1]\n y_score = y_score[order]\n y_true = y_true[order]\n\n score = 0\n for i in range(len(y_score)):\n if y_true[i] == pos_label:\n # Compute precision up to document i\n # i.e, percentage of relevant documents up to document i.\n prec = 0\n for j in range(0, i + 1):\n if y_true[j] == pos_label:\n prec += 1.0\n prec /= (i + 1.0)\n score += prec\n\n return score / n_pos\n\n\ndef test_roc_curve():\n # Test Area under Receiver Operating Characteristic (ROC) curve\n y_true, _, probas_pred = make_prediction(binary=True)\n\n fpr, tpr, thresholds = roc_curve(y_true, probas_pred)\n roc_auc = auc(fpr, tpr)\n expected_auc = _auc(y_true, probas_pred)\n assert_array_almost_equal(roc_auc, expected_auc, decimal=2)\n assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))\n assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n\ndef test_roc_curve_end_points():\n # Make sure that roc_curve returns a curve start at 0 and ending and\n # 1 even in corner cases\n rng = np.random.RandomState(0)\n y_true = np.array([0] * 50 + [1] * 50)\n y_pred = rng.randint(3, size=100)\n fpr, tpr, thr = roc_curve(y_true, y_pred)\n assert_equal(fpr[0], 0)\n assert_equal(fpr[-1], 1)\n assert_equal(fpr.shape, 
tpr.shape)\n assert_equal(fpr.shape, thr.shape)\n\n\ndef test_roc_returns_consistency():\n # Test whether the returned threshold matches up with tpr\n # make small toy dataset\n y_true, _, probas_pred = make_prediction(binary=True)\n fpr, tpr, thresholds = roc_curve(y_true, probas_pred)\n\n # use the given thresholds to determine the tpr\n tpr_correct = []\n for t in thresholds:\n tp = np.sum((probas_pred >= t) & y_true)\n p = np.sum(y_true)\n tpr_correct.append(1.0 * tp / p)\n\n # compare tpr and tpr_correct to see if the thresholds' order was correct\n assert_array_almost_equal(tpr, tpr_correct, decimal=2)\n assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n\ndef test_roc_nonrepeating_thresholds():\n # Test to ensure that we don't return spurious repeating thresholds.\n # Duplicated thresholds can arise due to machine precision issues.\n dataset = datasets.load_digits()\n X = dataset['data']\n y = dataset['target']\n\n # This random forest classifier can only return probabilities\n # significant to two decimal places\n clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)\n\n # How well can the classifier predict whether a digit is less than 5?\n # This task contributes floating point roundoff errors to the probabilities\n train, test = slice(None, None, 2), slice(1, None, 2)\n probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])\n y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here\n y_true = [yy < 5 for yy in y[test]]\n\n # Check for repeating values in the thresholds\n fpr, tpr, thresholds = roc_curve(y_true, y_score)\n assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)\n\n\ndef test_roc_curve_multi():\n # roc_curve not applicable for multi-class problems\n y_true, _, probas_pred = make_prediction(binary=False)\n\n assert_raises(ValueError, roc_curve, y_true, probas_pred)\n\n\ndef test_roc_curve_confidence():\n # roc_curve for confidence scores\n y_true, _, probas_pred = make_prediction(binary=True)\n\n fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.90, decimal=2)\n assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n\ndef test_roc_curve_hard():\n # roc_curve for hard decisions\n y_true, pred, probas_pred = make_prediction(binary=True)\n\n # always predict one\n trivial_pred = np.ones(y_true.shape)\n fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.50, decimal=2)\n assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n # always predict zero\n trivial_pred = np.zeros(y_true.shape)\n fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.50, decimal=2)\n assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n # hard decisions\n fpr, tpr, thresholds = roc_curve(y_true, pred)\n roc_auc = auc(fpr, tpr)\n assert_array_almost_equal(roc_auc, 0.78, decimal=2)\n assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n\ndef test_roc_curve_one_label():\n y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]\n # assert there are warnings\n w = UndefinedMetricWarning\n fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)\n # all true labels, all fpr should be nan\n assert_array_equal(fpr,\n np.nan * np.ones(len(thresholds)))\n 
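# With only the positive class in y_true there are no negative samples,
# so the false-positive-rate denominator is zero: roc_curve emits the
# UndefinedMetricWarning asserted above and returns NaN for every fpr
# entry, while the returned arrays keep their usual matching shapes, e.g.
# (illustrative call, not part of the original test):
#   fpr, tpr, thr = roc_curve([1, 1, 1], [0.2, 0.6, 0.9])  # fpr all NaN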
assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n # assert there are warnings\n fpr, tpr, thresholds = assert_warns(w, roc_curve,\n [1 - x for x in y_true],\n y_pred)\n # all negative labels, all tpr should be nan\n assert_array_equal(tpr,\n np.nan * np.ones(len(thresholds)))\n assert_equal(fpr.shape, tpr.shape)\n assert_equal(fpr.shape, thresholds.shape)\n\n\ndef test_roc_curve_toydata():\n # Binary classification\n y_true = [0, 1]\n y_score = [0, 1]\n tpr, fpr, _ = roc_curve(y_true, y_score)\n roc_auc = roc_auc_score(y_true, y_score)\n assert_array_almost_equal(tpr, [0, 1])\n assert_array_almost_equal(fpr, [1, 1])\n assert_almost_equal(roc_auc, 1.)\n\n y_true = [0, 1]\n y_score = [1, 0]\n tpr, fpr, _ = roc_curve(y_true, y_score)\n roc_auc = roc_auc_score(y_true, y_score)\n assert_array_almost_equal(tpr, [0, 1, 1])\n assert_array_almost_equal(fpr, [0, 0, 1])\n assert_almost_equal(roc_auc, 0.)\n\n y_true = [1, 0]\n y_score = [1, 1]\n tpr, fpr, _ = roc_curve(y_true, y_score)\n roc_auc = roc_auc_score(y_true, y_score)\n assert_array_almost_equal(tpr, [0, 1])\n assert_array_almost_equal(fpr, [0, 1])\n assert_almost_equal(roc_auc, 0.5)\n\n y_true = [1, 0]\n y_score = [1, 0]\n tpr, fpr, _ = roc_curve(y_true, y_score)\n roc_auc = roc_auc_score(y_true, y_score)\n assert_array_almost_equal(tpr, [0, 1])\n assert_array_almost_equal(fpr, [1, 1])\n assert_almost_equal(roc_auc, 1.)\n\n y_true = [1, 0]\n y_score = [0.5, 0.5]\n tpr, fpr, _ = roc_curve(y_true, y_score)\n roc_auc = roc_auc_score(y_true, y_score)\n assert_array_almost_equal(tpr, [0, 1])\n assert_array_almost_equal(fpr, [0, 1])\n assert_almost_equal(roc_auc, .5)\n\n y_true = [0, 0]\n y_score = [0.25, 0.75]\n tpr, fpr, _ = roc_curve(y_true, y_score)\n assert_raises(ValueError, roc_auc_score, y_true, y_score)\n assert_array_almost_equal(tpr, [0., 0.5, 1.])\n assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])\n\n y_true = [1, 1]\n y_score = [0.25, 0.75]\n tpr, fpr, _ = roc_curve(y_true, y_score)\n assert_raises(ValueError, roc_auc_score, y_true, y_score)\n assert_array_almost_equal(tpr, [np.nan, np.nan])\n assert_array_almost_equal(fpr, [0.5, 1.])\n\n # Multi-label classification task\n y_true = np.array([[0, 1], [0, 1]])\n y_score = np.array([[0, 1], [0, 1]])\n assert_raises(ValueError, roc_auc_score, y_true, y_score, average=\"macro\")\n assert_raises(ValueError, roc_auc_score, y_true, y_score,\n average=\"weighted\")\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"samples\"), 1.)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"micro\"), 1.)\n\n y_true = np.array([[0, 1], [0, 1]])\n y_score = np.array([[0, 1], [1, 0]])\n assert_raises(ValueError, roc_auc_score, y_true, y_score, average=\"macro\")\n assert_raises(ValueError, roc_auc_score, y_true, y_score,\n average=\"weighted\")\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"samples\"), 0.5)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"micro\"), 0.5)\n\n y_true = np.array([[1, 0], [0, 1]])\n y_score = np.array([[0, 1], [1, 0]])\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"macro\"), 0)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"weighted\"), 0)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"samples\"), 0)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"micro\"), 0)\n\n y_true = np.array([[1, 0], [0, 1]])\n y_score = np.array([[0.5, 0.5], [0.5, 0.5]])\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"macro\"), 
.5)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"weighted\"), .5)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"samples\"), .5)\n assert_almost_equal(roc_auc_score(y_true, y_score, average=\"micro\"), .5)\n\n\ndef test_auc():\n # Test Area Under Curve (AUC) computation\n x = [0, 1]\n y = [0, 1]\n assert_array_almost_equal(auc(x, y), 0.5)\n x = [1, 0]\n y = [0, 1]\n assert_array_almost_equal(auc(x, y), 0.5)\n x = [1, 0, 0]\n y = [0, 1, 1]\n assert_array_almost_equal(auc(x, y), 0.5)\n x = [0, 1]\n y = [1, 1]\n assert_array_almost_equal(auc(x, y), 1)\n x = [0, 0.5, 1]\n y = [0, 0.5, 1]\n assert_array_almost_equal(auc(x, y), 0.5)\n\n\ndef test_auc_duplicate_values():\n # Test Area Under Curve (AUC) computation with duplicate values\n\n # auc() was previously sorting the x and y arrays according to the indices\n # from numpy.argsort(x), which was reordering the tied 0's in this example\n # and resulting in an incorrect area computation. This test detects the\n # error.\n x = [-2.0, 0.0, 0.0, 0.0, 1.0]\n y1 = [2.0, 0.0, 0.5, 1.0, 1.0]\n y2 = [2.0, 1.0, 0.0, 0.5, 1.0]\n y3 = [2.0, 1.0, 0.5, 0.0, 1.0]\n\n for y in (y1, y2, y3):\n assert_array_almost_equal(auc(x, y, reorder=True), 3.0)\n\n\ndef test_auc_errors():\n # Incompatible shapes\n assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])\n\n # Too few x values\n assert_raises(ValueError, auc, [0.0], [0.1])\n\n # x is not in order\n assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])\n\n\ndef test_auc_score_non_binary_class():\n # Test that roc_auc_score function returns an error when trying\n # to compute AUC for non-binary class values.\n rng = check_random_state(404)\n y_pred = rng.rand(10)\n # y_true contains only one class value\n y_true = np.zeros(10, dtype=\"int\")\n assert_raise_message(ValueError, \"ROC AUC score is not defined\",\n roc_auc_score, y_true, y_pred)\n y_true = np.ones(10, dtype=\"int\")\n assert_raise_message(ValueError, \"ROC AUC score is not defined\",\n roc_auc_score, y_true, y_pred)\n y_true = -np.ones(10, dtype=\"int\")\n assert_raise_message(ValueError, \"ROC AUC score is not defined\",\n roc_auc_score, y_true, y_pred)\n # y_true contains three different class values\n y_true = rng.randint(0, 3, size=10)\n assert_raise_message(ValueError, \"multiclass format is not supported\",\n roc_auc_score, y_true, y_pred)\n\n clean_warning_registry()\n with warnings.catch_warnings(record=True):\n rng = check_random_state(404)\n y_pred = rng.rand(10)\n # y_true contains only one class value\n y_true = np.zeros(10, dtype=\"int\")\n assert_raise_message(ValueError, \"ROC AUC score is not defined\",\n roc_auc_score, y_true, y_pred)\n y_true = np.ones(10, dtype=\"int\")\n assert_raise_message(ValueError, \"ROC AUC score is not defined\",\n roc_auc_score, y_true, y_pred)\n y_true = -np.ones(10, dtype=\"int\")\n assert_raise_message(ValueError, \"ROC AUC score is not defined\",\n roc_auc_score, y_true, y_pred)\n\n # y_true contains three different class values\n y_true = rng.randint(0, 3, size=10)\n assert_raise_message(ValueError, \"multiclass format is not supported\",\n roc_auc_score, y_true, y_pred)\n\n\ndef test_precision_recall_curve():\n y_true, _, probas_pred = make_prediction(binary=True)\n _test_precision_recall_curve(y_true, probas_pred)\n\n # Use {-1, 1} for labels; make sure original labels aren't modified\n y_true[np.where(y_true == 0)] = -1\n y_true_copy = y_true.copy()\n _test_precision_recall_curve(y_true, probas_pred)\n assert_array_equal(y_true_copy, y_true)\n\n 
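# The hand-built toy case below can be verified by thresholding at each
# score: with labels [1, 0, 0, 1] and scores [1, 2, 3, 4], the highest
# threshold keeps only the top-scored (positive) sample, giving precision
# 1 and recall 0.5, and the curve is closed with the conventional final
# point (precision 1, recall 0); that extra point is why p has exactly
# one more entry than the thresholds array:
#   p, r, t = precision_recall_curve([1, 0, 0, 1], [1, 2, 3, 4])
#   # p[-2:] == [1., 1.], r[-2:] == [0.5, 0.], p.size == t.size + 1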
labels = [1, 0, 0, 1]\n predict_probas = [1, 2, 3, 4]\n p, r, t = precision_recall_curve(labels, predict_probas)\n assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))\n assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))\n assert_array_almost_equal(t, np.array([1, 2, 3, 4]))\n assert_equal(p.size, r.size)\n assert_equal(p.size, t.size + 1)\n\n\ndef test_precision_recall_curve_pos_label():\n y_true, _, probas_pred = make_prediction(binary=False)\n pos_label = 2\n p, r, thresholds = precision_recall_curve(y_true,\n probas_pred[:, pos_label],\n pos_label=pos_label)\n p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,\n probas_pred[:, pos_label])\n assert_array_almost_equal(p, p2)\n assert_array_almost_equal(r, r2)\n assert_array_almost_equal(thresholds, thresholds2)\n assert_equal(p.size, r.size)\n assert_equal(p.size, thresholds.size + 1)\n\n\ndef _test_precision_recall_curve(y_true, probas_pred):\n # Test Precision-Recall and aread under PR curve\n p, r, thresholds = precision_recall_curve(y_true, probas_pred)\n precision_recall_auc = auc(r, p)\n assert_array_almost_equal(precision_recall_auc, 0.85, 2)\n assert_array_almost_equal(precision_recall_auc,\n average_precision_score(y_true, probas_pred))\n assert_almost_equal(_average_precision(y_true, probas_pred),\n precision_recall_auc, 1)\n assert_equal(p.size, r.size)\n assert_equal(p.size, thresholds.size + 1)\n # Smoke test in the case of proba having only one value\n p, r, thresholds = precision_recall_curve(y_true,\n np.zeros_like(probas_pred))\n precision_recall_auc = auc(r, p)\n assert_array_almost_equal(precision_recall_auc, 0.75, 3)\n assert_equal(p.size, r.size)\n assert_equal(p.size, thresholds.size + 1)\n\n\ndef test_precision_recall_curve_errors():\n # Contains non-binary labels\n assert_raises(ValueError, precision_recall_curve,\n [0, 1, 2], [[0.0], [1.0], [1.0]])\n\n\ndef test_precision_recall_curve_toydata():\n with np.errstate(all=\"raise\"):\n # Binary classification\n y_true = [0, 1]\n y_score = [0, 1]\n p, r, _ = precision_recall_curve(y_true, y_score)\n auc_prc = average_precision_score(y_true, y_score)\n assert_array_almost_equal(p, [1, 1])\n assert_array_almost_equal(r, [1, 0])\n assert_almost_equal(auc_prc, 1.)\n\n y_true = [0, 1]\n y_score = [1, 0]\n p, r, _ = precision_recall_curve(y_true, y_score)\n auc_prc = average_precision_score(y_true, y_score)\n assert_array_almost_equal(p, [0.5, 0., 1.])\n assert_array_almost_equal(r, [1., 0., 0.])\n assert_almost_equal(auc_prc, 0.25)\n\n y_true = [1, 0]\n y_score = [1, 1]\n p, r, _ = precision_recall_curve(y_true, y_score)\n auc_prc = average_precision_score(y_true, y_score)\n assert_array_almost_equal(p, [0.5, 1])\n assert_array_almost_equal(r, [1., 0])\n assert_almost_equal(auc_prc, .75)\n\n y_true = [1, 0]\n y_score = [1, 0]\n p, r, _ = precision_recall_curve(y_true, y_score)\n auc_prc = average_precision_score(y_true, y_score)\n assert_array_almost_equal(p, [1, 1])\n assert_array_almost_equal(r, [1, 0])\n assert_almost_equal(auc_prc, 1.)\n\n y_true = [1, 0]\n y_score = [0.5, 0.5]\n p, r, _ = precision_recall_curve(y_true, y_score)\n auc_prc = average_precision_score(y_true, y_score)\n assert_array_almost_equal(p, [0.5, 1])\n assert_array_almost_equal(r, [1, 0.])\n assert_almost_equal(auc_prc, .75)\n\n y_true = [0, 0]\n y_score = [0.25, 0.75]\n assert_raises(Exception, precision_recall_curve, y_true, y_score)\n assert_raises(Exception, average_precision_score, y_true, y_score)\n\n y_true = [1, 1]\n y_score = [0.25, 0.75]\n p, 
r, _ = precision_recall_curve(y_true, y_score)\n assert_almost_equal(average_precision_score(y_true, y_score), 1.)\n assert_array_almost_equal(p, [1., 1., 1.])\n assert_array_almost_equal(r, [1, 0.5, 0.])\n\n # Multi-label classification task\n y_true = np.array([[0, 1], [0, 1]])\n y_score = np.array([[0, 1], [0, 1]])\n assert_raises(Exception, average_precision_score, y_true, y_score,\n average=\"macro\")\n assert_raises(Exception, average_precision_score, y_true, y_score,\n average=\"weighted\")\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"samples\"), 1.)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"micro\"), 1.)\n\n y_true = np.array([[0, 1], [0, 1]])\n y_score = np.array([[0, 1], [1, 0]])\n assert_raises(Exception, average_precision_score, y_true, y_score,\n average=\"macro\")\n assert_raises(Exception, average_precision_score, y_true, y_score,\n average=\"weighted\")\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"samples\"), 0.625)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"micro\"), 0.625)\n\n y_true = np.array([[1, 0], [0, 1]])\n y_score = np.array([[0, 1], [1, 0]])\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"macro\"), 0.25)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"weighted\"), 0.25)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"samples\"), 0.25)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"micro\"), 0.25)\n\n y_true = np.array([[1, 0], [0, 1]])\n y_score = np.array([[0.5, 0.5], [0.5, 0.5]])\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"macro\"), 0.75)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"weighted\"), 0.75)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"samples\"), 0.75)\n assert_almost_equal(average_precision_score(y_true, y_score,\n average=\"micro\"), 0.75)\n\n\ndef test_score_scale_invariance():\n # Test that average_precision_score and roc_auc_score are invariant by\n # the scaling or shifting of probabilities\n y_true, _, probas_pred = make_prediction(binary=True)\n\n roc_auc = roc_auc_score(y_true, probas_pred)\n roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)\n roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)\n assert_equal(roc_auc, roc_auc_scaled)\n assert_equal(roc_auc, roc_auc_shifted)\n\n pr_auc = average_precision_score(y_true, probas_pred)\n pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)\n pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)\n assert_equal(pr_auc, pr_auc_scaled)\n assert_equal(pr_auc, pr_auc_shifted)\n\n\ndef check_lrap_toy(lrap_score):\n # Check on several small example that it works\n assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)\n assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)\n assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)\n\n assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)\n assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)\n assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)\n assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)\n assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),\n (2 / 3 + 1 / 1) / 2)\n assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),\n (2 / 3 + 1 / 2) / 2)\n\n 
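# Worked example for the assertion above: with y_true [1, 1, 0] and
# scores [0.25, 0.5, 0.75], the two relevant labels sit at ranks 2 and 3
# of the descending-score ranking. The rank-2 label has one relevant
# label at or above it (precision 1/2), the rank-3 label has two
# (precision 2/3), and LRAP averages these over the relevant labels:
# (2 / 3 + 1 / 2) / 2, exactly the expected value.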
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)\n assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)\n assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),\n (1 / 2 + 2 / 3) / 2)\n assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)\n assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),\n (1 + 2 / 3) / 2)\n assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)\n assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)\n\n assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)\n assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)\n assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),\n (1 + 2 / 3) / 2)\n assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)\n assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),\n (1 / 2 + 2 / 3) / 2)\n assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)\n assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)\n\n # Tie handling\n assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)\n assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)\n assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)\n\n assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)\n assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)\n assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)\n assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)\n assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),\n (2 / 3 + 1 / 2) / 2)\n assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),\n (2 / 3 + 1 / 2) / 2)\n assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)\n\n assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)\n\n assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),\n 3 / 4)\n\n\ndef check_zero_or_all_relevant_labels(lrap_score):\n random_state = check_random_state(0)\n\n for n_labels in range(2, 5):\n y_score = random_state.uniform(size=(1, n_labels))\n y_score_ties = np.zeros_like(y_score)\n\n # No relevant labels\n y_true = np.zeros((1, n_labels))\n assert_equal(lrap_score(y_true, y_score), 1.)\n assert_equal(lrap_score(y_true, y_score_ties), 1.)\n\n # Only relevant labels\n y_true = np.ones((1, n_labels))\n assert_equal(lrap_score(y_true, y_score), 1.)\n assert_equal(lrap_score(y_true, y_score_ties), 1.)\n\n # Degenerate case: only one label\n assert_almost_equal(lrap_score([[1], [0], [1], [0]],\n [[0.5], [0.5], [0.5], [0.5]]), 1.)\n\n\ndef check_lrap_error_raised(lrap_score):\n # Raise value error if not appropriate format\n assert_raises(ValueError, lrap_score,\n [0, 1, 0], [0.25, 0.3, 0.2])\n assert_raises(ValueError, lrap_score, [0, 1, 2],\n [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])\n assert_raises(ValueError, lrap_score, [(0), (1), (2)],\n [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])\n\n # Check that that y_true.shape != y_score.shape raise the proper exception\n assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])\n assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])\n assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])\n assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])\n assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])\n assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])\n\n\ndef 
check_lrap_only_ties(lrap_score):\n # Check tie handling in score\n # Basic check with only ties and increasing label space\n for n_labels in range(2, 10):\n y_score = np.ones((1, n_labels))\n\n # Check for growing number of consecutive relevant\n for n_relevant in range(1, n_labels):\n # Check for a bunch of positions\n for pos in range(n_labels - n_relevant):\n y_true = np.zeros((1, n_labels))\n y_true[0, pos:pos + n_relevant] = 1\n assert_almost_equal(lrap_score(y_true, y_score),\n n_relevant / n_labels)\n\n\ndef check_lrap_without_tie_and_increasing_score(lrap_score):\n # Check that Label ranking average precision works for various\n # Basic check with increasing label space size and decreasing score\n for n_labels in range(2, 10):\n y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)\n\n # First and last\n y_true = np.zeros((1, n_labels))\n y_true[0, 0] = 1\n y_true[0, -1] = 1\n assert_almost_equal(lrap_score(y_true, y_score),\n (2 / n_labels + 1) / 2)\n\n # Check for growing number of consecutive relevant label\n for n_relevant in range(1, n_labels):\n # Check for a bunch of position\n for pos in range(n_labels - n_relevant):\n y_true = np.zeros((1, n_labels))\n y_true[0, pos:pos + n_relevant] = 1\n assert_almost_equal(lrap_score(y_true, y_score),\n sum((r + 1) / ((pos + r + 1) * n_relevant)\n for r in range(n_relevant)))\n\n\ndef _my_lrap(y_true, y_score):\n \"\"\"Simple implementation of label ranking average precision\"\"\"\n check_consistent_length(y_true, y_score)\n y_true = check_array(y_true)\n y_score = check_array(y_score)\n n_samples, n_labels = y_true.shape\n score = np.empty((n_samples, ))\n for i in range(n_samples):\n # The best rank correspond to 1. Rank higher than 1 are worse.\n # The best inverse ranking correspond to n_labels.\n unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)\n n_ranks = unique_rank.size\n rank = n_ranks - inv_rank\n\n # Rank need to be corrected to take into account ties\n # ex: rank 1 ex aequo means that both label are rank 2.\n corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()\n rank = corr_rank[rank]\n\n relevant = y_true[i].nonzero()[0]\n if relevant.size == 0 or relevant.size == n_labels:\n score[i] = 1\n continue\n\n score[i] = 0.\n for label in relevant:\n # Let's count the number of relevant label with better rank\n # (smaller rank).\n n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)\n\n # Weight by the rank of the actual label\n score[i] += n_ranked_above / rank[label]\n\n score[i] /= relevant.size\n\n return score.mean()\n\n\ndef check_alternative_lrap_implementation(lrap_score, n_classes=5,\n n_samples=20, random_state=0):\n _, y_true = make_multilabel_classification(n_features=1,\n allow_unlabeled=False,\n return_indicator=True,\n random_state=random_state,\n n_classes=n_classes,\n n_samples=n_samples)\n\n # Score with ties\n y_score = sparse_random_matrix(n_components=y_true.shape[0],\n n_features=y_true.shape[1],\n random_state=random_state)\n\n if hasattr(y_score, \"toarray\"):\n y_score = y_score.toarray()\n score_lrap = label_ranking_average_precision_score(y_true, y_score)\n score_my_lrap = _my_lrap(y_true, y_score)\n assert_almost_equal(score_lrap, score_my_lrap)\n\n # Uniform score\n random_state = check_random_state(random_state)\n y_score = random_state.uniform(size=(n_samples, n_classes))\n score_lrap = label_ranking_average_precision_score(y_true, y_score)\n score_my_lrap = _my_lrap(y_true, y_score)\n assert_almost_equal(score_lrap, score_my_lrap)\n\n\ndef 
test_label_ranking_avp():\n for fn in [label_ranking_average_precision_score, _my_lrap]:\n yield check_lrap_toy, fn\n yield check_lrap_without_tie_and_increasing_score, fn\n yield check_lrap_only_ties, fn\n yield check_zero_or_all_relevant_labels, fn\n yield check_lrap_error_raised, label_ranking_average_precision_score\n\n for n_samples, n_classes, random_state in product((1, 2, 8, 20),\n (2, 5, 10),\n range(1)):\n yield (check_alternative_lrap_implementation,\n label_ranking_average_precision_score,\n n_classes, n_samples, random_state)\n\n\ndef test_coverage_error():\n # Toy case\n assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)\n assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)\n assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)\n assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)\n\n assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)\n assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)\n assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)\n assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)\n assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)\n assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)\n assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)\n assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)\n\n assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)\n assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)\n assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)\n assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)\n assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)\n assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)\n assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)\n assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)\n\n assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)\n assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)\n assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)\n assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)\n assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)\n assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)\n assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)\n assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)\n\n # Non-trivial case\n assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],\n [[0.1, 10., -3], [0, 1, 3]]),\n (1 + 3) / 2.)\n\n assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],\n [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),\n (1 + 3 + 3) / 3.)\n\n assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],\n [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),\n (1 + 3 + 3) / 3.)\n\n\ndef test_coverage_tie_handling():\n assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)\n assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)\n assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)\n assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)\n\n assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)\n assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)\n 
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)\n assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)\n assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)\n assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)\n assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)\n assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)\n\n\ndef test_label_ranking_loss():\n assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)\n assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)\n\n assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),\n 0)\n assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),\n 1 / 2)\n assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),\n 0)\n assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),\n 2 / 2)\n assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),\n 1 / 2)\n assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),\n 2 / 2)\n\n # Undefined metrics - the ranking doesn't matter\n assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)\n assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)\n assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)\n assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)\n\n assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),\n 0)\n assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),\n 0)\n assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),\n 0)\n assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)\n\n # Non-trivial case\n assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],\n [[0.1, 10., -3], [0, 1, 3]]),\n (0 + 2 / 2) / 2.)\n\n assert_almost_equal(label_ranking_loss(\n [[0, 1, 0], [1, 1, 0], [0, 1, 1]],\n [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),\n (0 + 2 / 2 + 1 / 2) / 3.)\n\n assert_almost_equal(label_ranking_loss(\n [[0, 1, 0], [1, 1, 0], [0, 1, 1]],\n [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),\n (0 + 2 / 2 + 1 / 2) / 3.)\n\n # Sparse csr matrices\n assert_almost_equal(label_ranking_loss(\n csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),\n [[0.1, 10, -3], [3, 1, 3]]),\n (0 + 2 / 2) / 2.)\n\n\ndef test_ranking_appropriate_input_shape():\n # Check that y_true.shape != y_score.shape raises the proper exception\n assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])\n assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])\n assert_raises(ValueError, label_ranking_loss,\n [[0, 1], [0, 1]], [[0], [1]])\n\n assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])\n assert_raises(ValueError, label_ranking_loss,\n [[0], [1]], [[0, 1], [0, 1]])\n assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])\n\n\ndef test_ranking_loss_ties_handling():\n # Tie handling\n assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)\n assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)\n assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),\n 1 / 2)\n assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),\n 1 / 2)\n assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)\n assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)\n assert_almost_equal(label_ranking_loss([[1, 0, 
1]], [[0.25, 0.5, 0.5]]), 1)\n assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)\n", "# Authors: Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom sklearn.utils.testing import (assert_array_almost_equal, assert_equal,\n assert_greater, assert_almost_equal,\n assert_greater_equal,\n assert_array_equal,\n assert_raises,\n assert_warns_message)\nfrom sklearn.datasets import make_classification, make_blobs\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import Ridge\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.metrics import brier_score_loss, log_loss\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration\nfrom sklearn.calibration import calibration_curve\n\n\ndef test_calibration():\n \"\"\"Test calibration objects with isotonic and sigmoid\"\"\"\n n_samples = 100\n X, y = make_classification(n_samples=2 * n_samples, n_features=6,\n random_state=42)\n sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)\n\n X -= X.min() # MultinomialNB only allows positive X\n\n # split train and test\n X_train, y_train, sw_train = \\\n X[:n_samples], y[:n_samples], sample_weight[:n_samples]\n X_test, y_test = X[n_samples:], y[n_samples:]\n\n # Naive-Bayes\n clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)\n prob_pos_clf = clf.predict_proba(X_test)[:, 1]\n\n pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)\n assert_raises(ValueError, pc_clf.fit, X, y)\n\n # Naive Bayes with calibration\n for this_X_train, this_X_test in [(X_train, X_test),\n (sparse.csr_matrix(X_train),\n sparse.csr_matrix(X_test))]:\n for method in ['isotonic', 'sigmoid']:\n pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)\n # Note that this fit overwrites the fit on the entire training\n # set\n pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)\n prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]\n\n # Check that brier score has improved after calibration\n assert_greater(brier_score_loss(y_test, prob_pos_clf),\n brier_score_loss(y_test, prob_pos_pc_clf))\n\n # Check invariance against relabeling [0, 1] -> [1, 2]\n pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)\n prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]\n assert_array_almost_equal(prob_pos_pc_clf,\n prob_pos_pc_clf_relabeled)\n\n # Check invariance against relabeling [0, 1] -> [-1, 1]\n pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)\n prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]\n assert_array_almost_equal(prob_pos_pc_clf,\n prob_pos_pc_clf_relabeled)\n\n # Check invariance against relabeling [0, 1] -> [1, 0]\n pc_clf.fit(this_X_train, (y_train + 1) % 2,\n sample_weight=sw_train)\n prob_pos_pc_clf_relabeled = \\\n pc_clf.predict_proba(this_X_test)[:, 1]\n if method == \"sigmoid\":\n assert_array_almost_equal(prob_pos_pc_clf,\n 1 - prob_pos_pc_clf_relabeled)\n else:\n # Isotonic calibration is not invariant against relabeling\n # but should improve in both cases\n assert_greater(brier_score_loss(y_test, prob_pos_clf),\n brier_score_loss((y_test + 1) % 2,\n prob_pos_pc_clf_relabeled))\n\n # check that calibration can also deal with regressors that have\n # a decision_function\n clf_base_regressor 
= CalibratedClassifierCV(Ridge())\n clf_base_regressor.fit(X_train, y_train)\n clf_base_regressor.predict(X_test)\n\n # Check failure cases:\n # only \"isotonic\" and \"sigmoid\" should be accepted as methods\n clf_invalid_method = CalibratedClassifierCV(clf, method=\"foo\")\n assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)\n\n # base-estimators should provide either decision_function or\n # predict_proba (most regressors, for instance, should fail)\n clf_base_regressor = \\\n CalibratedClassifierCV(RandomForestRegressor(), method=\"sigmoid\")\n assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)\n\n\ndef test_sample_weight_warning():\n n_samples = 100\n X, y = make_classification(n_samples=2 * n_samples, n_features=6,\n random_state=42)\n\n sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))\n X_train, y_train, sw_train = \\\n X[:n_samples], y[:n_samples], sample_weight[:n_samples]\n X_test = X[n_samples:]\n\n for method in ['sigmoid', 'isotonic']:\n base_estimator = LinearSVC(random_state=42)\n calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)\n # LinearSVC does not currently support sample weights but they\n # can still be used for the calibration step (with a warning)\n msg = \"LinearSVC does not support sample_weight.\"\n assert_warns_message(\n UserWarning, msg,\n calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)\n probs_with_sw = calibrated_clf.predict_proba(X_test)\n\n # As the weights are used for the calibration, they should still yield\n # different predictions\n calibrated_clf.fit(X_train, y_train)\n probs_without_sw = calibrated_clf.predict_proba(X_test)\n\n diff = np.linalg.norm(probs_with_sw - probs_without_sw)\n assert_greater(diff, 0.1)\n\n\ndef test_calibration_multiclass():\n \"\"\"Test calibration for multiclass \"\"\"\n # test multi-class setting with classifier that implements\n # only decision function\n clf = LinearSVC()\n X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,\n centers=3, cluster_std=3.0)\n\n # Use categorical labels to check that CalibratedClassifierCV supports\n # them correctly\n target_names = np.array(['a', 'b', 'c'])\n y = target_names[y_idx]\n\n X_train, y_train = X[::2], y[::2]\n X_test, y_test = X[1::2], y[1::2]\n\n clf.fit(X_train, y_train)\n for method in ['isotonic', 'sigmoid']:\n cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)\n cal_clf.fit(X_train, y_train)\n probas = cal_clf.predict_proba(X_test)\n assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))\n\n # Check that the log-loss of the calibrated classifier is smaller than\n # the log-loss of the OvR decision function naively turned into\n # probabilities via softmax\n def softmax(y_pred):\n e = np.exp(-y_pred)\n return e / e.sum(axis=1).reshape(-1, 1)\n uncalibrated_log_loss = \\\n log_loss(y_test, softmax(clf.decision_function(X_test)))\n calibrated_log_loss = log_loss(y_test, probas)\n assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)\n\n # Test that calibration of a multiclass classifier decreases log-loss\n # for RandomForestClassifier\n X, y = make_blobs(n_samples=100, n_features=2, random_state=42,\n cluster_std=3.0)\n X_train, y_train = X[::2], y[::2]\n X_test, y_test = X[1::2], y[1::2]\n\n clf = RandomForestClassifier(n_estimators=10, random_state=42)\n clf.fit(X_train, y_train)\n clf_probs = clf.predict_proba(X_test)\n loss = log_loss(y_test, clf_probs)\n\n for method in ['isotonic', 'sigmoid']:\n cal_clf = CalibratedClassifierCV(clf, 
method=method, cv=3)\n cal_clf.fit(X_train, y_train)\n cal_clf_probs = cal_clf.predict_proba(X_test)\n cal_loss = log_loss(y_test, cal_clf_probs)\n assert_greater(loss, cal_loss)\n\n\ndef test_calibration_prefit():\n \"\"\"Test calibration for prefitted classifiers\"\"\"\n n_samples = 50\n X, y = make_classification(n_samples=3 * n_samples, n_features=6,\n random_state=42)\n sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)\n\n X -= X.min() # MultinomialNB only allows positive X\n\n # split train and test\n X_train, y_train, sw_train = \\\n X[:n_samples], y[:n_samples], sample_weight[:n_samples]\n X_calib, y_calib, sw_calib = \\\n X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \\\n sample_weight[n_samples:2 * n_samples]\n X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]\n\n # Naive-Bayes\n clf = MultinomialNB()\n clf.fit(X_train, y_train, sw_train)\n prob_pos_clf = clf.predict_proba(X_test)[:, 1]\n\n # Naive Bayes with calibration\n for this_X_calib, this_X_test in [(X_calib, X_test),\n (sparse.csr_matrix(X_calib),\n sparse.csr_matrix(X_test))]:\n for method in ['isotonic', 'sigmoid']:\n pc_clf = CalibratedClassifierCV(clf, method=method, cv=\"prefit\")\n\n for sw in [sw_calib, None]:\n pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)\n y_prob = pc_clf.predict_proba(this_X_test)\n y_pred = pc_clf.predict(this_X_test)\n prob_pos_pc_clf = y_prob[:, 1]\n assert_array_equal(y_pred,\n np.array([0, 1])[np.argmax(y_prob, axis=1)])\n\n assert_greater(brier_score_loss(y_test, prob_pos_clf),\n brier_score_loss(y_test, prob_pos_pc_clf))\n\n\ndef test_sigmoid_calibration():\n \"\"\"Test calibration values with Platt sigmoid model\"\"\"\n exF = np.array([5, -4, 1.0])\n exY = np.array([1, -1, -1])\n # computed from my python port of the C++ code in LibSVM\n AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])\n assert_array_almost_equal(AB_lin_libsvm,\n _sigmoid_calibration(exF, exY), 3)\n lin_prob = 1. / (1. 
+ np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))\n sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)\n assert_array_almost_equal(lin_prob, sk_prob, 6)\n\n # check that _SigmoidCalibration().fit only accepts 1d array or 2d column\n # arrays\n assert_raises(ValueError, _SigmoidCalibration().fit,\n np.vstack((exF, exF)), exY)\n\n\ndef test_calibration_curve():\n \"\"\"Check calibration_curve function\"\"\"\n y_true = np.array([0, 0, 0, 1, 1, 1])\n y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])\n prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)\n prob_true_unnormalized, prob_pred_unnormalized = \\\n calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)\n assert_equal(len(prob_true), len(prob_pred))\n assert_equal(len(prob_true), 2)\n assert_almost_equal(prob_true, [0, 1])\n assert_almost_equal(prob_pred, [0.1, 0.9])\n assert_almost_equal(prob_true, prob_true_unnormalized)\n assert_almost_equal(prob_pred, prob_pred_unnormalized)\n\n # probabilities outside [0, 1] should not be accepted when normalize\n # is set to False\n assert_raises(ValueError, calibration_curve, [1.1], [-0.1],\n normalize=False)\n\n\ndef test_calibration_nan_imputer():\n \"\"\"Test that calibration can accept nan\"\"\"\n X, y = make_classification(n_samples=10, n_features=2,\n n_informative=2, n_redundant=0,\n random_state=42)\n X[0, 0] = np.nan\n clf = Pipeline(\n [('imputer', Imputer()),\n ('rf', RandomForestClassifier(n_estimators=1))])\n clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')\n clf_c.fit(X, y)\n clf_c.predict(X)\n" ]
[ [ "numpy.dot", "numpy.log", "scipy.linalg.svd", "numpy.maximum", "numpy.sqrt", "numpy.eye", "numpy.var", "numpy.mean", "scipy.special.gammaln", "scipy.linalg.inv", "numpy.zeros", "numpy.sum", "numpy.empty" ], [ "sklearn.gaussian_process.GaussianProcess", "numpy.allclose", "numpy.random.seed", "numpy.sin", "numpy.finfo", "numpy.atleast_2d", "sklearn.datasets.make_regression", "numpy.all", "numpy.array", "numpy.random.RandomState" ], [ "sklearn.utils.testing.assert_equal", "sklearn.datasets.load_sample_images", "numpy.unique", "sklearn.datasets.load_linnerud", "sklearn.datasets.load_sample_image", "sklearn.datasets.base.Bunch", "sklearn.datasets.load_iris", "sklearn.datasets.load_diabetes", "sklearn.datasets.load_files", "sklearn.externals.six.b", "sklearn.datasets.get_data_home", "sklearn.utils.testing.assert_raises", "sklearn.utils.testing.assert_true", "sklearn.datasets.load_digits", "sklearn.datasets.load_boston", "sklearn.datasets.clear_data_home", "sklearn.externals.six.u" ], [ "numpy.split", "numpy.linspace", "numpy.min", "numpy.asarray", "numpy.unique", "numpy.issubdtype", "numpy.clip", "numpy.nonzero", "numpy.max", "numpy.array" ], [ "sklearn.ensemble.RandomForestRegressor", "sklearn.utils.testing.assert_array_almost_equal", "sklearn.datasets.make_classification", "sklearn.cross_validation.train_test_split", "sklearn.cluster.KMeans", "numpy.asarray", "sklearn.utils.testing.assert_raises", "sklearn.tree.DecisionTreeClassifier", "sklearn.datasets.load_boston", "sklearn.ensemble.RandomForestClassifier", "numpy.unique", "numpy.ravel", "sklearn.utils.testing.assert_raises_regexp", "sklearn.datasets.load_iris", "sklearn.grid_search.GridSearchCV", "sklearn.svm.SVR", "sklearn.ensemble.AdaBoostClassifier", "sklearn.datasets.make_multilabel_classification", "sklearn.svm.SVC", "sklearn.utils.testing.assert_array_equal", "numpy.random.RandomState", "sklearn.utils.testing.assert_equal", "sklearn.tree.DecisionTreeRegressor", "sklearn.utils.shuffle", "sklearn.datasets.make_regression", "sklearn.linear_model.LinearRegression", "sklearn.ensemble.AdaBoostRegressor" ], [ "sklearn.metrics.roc_auc_score", "sklearn.utils.testing.assert_array_almost_equal", "sklearn.utils.testing.assert_almost_equal", "sklearn.utils.testing.assert_raises", "sklearn.utils.testing.clean_warning_registry", "numpy.round", "numpy.zeros_like", "sklearn.metrics.label_ranking_loss", "sklearn.utils.testing.assert_warns", "numpy.where", "sklearn.random_projection.sparse_random_matrix", "sklearn.ensemble.RandomForestClassifier", "numpy.unique", "numpy.arange", "sklearn.metrics.precision_recall_curve", "numpy.zeros", "sklearn.utils.testing.assert_raise_message", "sklearn.metrics.coverage_error", "sklearn.datasets.load_iris", "sklearn.metrics.roc_curve", "sklearn.datasets.make_multilabel_classification", "sklearn.utils.validation.check_random_state", "sklearn.svm.SVC", "sklearn.utils.testing.assert_array_equal", "sklearn.metrics.auc", "numpy.argsort", "numpy.array", "numpy.random.RandomState", "numpy.sum", "numpy.errstate", "sklearn.utils.testing.assert_equal", "sklearn.utils.validation.check_array", "numpy.ones", "sklearn.datasets.load_digits", "sklearn.metrics.average_precision_score", "sklearn.utils.validation.check_consistent_length", "numpy.bincount", "sklearn.metrics.label_ranking_average_precision_score", "numpy.empty" ], [ "sklearn.ensemble.RandomForestRegressor", "sklearn.utils.testing.assert_array_almost_equal", "sklearn.datasets.make_classification", "sklearn.utils.testing.assert_almost_equal", 
"sklearn.utils.testing.assert_raises", "sklearn.svm.LinearSVC", "numpy.exp", "sklearn.datasets.make_blobs", "sklearn.ensemble.RandomForestClassifier", "sklearn.utils.testing.assert_greater", "sklearn.preprocessing.Imputer", "sklearn.utils.testing.assert_warns_message", "numpy.argmax", "sklearn.calibration.calibration_curve", "sklearn.naive_bayes.MultinomialNB", "scipy.sparse.csr_matrix", "sklearn.metrics.log_loss", "sklearn.linear_model.Ridge", "numpy.random.RandomState", "numpy.array", "numpy.sum", "sklearn.calibration._sigmoid_calibration", "sklearn.utils.testing.assert_greater_equal", "numpy.linalg.norm", "sklearn.calibration._SigmoidCalibration", "sklearn.calibration.CalibratedClassifierCV", "numpy.vstack", "sklearn.metrics.brier_score_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
caglorithm/stimulus_neural_populations
[ "58567901bed6f6bc17fc2975435138c33bb6be66" ]
[ "models/brian2/utils_net.py" ]
[ "import numpy as np\n# try to import numba\n# or define dummy decorator\ntry:\n from numba import autojit\nexcept:\n def autojit(func):\n return func\n\n# util functions for network simulation\ndef smooth_trace(trace, scale):\n scale = int(scale)\n if scale == 1 or scale == 0:\n return trace\n slen = int(len(trace) / scale)\n if slen == 0:\n return trace\n return np.array([np.mean(trace[i*scale:(i+1)*scale]) for i in xrange(slen)])\n\n@autojit\ndef choose_k_from_n(n, k):\n # use vaguely estimated metric of when sorting random numbers is better\n if float(k) / float(n) > 0.125:\n ans = np.argsort(np.random.rand(n))[:k]\n return ans\n nums = range(n)\n swaps = (np.random.rand(k) * xrange(n, n - k, -1)).astype('int') + xrange(k)\n for i in xrange(k):\n # swap with some random element from here to end - these swap positions precalculated\n nums[i], nums[swaps[i]] = nums[swaps[i]], nums[i]\n ans = nums[:k]\n return ans\n\ndef fixed_connectivity(n, k):\n prelist = np.zeros(k * n, dtype = int)\n postlist = np.zeros_like(prelist)\n for j in xrange(n):\n presynapses = choose_k_from_n(n, k)\n prelist[j * k:(j + 1) * k] = presynapses\n postlist[j * k:(j + 1) * k] = j * np.ones(k, dtype = int)\n return prelist, postlist" ]
[ [ "numpy.ones", "numpy.mean", "numpy.random.rand", "numpy.zeros_like", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SergioRAgostinho/cvxpnpl
[ "eaa568594df0adcf0c70cc5288b24e5dc1fa9d2f", "eaa568594df0adcf0c70cc5288b24e5dc1fa9d2f" ]
[ "benchmarks/toolkit/datasets.py", "benchmarks/real/pnpl.py" ]
[ "from collections import namedtuple\nimport json\nimport os\nfrom os.path import join as pjoin\nfrom pathlib import Path\n\nimport numpy as np\nfrom plymit import Ply\nfrom PIL import Image\n\nfrom .renderer import Renderer\n\nModel = namedtuple(\n \"Model\",\n [\n \"id\",\n \"points\",\n \"normals\",\n \"color\",\n \"faces\",\n \"diameter\",\n \"min\",\n \"size\",\n \"symmetries_discrete\",\n ],\n)\n\nCamera = namedtuple(\"Camera\", [\"K\", \"size\"])\n\n\nclass Dataset:\n def __init__(self, prefix):\n print(\"Initializing \" + type(self).__name__)\n self.prefix = prefix\n self.camera = self._parse_camera()\n\n # Load models\n self.models = self._load_models()\n self.renderer = self._init_renderer()\n\n # Handle Partitions\n # we're only interested in the test partition here\n # self.train = type(self)._Partition(pjoin(self.prefix, \"train\"))\n # self.train = None\n self.test = type(self)._Partition(\n pjoin(self.prefix, \"test\"), self.models, self.renderer\n )\n\n def __iter__(self):\n return iter(self.test)\n\n def __len__(self):\n return self.test.n_frames\n\n def __getstate__(self):\n # save prefix only and reload database upon deserializing\n return {\"prefix\": self.prefix}\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.__init__(Path(self.prefix).parent)\n\n def _init_renderer(self):\n renderer = Renderer(False)\n renderer.load_models(list(self.models.values()))\n return renderer\n\n def _parse_camera(self):\n data = json.loads(open(pjoin(self.prefix, \"camera.json\")).read())\n camera = Camera(\n K=np.array(\n ((data[\"fx\"], 0, data[\"cx\"]), (0, data[\"fy\"], data[\"cy\"]), (0, 0, 1),)\n ),\n size=(data[\"width\"], data[\"height\"]),\n )\n return camera\n\n def _load_models(self):\n\n models = {}\n\n print(\"Reading ply files for models: \", end=\"\", flush=True)\n\n # load model info. 
models_eval are lighter\n info = json.loads(\n open(pjoin(self.prefix, \"models_eval\", \"models_info.json\")).read()\n )\n for k, v in info.items():\n\n print(k, end=\" \", flush=True)\n\n # load points, normals and color\n ply = Ply(pjoin(self.prefix, \"models\", \"obj_{:06d}.ply\".format(int(k))))\n\n # parse vertices\n points = []\n normals = []\n colors = []\n for vertex in ply.elementLists[\"vertex\"]:\n points.extend([vertex.x, vertex.y, vertex.z])\n normals.extend([vertex.nx, vertex.ny, vertex.nz])\n colors.extend([vertex.red, vertex.green, vertex.blue])\n points = np.array(points, dtype=np.float32).reshape((-1, 3))\n normals = np.array(normals, dtype=np.float32).reshape((-1, 3))\n colors = np.array(colors, dtype=np.uint8).reshape((-1, 3))\n\n # faces\n faces = []\n for f in ply.elementLists[\"face\"]:\n faces.extend(f.vertex_indices)\n faces = np.array(faces, dtype=np.uint32).reshape((-1, 3))\n\n # create model object\n models[k] = Model(\n int(k),\n points,\n normals,\n colors,\n faces,\n v[\"diameter\"],\n np.array((v[\"min_x\"], v[\"min_y\"], v[\"min_z\"])),\n np.array((v[\"size_x\"], v[\"size_y\"], v[\"size_z\"])),\n [np.array(s).reshape((4, 4)) for s in v[\"symmetries_discrete\"]]\n if \"symmetries_discrete\" in v\n else None,\n )\n print(\"DONE\", flush=True)\n return models\n\n class _Partition:\n def __init__(self, prefix, models, renderer):\n\n self.prefix = prefix\n self.models = models\n self.renderer = renderer\n\n seq_names = sorted([d.name for d in os.scandir(prefix)])\n # seq_names = [seq_names[1]]\n self.sequences = [\n Dataset._Sequence(int(n), pjoin(prefix, n), models, renderer)\n for n in seq_names\n ]\n\n # store the total number of frames in the partition\n self.n_frames = 0\n for seq in self.sequences:\n self.n_frames += len(seq)\n\n def __iter__(self):\n return iter(self.sequences)\n\n def __len__(self):\n return len(self.sequences)\n\n class _Sequence:\n def __init__(self, name, prefix, models, renderer):\n\n self.name = name\n self.prefix = prefix\n self.models = models\n self.renderer = renderer\n\n # parse gt\n gt = json.loads(open(pjoin(prefix, \"scene_gt.json\")).read())\n self.poses = [None] * len(gt.keys())\n for k, v in gt.items():\n poses = {}\n for pose in v:\n poses[pose[\"obj_id\"]] = np.hstack(\n (\n np.array(pose[\"cam_R_m2c\"]).reshape((3, 3)),\n np.array(pose[\"cam_t_m2c\"]).reshape((3, 1)),\n )\n )\n self.poses[int(k)] = poses\n\n # iterator stuff\n self.i = 0\n\n def __iter__(self):\n self.i = 0\n return self\n\n def __len__(self):\n return len(self.poses)\n # return 4\n\n def __next__(self):\n # reached the end. 
get out\n if self.i == len(self):\n raise StopIteration\n\n # generate object coordinates\n poses = self.poses[self.i]\n oc = self.renderer.object_coordinates(poses)\n\n # load visibility masks\n mask = self.fuse_masks(self.i, poses.keys())\n\n # return dictionary object with rgb, depth and poses\n data = {\n \"id\": self.i,\n \"rgb\": np.array(\n Image.open(pjoin(self.prefix, \"rgb\", \"{:06d}.png\".format(self.i)))\n ), # load rgb\n # \"depth\": np.array(\n # Image.open(pjoin(self.prefix, \"depth\", \"{:06d}.png\".format(self.i)))\n # ), # load depth\n \"mask\": mask,\n \"oc\": oc,\n \"poses\": poses,\n }\n self.i += 1\n return data\n\n def fuse_masks(self, frame, object_ids):\n masks = np.zeros(self.renderer.size[::-1], dtype=np.uint8)\n for i, oid in enumerate(object_ids):\n masks[\n np.array(\n Image.open(\n pjoin(self.prefix, \"mask_visib\", f\"{frame:06d}_{i:06d}.png\")\n )\n )\n > 127\n ] = oid\n return masks\n\n\nclass Linemod(Dataset):\n\n seq_names = [\n \"ape\",\n \"benchvise\",\n \"bowl\",\n \"cam\",\n \"can\",\n \"cat\",\n \"cup\",\n \"driller\",\n \"duck\",\n \"eggbox\",\n \"glue\",\n \"holepuncher\",\n \"iron\",\n \"lamp\",\n \"phone\",\n ]\n\n def __init__(self, prefix):\n super().__init__(pjoin(prefix, \"lm\"))\n\n\nclass Occlusion(Dataset):\n\n seq_names = [\"\"]\n\n def __init__(self, prefix):\n super().__init__(pjoin(prefix, \"lmo\"))\n", "import numpy as np\n\nfrom toolkit.methods.pnpl import CvxPnPL, DLT, EPnPL, OPnPL\nfrom toolkit.suites import parse_arguments, PnPLReal\nfrom toolkit.datasets import Linemod, Occlusion\n\n\n# reproducibility is a great thing\nnp.random.seed(0)\nnp.random.seed(42)\n\n\n# parse console arguments\nargs = parse_arguments()\n\n# Just a loading data scenario\nif args.load:\n session = PnPLReal.load(args.load)\n session.print(args.print_mode)\n quit()\n\n# run something\nsession = PnPLReal(methods=[CvxPnPL, DLT, EPnPL, OPnPL])\nsession.run(data=[Linemod(args.datasets_prefix), Occlusion(args.datasets_prefix)])\n# session.run(data=[Linemod(args.datasets_prefix)])\nif args.save:\n session.save(args.save)\nsession.print()\n" ]
[ [ "numpy.array", "numpy.zeros" ], [ "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kcrumb/automl
[ "6e0cb70003c05dbbba45a7d741ec975423042f0e", "8cc08ade25d99d0dcc977da46ae2642dadccf826" ]
[ "efficientdet/utils.py", "efficientdet/keras/infer_grad_cam.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utils.\"\"\"\nimport contextlib\nimport os\nimport re\nfrom typing import Text, Tuple, Union\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow.compat.v2 as tf2\nfrom tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import\n# pylint: disable=logging-format-interpolation\n\n\ndef srelu_fn(x):\n \"\"\"Smooth relu: a smooth version of relu.\"\"\"\n with tf.name_scope('srelu'):\n beta = tf.Variable(20.0, name='srelu_beta', dtype=tf.float32)**2\n beta = tf.cast(beta**2, x.dtype)\n safe_log = tf.math.log(tf.where(x > 0., beta * x + 1., tf.ones_like(x)))\n return tf.where((x > 0.), x - (1. / beta) * safe_log, tf.zeros_like(x))\n\n\ndef activation_fn(features: tf.Tensor, act_type: Text):\n \"\"\"Customized non-linear activation type.\"\"\"\n if act_type in ('silu', 'swish'):\n return tf.nn.swish(features)\n elif act_type == 'swish_native':\n return features * tf.sigmoid(features)\n elif act_type == 'hswish':\n return features * tf.nn.relu6(features + 3) / 6\n elif act_type == 'relu':\n return tf.nn.relu(features)\n elif act_type == 'relu6':\n return tf.nn.relu6(features)\n elif act_type == 'mish':\n return features * tf.math.tanh(tf.math.softplus(features))\n elif act_type == 'srelu':\n return srelu_fn(features)\n else:\n raise ValueError('Unsupported act_type {}'.format(act_type))\n\n\ndef cross_replica_mean(t, num_shards_per_group=None):\n \"\"\"Calculates the average value of input tensor across TPU replicas.\"\"\"\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if not num_shards_per_group:\n return tf.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype)\n\n group_assignment = None\n if num_shards_per_group > 1:\n if num_shards % num_shards_per_group != 0:\n raise ValueError(\n 'num_shards: %d mod shards_per_group: %d, should be 0' %\n (num_shards, num_shards_per_group))\n num_groups = num_shards // num_shards_per_group\n group_assignment = [[\n x for x in range(num_shards) if x // num_shards_per_group == y\n ] for y in range(num_groups)]\n return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(\n num_shards_per_group, t.dtype)\n\n\ndef get_ema_vars():\n \"\"\"Get all exponential moving average (ema) variables.\"\"\"\n ema_vars = tf.trainable_variables() + \\\n tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES)\n for v in tf.global_variables():\n # We maintain mva for batch norm moving mean and variance as well.\n if 'moving_mean' in v.name or 'moving_variance' in v.name:\n ema_vars.append(v)\n return list(set(ema_vars))\n\n\ndef get_ckpt_var_map(ckpt_path, ckpt_scope, var_scope, skip_mismatch=None):\n \"\"\"Get a var map for restoring from pretrained checkpoints.\n\n Args:\n ckpt_path: string. A pretrained checkpoint path.\n ckpt_scope: string. 
Scope name for checkpoint variables.\n var_scope: string. Scope name for model variables.\n skip_mismatch: skip variables if their shapes mismatch.\n\n Returns:\n var_map: a dictionary from checkpoint name to model variables.\n \"\"\"\n logging.info('Init model from checkpoint {}'.format(ckpt_path))\n if not ckpt_scope.endswith('/') or not var_scope.endswith('/'):\n raise ValueError('Please specify a scope name ending with /')\n if ckpt_scope.startswith('/'):\n ckpt_scope = ckpt_scope[1:]\n if var_scope.startswith('/'):\n var_scope = var_scope[1:]\n\n var_map = {}\n # Get the list of vars to restore.\n model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)\n reader = tf.train.load_checkpoint(ckpt_path)\n ckpt_var_name_to_shape = reader.get_variable_to_shape_map()\n ckpt_var_names = set(reader.get_variable_to_shape_map().keys())\n\n for i, v in enumerate(model_vars):\n if not v.op.name.startswith(var_scope):\n logging.info('skip {} -- does not match scope {}'.format(\n v.op.name, var_scope))\n continue\n ckpt_var = ckpt_scope + v.op.name[len(var_scope):]\n if (ckpt_var not in ckpt_var_names and\n v.op.name.endswith('/ExponentialMovingAverage')):\n ckpt_var = ckpt_scope + v.op.name[:-len('/ExponentialMovingAverage')]\n\n if ckpt_var not in ckpt_var_names:\n if 'Momentum' in ckpt_var or 'RMSProp' in ckpt_var:\n # Skip optimizer variables.\n continue\n if skip_mismatch:\n logging.info('skip {} ({}) -- not in ckpt'.format(v.op.name, ckpt_var))\n continue\n raise ValueError('{} is not in ckpt {}'.format(v.op, ckpt_path))\n\n if v.shape != ckpt_var_name_to_shape[ckpt_var]:\n if skip_mismatch:\n logging.info('skip {} ({} vs {}) -- shape mismatch'.format(\n v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var]))\n continue\n raise ValueError('shape mismatch {} ({} vs {})'.format(\n v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var]))\n\n if i < 5:\n # Log the first few elements for sanity check.\n logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var))\n var_map[ckpt_var] = v\n\n return var_map\n\n\ndef get_ckpt_var_map_ema(ckpt_path, ckpt_scope, var_scope, var_exclude_expr):\n \"\"\"Get an ema var map for restoring from pretrained checkpoints.\n\n Args:\n ckpt_path: string. A pretrained checkpoint path.\n ckpt_scope: string. Scope name for checkpoint variables.\n var_scope: string. Scope name for model variables.\n var_exclude_expr: string. 
A regex for excluding variables.\n This is useful for finetuning with different classes, where\n var_exclude_expr='.*class-predict.*' can be used.\n\n Returns:\n var_map: a dictionary from checkpoint name to model variables.\n \"\"\"\n logging.info('Init model from checkpoint {}'.format(ckpt_path))\n if not ckpt_scope.endswith('/') or not var_scope.endswith('/'):\n raise ValueError('Please specify a scope name ending with /')\n if ckpt_scope.startswith('/'):\n ckpt_scope = ckpt_scope[1:]\n if var_scope.startswith('/'):\n var_scope = var_scope[1:]\n\n var_map = {}\n # Get the list of vars to restore.\n model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)\n reader = tf.train.load_checkpoint(ckpt_path)\n ckpt_var_names = set(reader.get_variable_to_shape_map().keys())\n exclude_matcher = re.compile(var_exclude_expr) if var_exclude_expr else None\n for v in model_vars:\n if exclude_matcher and exclude_matcher.match(v.op.name):\n logging.info(\n 'skip {} -- excluded by {}'.format(v.op.name, var_exclude_expr))\n continue\n\n if not v.op.name.startswith(var_scope):\n logging.info('skip {} -- does not match scope {}'.format(\n v.op.name, var_scope))\n continue\n\n if v.op.name.endswith('/ExponentialMovingAverage'):\n logging.info('skip ema var {}'.format(v.op.name))\n continue\n\n ckpt_var = ckpt_scope + v.op.name[len(var_scope):]\n ckpt_var_ema = ckpt_var + '/ExponentialMovingAverage'\n if ckpt_var_ema in ckpt_var_names:\n var_map[ckpt_var_ema] = v\n logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var_ema))\n elif ckpt_var in ckpt_var_names:\n var_map[ckpt_var] = v\n logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var))\n else:\n logging.info('skip {} ({}) -- not in ckpt'.format(v.op.name, ckpt_var))\n return var_map\n\n\nclass TpuBatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Cross replica batch normalization.\"\"\"\n\n def __init__(self, fused=False, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n if fused in (True, None):\n raise ValueError('TpuBatchNormalization does not support fused=True.')\n super().__init__(fused=fused, **kwargs)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super()._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n num_shards = tpu_function.get_tpu_context().number_of_shards or 1\n num_shards_per_group = min(32, num_shards) # aggregate up to 32 cores.\n logging.info('TpuBatchNormalization with num_shards_per_group {}'.format(\n num_shards_per_group))\n if num_shards_per_group > 1:\n # Compute variance using: Var[X]= E[X^2] - E[X]^2.\n shard_square_of_mean = tf.math.square(shard_mean)\n shard_mean_of_square = shard_variance + shard_square_of_mean\n group_mean = cross_replica_mean(shard_mean, num_shards_per_group)\n group_mean_of_square = cross_replica_mean(\n shard_mean_of_square, num_shards_per_group)\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n return (group_mean, group_variance)\n else:\n return (shard_mean, shard_variance)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\nclass SyncBatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Cross replica batch normalization.\"\"\"\n\n def __init__(self, 
fused=False, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n if fused in (True, None):\n raise ValueError('SyncBatchNormalization does not support fused=True.')\n super().__init__(fused=fused, **kwargs)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super()._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n replica_context = tf.distribute.get_replica_context()\n num_shards = replica_context.num_replicas_in_sync or 1\n\n if num_shards > 1:\n # Compute variance using: Var[X]= E[X^2] - E[X]^2.\n shard_square_of_mean = tf.math.square(shard_mean)\n shard_mean_of_square = shard_variance + shard_square_of_mean\n shard_stack = tf.stack([shard_mean, shard_mean_of_square])\n group_mean, group_mean_of_square = tf.unstack(\n replica_context.all_reduce(tf.distribute.ReduceOp.MEAN, shard_stack))\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n return (group_mean, group_variance)\n else:\n return (shard_mean, shard_variance)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\nclass BatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Fixed default name of BatchNormalization to match TpuBatchNormalization.\"\"\"\n\n def __init__(self, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n super().__init__(**kwargs)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\ndef batch_norm_class(is_training, strategy=None):\n if is_training and strategy == 'tpu':\n return TpuBatchNormalization\n elif is_training and strategy == 'gpus':\n # TODO(fsx950223): use SyncBatchNorm after TF bug is fixed (incorrect nccl\n # all_reduce). See https://github.com/tensorflow/tensorflow/issues/41980\n return BatchNormalization\n else:\n return BatchNormalization\n\n\ndef batch_normalization(inputs, training=False, strategy=None, **kwargs):\n \"\"\"A wrapper for TpuBatchNormalization.\"\"\"\n bn_layer = batch_norm_class(training, strategy)(**kwargs)\n return bn_layer(inputs, training=training)\n\n\ndef batch_norm_act(inputs,\n is_training_bn: bool,\n act_type: Union[Text, None],\n init_zero: bool = False,\n data_format: Text = 'channels_last',\n momentum: float = 0.99,\n epsilon: float = 1e-3,\n strategy: Text = None,\n name: Text = None):\n \"\"\"Performs a batch normalization followed by a non-linear activation.\n\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training_bn: `bool` for whether the model is training.\n act_type: non-linear relu function type. 
If None, omits the relu operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last\" for `[batch, height, width, channels]`.\n momentum: `float`, momentum of batch norm.\n epsilon: `float`, small value for numerical stability.\n strategy: string to specify training strategy for TPU/GPU/CPU.\n name: the name of the batch normalization layer\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == 'channels_first':\n axis = 1\n else:\n axis = 3\n\n inputs = batch_normalization(\n inputs=inputs,\n axis=axis,\n momentum=momentum,\n epsilon=epsilon,\n center=True,\n scale=True,\n training=is_training_bn,\n strategy=strategy,\n gamma_initializer=gamma_initializer,\n name=name)\n\n if act_type:\n inputs = activation_fn(inputs, act_type)\n return inputs\n\n\ndef drop_connect(inputs, is_training, survival_prob):\n \"\"\"Drop the entire conv with given survival probability.\"\"\"\n # \"Deep Networks with Stochastic Depth\", https://arxiv.org/pdf/1603.09382.pdf\n if not is_training:\n return inputs\n\n # Compute tensor.\n batch_size = tf.shape(inputs)[0]\n random_tensor = survival_prob\n random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)\n binary_tensor = tf.floor(random_tensor)\n # Unlike the conventional way that multiplies by survival_prob at test time,\n # here we divide by survival_prob at training time, so that no additional\n # compute is needed at test time.\n output = inputs / survival_prob * binary_tensor\n return output\n\n\ndef num_params_flops(readable_format=True):\n \"\"\"Return number of parameters and flops.\"\"\"\n nparams = np.sum(\n [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])\n options = tf.profiler.ProfileOptionBuilder.float_operation()\n options['output'] = 'none'\n flops = tf.profiler.profile(\n tf.get_default_graph(), options=options).total_float_ops\n # We use flops to denote multiply-adds, which is counted as 2 ops in tfprof.\n flops = flops // 2\n if readable_format:\n nparams = float(nparams) * 1e-6\n flops = float(flops) * 1e-9\n return nparams, flops\n\n\nconv_kernel_initializer = tf.initializers.variance_scaling()\ndense_kernel_initializer = tf.initializers.variance_scaling()\n\n\nclass Pair(tuple):\n\n def __new__(cls, name, value):\n return super().__new__(cls, (name, value))\n\n def __init__(self, name, _): # pylint: disable=super-init-not-called\n self.name = name\n\n\ndef scalar(name, tensor):\n \"\"\"Stores a (name, Tensor) tuple in a custom collection.\"\"\"\n logging.info('Adding scalar summary {}'.format(Pair(name, tensor)))\n tf.add_to_collection('scalar_summaries', Pair(name, tf.reduce_mean(tensor)))\n\n\ndef image(name, tensor):\n logging.info('Adding image summary {}'.format(Pair(name, tensor)))\n tf.add_to_collection('image_summaries', Pair(name, tensor))\n\n\ndef get_tpu_host_call(global_step, params):\n \"\"\"Get TPU host call for summaries.\"\"\"\n scalar_summaries = tf.get_collection('scalar_summaries')\n if params['img_summary_steps']:\n image_summaries = tf.get_collection('image_summaries')\n else:\n image_summaries = []\n if not scalar_summaries and not image_summaries:\n return None # No summaries to write.\n\n model_dir = params['model_dir']\n iterations_per_loop = 
params.get('iterations_per_loop', 100)\n img_steps = params['img_summary_steps']\n\n def host_call_fn(global_step, *args):\n \"\"\"Training host call. Creates summaries for training metrics.\"\"\"\n gs = global_step[0]\n with tf2.summary.create_file_writer(\n model_dir, max_queue=iterations_per_loop).as_default():\n with tf2.summary.record_if(True):\n for i, _ in enumerate(scalar_summaries):\n name = scalar_summaries[i][0]\n tensor = args[i][0]\n tf2.summary.scalar(name, tensor, step=gs)\n\n if img_steps:\n with tf2.summary.record_if(lambda: tf.math.equal(gs % img_steps, 0)):\n # Log images every img_steps steps.\n for i, _ in enumerate(image_summaries):\n name = image_summaries[i][0]\n tensor = args[i + len(scalar_summaries)]\n tf2.summary.image(name, tensor, step=gs)\n\n return tf.summary.all_v2_summary_ops()\n\n reshaped_tensors = [tf.reshape(t, [1]) for _, t in scalar_summaries]\n reshaped_tensors += [t for _, t in image_summaries]\n global_step_t = tf.reshape(global_step, [1])\n return host_call_fn, [global_step_t] + reshaped_tensors\n\n\ndef archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):\n \"\"\"Archive a checkpoint if the metric is better.\"\"\"\n ckpt_dir, ckpt_name = os.path.split(ckpt_path)\n\n saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')\n saved_objective = float('-inf')\n if tf.io.gfile.exists(saved_objective_path):\n with tf.io.gfile.GFile(saved_objective_path, 'r') as f:\n saved_objective = float(f.read())\n if saved_objective > ckpt_objective:\n logging.info('Ckpt {} is worse than {}'.format(ckpt_objective,\n saved_objective))\n return False\n\n filenames = tf.io.gfile.glob(ckpt_path + '.*')\n if filenames is None:\n logging.info('No files to copy for checkpoint {}'.format(ckpt_path))\n return False\n\n # clear up the backup folder.\n backup_dir = os.path.join(ckpt_dir, 'backup')\n if tf.io.gfile.exists(backup_dir):\n tf.io.gfile.rmtree(backup_dir)\n\n # rename the old checkpoints to backup folder.\n dst_dir = os.path.join(ckpt_dir, 'archive')\n if tf.io.gfile.exists(dst_dir):\n logging.info('mv {} to {}'.format(dst_dir, backup_dir))\n tf.io.gfile.rename(dst_dir, backup_dir)\n\n # Write checkpoints.\n tf.io.gfile.makedirs(dst_dir)\n for f in filenames:\n dest = os.path.join(dst_dir, os.path.basename(f))\n tf.io.gfile.copy(f, dest, overwrite=True)\n ckpt_state = tf.train.generate_checkpoint_state_proto(\n dst_dir,\n model_checkpoint_path=os.path.join(dst_dir, ckpt_name))\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:\n f.write(str(ckpt_state))\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:\n f.write('%s' % ckpt_eval)\n\n # Update the best objective.\n with tf.io.gfile.GFile(saved_objective_path, 'w') as f:\n f.write('%f' % ckpt_objective)\n\n logging.info('Copying checkpoint {} to {}'.format(ckpt_path, dst_dir))\n return True\n\n\ndef parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):\n \"\"\"Parse the image size and return (height, width).\n\n Args:\n image_size: An integer, a tuple (H, W), or a string in WxH format.\n\n Returns:\n A tuple of integers (height, width).\n \"\"\"\n if isinstance(image_size, int):\n # image_size is integer, with the same width and height.\n return (image_size, image_size)\n\n if isinstance(image_size, str):\n # image_size is a string with format WxH\n width, height = image_size.lower().split('x')\n return (int(height), int(width))\n\n if isinstance(image_size, tuple):\n return image_size\n\n raise ValueError('image_size must be an int, WxH string, or 
(height, width)'\n ' tuple. Was %r' % image_size)\n\n\ndef get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]],\n max_level: int):\n \"\"\"Get feat widths and heights for all levels.\n\n Args:\n image_size: An integer, a tuple (H, W), or a string in WxH format.\n max_level: maximum feature level.\n\n Returns:\n feat_sizes: a list of tuples (height, width) for each level.\n \"\"\"\n image_size = parse_image_size(image_size)\n feat_sizes = [{'height': image_size[0], 'width': image_size[1]}]\n feat_size = image_size\n for _ in range(1, max_level + 1):\n feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)\n feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]})\n return feat_sizes\n\n\ndef verify_feats_size(feats,\n feat_sizes,\n min_level,\n max_level,\n data_format='channels_last'):\n \"\"\"Verify the feature map sizes.\"\"\"\n expected_output_size = feat_sizes[min_level:max_level + 1]\n for cnt, size in enumerate(expected_output_size):\n h_id, w_id = (2, 3) if data_format == 'channels_first' else (1, 2)\n if feats[cnt].shape[h_id] != size['height']:\n raise ValueError(\n 'feats[{}] has shape {} but its height should be {}.'\n ' (input_height: {}, min_level: {}, max_level: {}.)'.format(\n cnt, feats[cnt].shape, size['height'], feat_sizes[0]['height'],\n min_level, max_level))\n if feats[cnt].shape[w_id] != size['width']:\n raise ValueError(\n 'feats[{}] has shape {} but its width should be {}.'\n ' (input_width: {}, min_level: {}, max_level: {}.)'.format(\n cnt, feats[cnt].shape, size['width'], feat_sizes[0]['width'],\n min_level, max_level))\n\n\ndef get_precision(strategy: str, mixed_precision: bool = False):\n \"\"\"Get the precision policy for a given strategy.\"\"\"\n if mixed_precision:\n if strategy == 'tpu':\n return 'mixed_bfloat16'\n\n if tf.config.experimental.list_physical_devices('GPU'):\n return 'mixed_float16'\n\n # TODO(fsx950223): Fix CPU float16 inference\n # https://github.com/google/automl/issues/504\n logging.warning('float16 is not supported for CPU, use float32 instead')\n return 'float32'\n\n return 'float32'\n\n\[email protected]\ndef float16_scope():\n \"\"\"Scope class for float16.\"\"\"\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Creates requested float16 variables as float32, then casts them back.\"\"\"\n cast_to_float16 = False\n requested_dtype = kwargs['dtype']\n if requested_dtype == tf.float16:\n kwargs['dtype'] = tf.float32\n cast_to_float16 = True\n var = getter(*args, **kwargs)\n if cast_to_float16:\n var = tf.cast(var, tf.float16)\n return var\n\n with tf.variable_scope('', custom_getter=_custom_getter) as varscope:\n yield varscope\n\n\ndef set_precision_policy(policy_name: Text = None, loss_scale: bool = False):\n \"\"\"Set precision policy according to the name.\n\n Args:\n policy_name: precision policy name, one of 'float32', 'mixed_float16',\n 'mixed_bfloat16', or None.\n loss_scale: whether to use loss scale (only for training).\n \"\"\"\n if not policy_name:\n return\n\n assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32')\n logging.info('use mixed precision policy name %s', policy_name)\n # TODO(tanmingxing): use tf.keras.layers.enable_v2_dtype_behavior() when it is\n # available in a stable TF release.\n from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top,g-direct-tensorflow-import\n base_layer_utils.enable_v2_dtype_behavior()\n # mixed_float16 training is not supported for now, so disable loss_scale.\n # float32 and mixed_bfloat16 do not need loss 
scale for training.\n if loss_scale:\n policy = tf2.keras.mixed_precision.experimental.Policy(policy_name)\n else:\n policy = tf2.keras.mixed_precision.experimental.Policy(\n policy_name, loss_scale=None)\n tf2.keras.mixed_precision.experimental.set_policy(policy)\n\n\ndef build_model_with_precision(pp, mm, ii, tt, *args, **kwargs):\n \"\"\"Build model with its inputs/params for a specified precision context.\n\n This is highly specific to this codebase, and not intended to be a general\n API. Advanced users only. DO NOT use it if you don't know what it does.\n NOTE: short argument names are intended to avoid conflicts with kwargs.\n\n Args:\n pp: A string, precision policy name, such as \"mixed_float16\".\n mm: A function, the model builder.\n ii: A tensor, for model inputs.\n tt: A bool, if true, it is for training; otherwise, it is for eval.\n *args: A list of model arguments.\n **kwargs: A dict, extra model parameters.\n\n Returns:\n the output of mm model.\n \"\"\"\n if pp == 'mixed_bfloat16':\n set_precision_policy(pp)\n inputs = tf.cast(ii, tf.bfloat16)\n with tf.tpu.bfloat16_scope():\n outputs = mm(inputs, *args, **kwargs)\n set_precision_policy('float32')\n elif pp == 'mixed_float16':\n set_precision_policy(pp, loss_scale=tt)\n inputs = tf.cast(ii, tf.float16)\n with float16_scope():\n outputs = mm(inputs, *args, **kwargs)\n set_precision_policy('float32')\n elif not pp or pp == 'float32':\n outputs = mm(ii, *args, **kwargs)\n else:\n raise ValueError('Unknown precision name {}'.format(pp))\n\n # Users are responsible for converting the dtype of all outputs.\n return outputs\n", "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nA simple Grad-CAM script that visualizes the gradient of the last layer of the EfficientNet backbone.\nBased on https://arxiv.org/abs/1610.02391\n\"\"\"\n\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\n\nimport hparams_config\nimport inference\nimport utils\nfrom keras import efficientdet_keras\n\nimport matplotlib.cm as cm\n\nflags.DEFINE_string('image_path', None, 'Location of test image.')\nflags.DEFINE_string('output_dir', None, 'Directory of annotated output images.')\nflags.DEFINE_string('model_dir', None, 'Location of the checkpoint to run.')\nflags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name to use.')\nflags.DEFINE_string('hparams', '', 'Comma separated k=v pairs or a yaml file')\nflags.DEFINE_bool('debug', False, 'If true, run function in eager for debug.')\nflags.DEFINE_enum('gradient_type', 'cls', ['cls', 'box'], 'Gradient that should be visualized')\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n img = Image.open(FLAGS.image_path)\n imgs = [np.array(img)]\n # Create model config.\n config = hparams_config.get_efficientdet_config(FLAGS.model_name)\n config.is_training_bn = False\n # config.image_size = '640x640'\n 
# config.nms_configs.score_thresh = 0.01\n config.nms_configs.score_thresh = 0.4\n config.nms_configs.max_output_size = 100\n config.override(FLAGS.hparams)\n\n # Use 'mixed_float16' if running on GPUs.\n policy = tf.keras.mixed_precision.experimental.Policy('float32')\n tf.keras.mixed_precision.experimental.set_policy(policy)\n tf.config.experimental_run_functions_eagerly(FLAGS.debug)\n\n # Create model\n model = efficientdet_keras.EfficientDetNet(config=config)\n target_size = utils.parse_image_size(config.image_size)\n target_size = target_size + (3,)\n model_inputs = tf.keras.Input(shape=target_size)\n model(model_inputs, False)\n model.summary()\n\n # output layers detailed\n # for i in model.layers:\n # print(i.name, i.input, i.output)\n\n model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))\n\n # create new model to access intermediate layers\n effdet_model = tf.keras.Model(inputs=model.input, outputs=[model.get_layer(name='class_net').output,\n model.get_layer(name='box_net').output,\n model.backbone.layers[-3].output # last layer\n ])\n\n # is only used for pre- and post-processing methods\n effdet_methods = efficientdet_keras.EfficientDetModel(config=config)\n\n # input image preprocessing\n imgs = tf.convert_to_tensor(imgs)\n inputs, scales = effdet_methods._preprocessing(imgs, config.image_size, 'infer')\n\n with tf.GradientTape() as tape:\n # Compute activations of the last conv layer and make the tape watch it\n cls_outputs, box_outputs, efficientnet_last_layer = effdet_model(inputs, False)\n\n # save gradients\n grads = None\n if FLAGS.gradient_type == 'cls':\n grads = tape.gradient(cls_outputs, efficientnet_last_layer)\n elif FLAGS.gradient_type == 'box':\n grads = tape.gradient(box_outputs, efficientnet_last_layer)\n\n assert grads != None\n grad_cam(grads, efficientnet_last_layer[0], img, imgs[0], FLAGS.gradient_type)\n\n\n ### bounding box visualization ###\n boxes, scores, classes, valid_len = effdet_methods._postprocess(cls_outputs, box_outputs, scales)\n\n # Visualize results.\n for i, img in enumerate(imgs):\n length = valid_len[i]\n img = inference.visualize_image(\n img,\n boxes[i].numpy()[:length],\n classes[i].numpy().astype(np.int)[:length],\n scores[i].numpy()[:length],\n min_score_thresh=config.nms_configs.score_thresh,\n max_boxes_to_draw=config.nms_configs.max_output_size)\n output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')\n Image.fromarray(img).save(output_image_path)\n print('writing annotated image to ', output_image_path)\n\n\ndef grad_cam(grads, last_layer, input_img: Image, img, type: str):\n ### calculate grad-cam ###\n # neuron importance weights a^c_k\n pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))\n # liner combination\n weighted_features = tf.multiply(pooled_grads, last_layer)\n sumed_weighted_features = tf.reduce_sum(weighted_features, axis=-1)\n # relu operation\n heatmap = tf.maximum(sumed_weighted_features, 0)\n\n ### heatmap visualization ###\n # normalize\n heatmap = heatmap.numpy()\n heatmap = heatmap / np.max(heatmap)\n # rescale heatmap to a range 0-255\n heatmap = np.uint(255 * heatmap)\n # get color map\n cmap = cm.get_cmap(\"jet\")\n # get colormap colors\n cmap_color = cmap(np.arange(cmap.N))\n # set alpha (zeros are transparent)\n alpha_channel = np.ones((cmap.N)) - 0.5\n alpha_channel[0] = 0\n cmap_color[:, -1] = alpha_channel\n # map heatmap values to RGBA value\n heatmap_colored = cmap_color[heatmap]\n # resize heatmap\n heatmap_img = tf.keras.preprocessing.image.array_to_img(heatmap_colored)\n 
heatmap_img = heatmap_img.resize((img.shape[1], img.shape[0]))\n # overlay heatmap and input image\n in_img = input_img.copy()\n in_img.paste(im=heatmap_img, box=(0, 0), mask=heatmap_img)\n\n # output image path\n img_name = FLAGS.image_path.replace('\\\\', '/')\n img_name = img_name[img_name.rindex('/') + 1:]\n img_name = img_name[:img_name.rindex('.')] + '-' + type + img_name[img_name.rindex('.'):]\n output_image_path = os.path.join(FLAGS.output_dir, img_name)\n\n in_img.save(output_image_path)\n print('Writing Grad-CAM image ({}) to {}'.format(type, output_image_path))\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('image_path')\n flags.mark_flag_as_required('output_dir')\n flags.mark_flag_as_required('model_dir')\n logging.set_verbosity(logging.WARNING)\n app.run(main)\n" ]
[ [ "tensorflow.compat.v1.io.gfile.exists", "tensorflow.compat.v1.io.gfile.rename", "tensorflow.compat.v1.io.gfile.makedirs", "tensorflow.compat.v1.random.uniform", "tensorflow.compat.v1.config.experimental.list_physical_devices", "tensorflow.compat.v2.keras.mixed_precision.experimental.Policy", "tensorflow.python.keras.engine.base_layer_utils.enable_v2_dtype_behavior", "tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.io.gfile.glob", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.math.square", "tensorflow.python.tpu.tpu_function.get_tpu_context", "tensorflow.compat.v1.nn.relu6", "tensorflow.compat.v1.math.equal", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v2.summary.record_if", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.io.gfile.rmtree", "tensorflow.compat.v1.sigmoid", "tensorflow.compat.v2.summary.image", "tensorflow.compat.v1.distribute.get_replica_context", "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.ones_initializer", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.io.gfile.copy", "tensorflow.compat.v1.name_scope", "tensorflow.compat.v1.initializers.variance_scaling", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v2.summary.create_file_writer", "tensorflow.compat.v1.nn.swish", "tensorflow.compat.v1.floor", "tensorflow.compat.v1.io.gfile.GFile", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.nn.relu", "tensorflow.compat.v2.summary.scalar", "tensorflow.compat.v1.math.softplus", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.tpu.bfloat16_scope", "tensorflow.compat.v1.Variable", "tensorflow.compat.v1.summary.all_v2_summary_ops", "tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v1.train.load_checkpoint", "tensorflow.compat.v1.tpu.cross_replica_sum" ], [ "tensorflow.convert_to_tensor", "tensorflow.multiply", "tensorflow.train.latest_checkpoint", "tensorflow.keras.Input", "tensorflow.reduce_mean", "tensorflow.keras.mixed_precision.experimental.Policy", "tensorflow.keras.mixed_precision.experimental.set_policy", "tensorflow.reduce_sum", "tensorflow.maximum", "numpy.uint", "numpy.arange", "numpy.ones", "numpy.max", "tensorflow.config.experimental_run_functions_eagerly", "matplotlib.cm.get_cmap", "numpy.array", "tensorflow.keras.preprocessing.image.array_to_img", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
Tchiik/CompViz
[ "b0a94cbf360e04cc4bcac261ae435eff462aa625" ]
[ "custom_2.py" ]
[ "\"\"\"\nMask R-CNN\nTrain on the toy bottle dataset and implement color splash effect.\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n------------------------------------------------------------\nUsage: import the module (see Jupyter notebooks for examples), or run from\n the command line as such:\n # Train a new model starting from pre-trained COCO weights\n python3 bottle.py train --dataset=/home/datascience/Workspace/maskRcnn/Mask_RCNN-master/samples/bottle/dataset --weights=coco\n # Resume training a model that you had trained earlier\n python3 bottle.py train --dataset=/path/to/bottle/dataset --weights=last\n # Train a new model starting from ImageNet weights\n python3 bottle.py train --dataset=/path/to/bottle/dataset --weights=imagenet\n # Apply color splash to an image\n python3 bottle.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>\n # Apply color splash to video using the last weights you trained\n python3 bottle.py splash --weights=last --video=<URL or path to file>\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\nimport cv2\nfrom mrcnn.visualize import display_instances\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = '/content/drive/My Drive/CompViz/'\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\nnewCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_scratch_0014.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Configurations\n############################################################\n\n\nclass CustomConfig(Config):\n \"\"\"Configuration for training on the toy dataset.\n Derives from the base Config class and overrides some values.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"damage\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 2\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 5 # Background + toy\n\n # Number of training steps per epoch\n STEPS_PER_EPOCH = 200\n\n # Skip detections with < 90% confidence\n DETECTION_MIN_CONFIDENCE = 0.9\n\n\n############################################################\n# Dataset\n############################################################\n\nclass CustomDataset(utils.Dataset):\n\n def load_custom(self, dataset_dir, subset):\n \"\"\"Load a subset of the bottle dataset.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val\n \"\"\"\n # Add classes. 
We have only one class to add.\n self.add_class(\"damage\", 1, \"bumper_scratch\")\n self.add_class(\"damage\", 2, \"door_scratch\")\n self.add_class(\"damage\", 3, \"bumper_dent\")\n self.add_class(\"damage\", 4, \"door_dent\")\n self.add_class(\"damage\", 5, \"broken_headlight\")\n\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir + '/' + subset)\n\n # Load annotations\n # VGG Image Annotator saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n annotations1 = json.load(open(os.path.join(dataset_dir + '/' + \"via_region_data.json\"),'r',encoding=\"utf8\",errors='ignore'))\n # print(annotations1)\n annotations = list(annotations1.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n \n # Add images\n for a in annotations:\n # print(a)\n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. There are stores in the\n # shape_attributes (see json format above)\n polygons = [r['shape_attributes'] for r in a['regions'].values()] \n objects = [s['region_attributes'] for s in a['regions'].values()] #s['region_attributes'] ['name'] for s in a['regions']\n print(\"damage:\",objects)\n \n num_ids = [n['damage'] for n in objects]\n dic = {\"door_scratch\":1, \"bumper_scratch\":2, \"door_dent\":3, \"bumper_dent\":4,\"broken_headlight\":5}\n num_ids = [dic.get(n, n) for n in num_ids]\n \n # num_ids=[]\n # for n in objects:\n # #print(n)\n # #print(type(n))\n # try:\n # if n.key()=='bumper_scratch':\n # num_ids.append(1)\n # elif n.key()=='door_scratch':\n # num_ids.append(2)\n # elif n.key()=='bumper_dent':\n # num_ids.append(3)\n # elif n.key()=='door_dent':\n # num_ids.append(4)\n # elif n.key()=='broken_headlight':\n # num_ids.append(5)\n # except:\n # pass\n # \n\n # name_dict = {\"bumper_scratch\": 1,\"door_scratch\": 2,\"bumper_dent\": 3,\"door_dent\": 4,\"broken_headlight\": 5 }\n # key = tuple(name_dict)\n # num_ids = [name_dict[a] for a in objects]\n \n # num_ids = [int(n['Event']) for n in objects]\n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. 
This is only managable since the dataset is tiny.\n print(\"num_ids\",num_ids)\n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"damage\", ## for a single class just add the name here\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons,\n num_ids=num_ids)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a bottle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"damage\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n if info[\"source\"] != \"damage\":\n return super(self.__class__, self).load_mask(image_id)\n num_ids = info['num_ids']\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n \trr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n \tmask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n # Map class names to class IDs.\n num_ids = np.array(num_ids, dtype=np.int32)\n return mask, num_ids\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"object\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n\ndef train(model):\n \"\"\"Train the model.\"\"\"\n # Training dataset.\n dataset_train = CustomDataset()\n dataset_train.load_custom(args.dataset, \"train\")\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = CustomDataset()\n dataset_val.load_custom(args.dataset, \"val\")\n dataset_val.prepare()\n\n # *** This training schedule is an example. Update to your needs ***\n # Since we're using a very small dataset, and starting from\n # COCO trained weights, we don't need to train too long. Also,\n # no need to train all layers, just the heads should do it.\n print(\"Training network heads\")\n model.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=10,\n layers='heads')\n\n\ndef color_splash(image, mask):\n \"\"\"Apply color splash effect.\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n Returns result image.\n \"\"\"\n # Make a grayscale copy of the image. 
The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n # Copy color pixels from the original color image where mask is set\n if mask.shape[0] > 0:\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray\n return splash\n\n\ndef detect_and_color_splash(model, image_path=None, video_path=None):\n assert image_path or video_path\n\n # Image or video?\n if image_path:\n # Run model detection and generate the color splash effect\n print(\"Running on {}\".format(args.image))\n # Read image\n image = skimage.io.imread(args.image)\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # Save output\n file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n skimage.io.imsave(file_name, splash)\n elif video_path:\n import cv2\n # Video capture\n vcapture = cv2.VideoCapture(video_path)\n width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = vcapture.get(cv2.CAP_PROP_FPS)\n\n # Define codec and create video writer\n file_name = \"splash_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n vwriter = cv2.VideoWriter(file_name,\n cv2.VideoWriter_fourcc(*'MJPG'),\n fps, (width, height))\n\n count = 0\n success = True\n while success:\n print(\"frame: \", count)\n # Read next image\n success, image = vcapture.read()\n if success:\n # OpenCV returns images as BGR, convert to RGB\n image = image[..., ::-1]\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # RGB -> BGR to save image to video\n splash = splash[..., ::-1]\n # Add image to video writer\n vwriter.write(splash)\n count += 1\n vwriter.release()\n print(\"Saved to \", file_name)\n\n############################################################\n# Training\n############################################################\n\nif __name__ == '__main__':\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect custom class.')\n parser.add_argument(\"command\",\n metavar=\"<command>\",\n help=\"'train' or 'splash'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/path/to/custom/dataset/\",\n help='Directory of the custom dataset')\n parser.add_argument('--weights', required=True,\n metavar=\"/path/to/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/)')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n help='Image to apply the color splash effect on')\n parser.add_argument('--video', required=False,\n metavar=\"path or URL to video\",\n help='Video to apply the color splash effect on')\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n elif args.command == \"splash\":\n assert args.image or args.video,\\\n \"Provide --image or --video to apply color splash\"\n\n print(\"Weights: \", args.weights)\n print(\"Dataset: \", args.dataset)\n print(\"Logs: \", args.logs)\n\n # Configurations\n if args.command == \"train\":\n 
config = CustomConfig()\n else:\n class InferenceConfig(CustomConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n config = InferenceConfig()\n config.display()\n\n # Create model\n if args.command == \"train\":\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=args.logs)\n else:\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"newcoco\":\n # Find last trained weights\n weights_path = newCOCO_WEIGHTS_PATH\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()[1]\n elif args.weights.lower() == \"imagenet\":\n # Start from ImageNet trained weights\n weights_path = model.get_imagenet_weights()\n else:\n weights_path = args.weights\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if args.weights.lower() == \"coco\":\n # Exclude the last layers because they require a matching\n # number of classes\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n else:\n model.load_weights(weights_path, by_name=True)\n\n # Train or evaluate\n if args.command == \"train\":\n train(model)\n elif args.command == \"splash\":\n detect_and_color_splash(model, image_path=args.image,\n video_path=args.video)\n else:\n print(\"'{}' is not recognized. \"\n \"Use 'train' or 'splash'\".format(args.command))" ]
[ [ "numpy.array", "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CubicZebra/anomalydetect
[ "ec571b47569f491f72abc9736097fb30f9289f46" ]
[ "core/nearest.py" ]
[ "import numpy as np\nfrom basic.types import vector, matrix\nfrom typing import Optional\nfrom basic.tests import dt\n\n\n# print(dt)\n# print(dt.mean(axis=0))\n\n\n_ptr = np.array([1, 1, 4, 4, 8])\n# print(dt - _ptr)\n\n\ndef _is_broadcastable(x: matrix, _x: vector) -> Optional[TypeError]:\n if x.shape[1] != _x.shape[0]:\n raise TypeError(r'arg {} is not broadcastable to target matrix'.format(_x))\n\n\ndef _euclidean_ord(x: matrix, _x: vector) -> vector:\n _is_broadcastable(x, _x)\n return np.linalg.norm(x - _x, axis=1, ord=2).argsort(kind='mergesort')\n\n\ndef to_a_table(x: matrix, tag: vector, k: Optional[int] = None):\n idx_tab = np.array([_euclidean_ord(x, item) for item in x])\n _ = np.array([item for item in 'abcdefghijklmnopqrst']) # labels for test\n # for v in range(len(idx_tab)):\n # print(_[idx_tab[v]])\n # k -> np.unique(), 二值化的\n cls, counts = np.unique(tag, return_counts=True)\n proportions = counts/counts.sum()\n np.where(tag == '2')\n # print(np.log(v[0]) - np.log(v[1])) # 计算加速: 当数据量大时,最好对k的值(最大)作出限制\n\n\n# to_a_table(dt, ['a', 'a', 'b', 'b', 'b'])\n\n\n# v = _euclidean_dis(dt, _ptr)\n# print(v)\n# print(np.argsort(v, kind='mergesort'))\n\n\nfrom multiprocessing.pool import ThreadPool\nimport time\n\n\ndef print_hello(x):\n print(r'hello, {}'.format(x))\n time.sleep(2)\n return 'name_' + x\n\n\nwith ThreadPool(2) as p:\n res = p.map(print_hello, ['aa', 'bb'])\n\nprint(res)\n\n\nif __name__ == '__main__':\n # with ThreadPool() as p:\n # res = p.map(print_hello, ['aa', 'bb'])\n #\n # print(res)\n pass\n" ]
[ [ "numpy.array", "numpy.where", "numpy.linalg.norm", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Liang-ZX/Stereo-Mask-RCNN
[ "c7c53062eacca4511fd4d091bea41cd7b5cf100d" ]
[ "demo.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n\n# Modified by Peiliang Li for Stereo RCNN demo\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport shutil\nimport time\nimport cv2\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport math as m\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.roi_layers import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv, kpts_transform_inv, border_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.stereo_rcnn.resnet import resnet\nfrom model.utils import kitti_utils\nfrom model.utils import vis_3d_utils as vis_utils\nfrom model.utils import box_estimator as box_estimator\nfrom model.dense_align import dense_align\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Test the Stereo R-CNN network')\n\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"models_stereo\",\n type=str)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=12, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=6477, type=int)\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n np.random.seed(cfg.RNG_SEED)\n\n input_dir = args.load_dir + \"/\"\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'stereo_rcnn_{}_{}.pth'.format(args.checkepoch, args.checkpoint))\n kitti_classes = np.asarray(['__background__', 'Car'])\n\n # initilize the network here.\n stereoRCNN = resnet(kitti_classes, 101, pretrained=False)\n stereoRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n stereoRCNN.load_state_dict(checkpoint['model'])\n print('load model successfully!')\n\n with torch.no_grad():\n # initilize the tensor holder here.\n im_left_data = Variable(torch.FloatTensor(1).cuda())\n im_right_data = Variable(torch.FloatTensor(1).cuda())\n im_info = Variable(torch.FloatTensor(1).cuda())\n num_boxes = Variable(torch.LongTensor(1).cuda())\n gt_boxes = Variable(torch.FloatTensor(1).cuda())\n\n stereoRCNN.cuda()\n\n eval_thresh = 0.05\n vis_thresh = 0.7\n\n stereoRCNN.eval()\n \n # read data\n img_l_path = 'demo/left.png'\n img_r_path = 'demo/right.png'\n\n img_left = cv2.imread(img_l_path)\n img_right = cv2.imread(img_r_path)\n\n # rgb -> bgr\n img_left = img_left.astype(np.float32, copy=False)\n img_right = img_right.astype(np.float32, copy=False)\n\n img_left -= cfg.PIXEL_MEANS\n img_right -= cfg.PIXEL_MEANS\n\n im_shape = img_left.shape\n im_size_min = np.min(im_shape[0:2])\n im_scale = float(cfg.TRAIN.SCALES[0]) / float(im_size_min)\n\n img_left = 
cv2.resize(img_left, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n img_right = cv2.resize(img_right, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n \n info = np.array([[img_left.shape[0], img_left.shape[1], \\\n im_scale]], dtype=np.float32)\n \n img_left = torch.from_numpy(img_left)\n img_left = img_left.permute(2, 0, 1).unsqueeze(0).contiguous()\n\n img_right = torch.from_numpy(img_right)\n img_right = img_right.permute(2, 0, 1).unsqueeze(0).contiguous()\n\n info = torch.from_numpy(info)\n\n im_left_data.data.resize_(img_left.size()).copy_(img_left)\n im_right_data.data.resize_(img_right.size()).copy_(img_right)\n im_info.data.resize_(info.size()).copy_(info)\n \n det_tic = time.time()\n rois_left, rois_right, cls_prob, bbox_pred, bbox_pred_dim, kpts_prob,\\\n left_prob, right_prob, rpn_loss_cls, rpn_loss_box_left_right,\\\n RCNN_loss_cls, RCNN_loss_bbox, RCNN_loss_dim_orien, RCNN_loss_kpts, rois_label =\\\n stereoRCNN(im_left_data, im_right_data, im_info, gt_boxes, gt_boxes,\\\n gt_boxes, gt_boxes, gt_boxes, num_boxes)\n \n scores = cls_prob.data\n boxes_left = rois_left.data[:, :, 1:5]\n boxes_right = rois_right.data[:, :, 1:5]\n\n bbox_pred = bbox_pred.data\n box_delta_left = bbox_pred.new(bbox_pred.size()[1], 4*len(kitti_classes)).zero_()\n box_delta_right = bbox_pred.new(bbox_pred.size()[1], 4*len(kitti_classes)).zero_()\n\n for keep_inx in range(box_delta_left.size()[0]):\n box_delta_left[keep_inx, 0::4] = bbox_pred[0,keep_inx,0::6]\n box_delta_left[keep_inx, 1::4] = bbox_pred[0,keep_inx,1::6]\n box_delta_left[keep_inx, 2::4] = bbox_pred[0,keep_inx,2::6]\n box_delta_left[keep_inx, 3::4] = bbox_pred[0,keep_inx,3::6]\n\n box_delta_right[keep_inx, 0::4] = bbox_pred[0,keep_inx,4::6]\n box_delta_right[keep_inx, 1::4] = bbox_pred[0,keep_inx,1::6]\n box_delta_right[keep_inx, 2::4] = bbox_pred[0,keep_inx,5::6]\n box_delta_right[keep_inx, 3::4] = bbox_pred[0,keep_inx,3::6]\n\n box_delta_left = box_delta_left.view(-1,4)\n box_delta_right = box_delta_right.view(-1,4)\n\n dim_orien = bbox_pred_dim.data\n dim_orien = dim_orien.view(-1,5)\n\n kpts_prob = kpts_prob.data\n kpts_prob = kpts_prob.view(-1,4*cfg.KPTS_GRID)\n max_prob, kpts_delta = torch.max(kpts_prob,1)\n\n left_prob = left_prob.data\n left_prob = left_prob.view(-1,cfg.KPTS_GRID)\n _, left_delta = torch.max(left_prob,1)\n\n right_prob = right_prob.data\n right_prob = right_prob.view(-1,cfg.KPTS_GRID)\n _, right_delta = torch.max(right_prob,1)\n\n box_delta_left = box_delta_left * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_delta_right = box_delta_right * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n dim_orien = dim_orien * torch.FloatTensor(cfg.TRAIN.DIM_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.DIM_NORMALIZE_MEANS).cuda()\n\n\n box_delta_left = box_delta_left.view(1,-1,4*len(kitti_classes))\n box_delta_right = box_delta_right.view(1, -1,4*len(kitti_classes))\n dim_orien = dim_orien.view(1, -1, 5*len(kitti_classes))\n kpts_delta = kpts_delta.view(1, -1, 1)\n left_delta = left_delta.view(1, -1, 1)\n right_delta = right_delta.view(1, -1, 1)\n max_prob = max_prob.view(1, -1, 1)\n\n pred_boxes_left = bbox_transform_inv(boxes_left, box_delta_left, 1)\n pred_boxes_right = bbox_transform_inv(boxes_right, box_delta_right, 1)\n pred_kpts, kpts_type = kpts_transform_inv(boxes_left, kpts_delta,cfg.KPTS_GRID)\n pred_left 
= border_transform_inv(boxes_left, left_delta,cfg.KPTS_GRID)\n pred_right = border_transform_inv(boxes_left, right_delta,cfg.KPTS_GRID)\n\n pred_boxes_left = clip_boxes(pred_boxes_left, im_info.data, 1)\n pred_boxes_right = clip_boxes(pred_boxes_right, im_info.data, 1)\n\n pred_boxes_left /= im_info[0,2].data\n pred_boxes_right /= im_info[0,2].data\n pred_kpts /= im_info[0,2].data\n pred_left /= im_info[0,2].data\n pred_right /= im_info[0,2].data\n\n scores = scores.squeeze()\n pred_boxes_left = pred_boxes_left.squeeze()\n pred_boxes_right = pred_boxes_right.squeeze()\n\n pred_kpts = torch.cat((pred_kpts, kpts_type, max_prob, pred_left, pred_right),2)\n pred_kpts = pred_kpts.squeeze()\n dim_orien = dim_orien.squeeze()\n\n det_toc = time.time()\n detect_time = det_toc - det_tic\n\n calib = kitti_utils.read_obj_calibration('demo/calib.txt')\n\n im2show_left = np.copy(cv2.imread(img_l_path))\n im2show_right = np.copy(cv2.imread(img_r_path))\n \n pointcloud = kitti_utils.get_point_cloud('demo/lidar.bin', calib)\n im_box = vis_utils.vis_lidar_in_bev(pointcloud, width=im2show_left.shape[0]*2)\n\n for j in xrange(1, len(kitti_classes)):\n inds = torch.nonzero(scores[:,j] > eval_thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:,j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n\n cls_boxes_left = pred_boxes_left[inds][:, j * 4:(j + 1) * 4]\n cls_boxes_right = pred_boxes_right[inds][:, j * 4:(j + 1) * 4]\n cls_dim_orien = dim_orien[inds][:, j * 5:(j + 1) * 5]\n \n cls_kpts = pred_kpts[inds]\n\n cls_dets_left = torch.cat((cls_boxes_left, cls_scores.unsqueeze(1)), 1)\n cls_dets_right = torch.cat((cls_boxes_right, cls_scores.unsqueeze(1)), 1)\n\n cls_dets_left = cls_dets_left[order]\n cls_dets_right = cls_dets_right[order]\n cls_dim_orien = cls_dim_orien[order]\n cls_kpts = cls_kpts[order] \n\n keep = nms(cls_boxes_left[order, :], cls_scores[order], cfg.TEST.NMS)\n keep = keep.view(-1).long()\n cls_dets_left = cls_dets_left[keep]\n cls_dets_right = cls_dets_right[keep]\n cls_dim_orien = cls_dim_orien[keep]\n cls_kpts = cls_kpts[keep]\n\n # optional operation, can check the regressed borderline keypoint using 2D box inference\n infered_kpts = kitti_utils.infer_boundary(im2show_left.shape, cls_dets_left.cpu().numpy())\n infered_kpts = torch.from_numpy(infered_kpts).type_as(cls_dets_left)\n for detect_idx in range(cls_dets_left.size()[0]):\n if cls_kpts[detect_idx,4] - cls_kpts[detect_idx,3] < \\\n 0.5*(infered_kpts[detect_idx,1]-infered_kpts[detect_idx,0]):\n cls_kpts[detect_idx,3:5] = infered_kpts[detect_idx]\n\n im2show_left = vis_detections(im2show_left, kitti_classes[j], \\\n cls_dets_left.cpu().numpy(), vis_thresh, cls_kpts.cpu().numpy())\n im2show_right = vis_detections(im2show_right, kitti_classes[j], \\\n cls_dets_right.cpu().numpy(), vis_thresh) \n\n # read intrinsic\n f = calib.p2[0,0]\n cx, cy = calib.p2[0,2], calib.p2[1,2]\n bl = (calib.p2[0,3] - calib.p3[0,3])/f\n\n boxes_all = cls_dets_left.new(0,5)\n kpts_all = cls_dets_left.new(0,5)\n poses_all = cls_dets_left.new(0,8)\n\n solve_tic = time.time()\n for detect_idx in range(cls_dets_left.size()[0]):\n if cls_dets_left[detect_idx, -1] > eval_thresh:\n box_left = cls_dets_left[detect_idx,0:4].cpu().numpy() # based on origin image\n box_right = cls_dets_right[detect_idx,0:4].cpu().numpy() \n kpts_u = cls_kpts[detect_idx,0]\n dim = cls_dim_orien[detect_idx,0:3].cpu().numpy()\n sin_alpha = cls_dim_orien[detect_idx,3]\n cos_alpha = cls_dim_orien[detect_idx,4]\n alpha = m.atan2(sin_alpha, cos_alpha)\n 
status, state = box_estimator.solve_x_y_z_theta_from_kpt(im2show_left.shape, calib, alpha, \\\n dim, box_left, box_right, cls_kpts[detect_idx].cpu().numpy())\n if status > 0: # not faild\n poses = im_left_data.data.new(8).zero_()\n xyz = np.array([state[0], state[1], state[2]])\n theta = state[3]\n poses[0], poses[1], poses[2], poses[3], poses[4], poses[5], poses[6], poses[7] = \\\n xyz[0], xyz[1], xyz[2], float(dim[0]), float(dim[1]), float(dim[2]), theta, alpha\n\n boxes_all = torch.cat((boxes_all,cls_dets_left[detect_idx,0:5].unsqueeze(0)),0)\n kpts_all = torch.cat((kpts_all,cls_kpts[detect_idx].unsqueeze(0)),0)\n poses_all = torch.cat((poses_all,poses.unsqueeze(0)),0)\n \n if boxes_all.dim() > 0:\n # solve disparity by dense alignment (enlarged image)\n succ, dis_final = dense_align.align_parallel(calib, im_info.data[0,2], \\\n im_left_data.data, im_right_data.data, \\\n boxes_all[:,0:4], kpts_all, poses_all[:,0:7])\n \n # do 3D rectify using the aligned disparity\n for solved_idx in range(succ.size(0)):\n if succ[solved_idx] > 0: # succ\n box_left = boxes_all[solved_idx,0:4].cpu().numpy()\n score = boxes_all[solved_idx,4].cpu().numpy()\n dim = poses_all[solved_idx,3:6].cpu().numpy()\n state_rect, z = box_estimator.solve_x_y_theta_from_kpt(im2show_left.shape, calib, \\\n poses_all[solved_idx,7].cpu().numpy(), dim, box_left, \\\n dis_final[solved_idx].cpu().numpy(), kpts_all[solved_idx].cpu().numpy())\n xyz = np.array([state_rect[0], state_rect[1], z])\n theta = state_rect[2]\n\n if score > vis_thresh:\n im_box = vis_utils.vis_box_in_bev(im_box, xyz, dim, theta, width=im2show_left.shape[0]*2)\n im2show_left = vis_utils.vis_single_box_in_img(im2show_left, calib, xyz, dim, theta)\n\n solve_time = time.time() - solve_tic\n\n sys.stdout.write('demo mode (Press Esc to exit!) \\r'\\\n .format(detect_time, solve_time))\n\n im2show = np.concatenate((im2show_left, im2show_right), axis=0)\n im2show = np.concatenate((im2show, im_box), axis=1)\n # cv2.imshow('result', im2show)\n cv2.imwrite('demo/result.png', im2show)\n\n k = cv2.waitKey(-1)\n if k == 27: # Esc key to stop\n print('exit!')\n sys.exit()\n\n\n\n\n\n" ]
[ [ "torch.LongTensor", "torch.max", "numpy.random.seed", "numpy.min", "numpy.asarray", "torch.load", "torch.cat", "torch.from_numpy", "numpy.concatenate", "torch.no_grad", "torch.sort", "torch.FloatTensor", "torch.nonzero", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fshart/aesara
[ "1ddf96a7b8e8503fb8773b09c3ca77483fd884c4", "1ddf96a7b8e8503fb8773b09c3ca77483fd884c4", "1ddf96a7b8e8503fb8773b09c3ca77483fd884c4", "1ddf96a7b8e8503fb8773b09c3ca77483fd884c4", "1ddf96a7b8e8503fb8773b09c3ca77483fd884c4", "1ddf96a7b8e8503fb8773b09c3ca77483fd884c4" ]
[ "aesara/misc/safe_asarray.py", "aesara/tensor/nnet/conv.py", "aesara/tensor/random/type.py", "aesara/graph/unify.py", "tests/tensor/test_casting.py", "tests/tensor/random/test_utils.py" ]
[ "\"\"\"\nHelper function to safely convert an array to a new data type.\n\"\"\"\n\n\nimport numpy as np\n\nfrom aesara.configdefaults import config\n\n\n__docformat__ = \"restructuredtext en\"\n\n\ndef _asarray(a, dtype, order=None):\n \"\"\"Convert the input to a Numpy array.\n\n This function is almost identical to ``numpy.asarray``, but it should be\n used instead of its numpy counterpart when a data type is provided in\n order to perform type conversion if required.\n The reason is that ``numpy.asarray`` may not actually update the array's\n data type to the user-provided type. For more information see ticket\n http://projects.scipy.org/numpy/ticket/870.\n\n In that case, we check that both dtype have the same string\n description (byte order, basic type, and number of bytes), and\n return a view with the desired dtype.\n\n This function's name starts with a '_' to indicate that it is meant to be\n used internally. It is imported so as to be available directly through\n _asarray\n \"\"\"\n if str(dtype) == \"floatX\":\n dtype = config.floatX\n dtype = np.dtype(dtype) # Convert into dtype object.\n rval = np.asarray(a, dtype=dtype, order=order)\n # Note that dtype comparison must be done by comparing their `num`\n # attribute. One cannot assume that two identical data types are pointers\n # towards the same object (e.g. under Windows this appears not to be the\n # case).\n if rval.dtype.num != dtype.num:\n # Type mismatch between the data type we asked for, and the one\n # returned by numpy.asarray.\n # If both types have the same string description (byte order, basic\n # type, and number of bytes), then it is safe to return a view.\n if dtype.str == rval.dtype.str:\n # Silent fix.\n return rval.view(dtype=dtype)\n else:\n # Unexpected mismatch: better know what is going on!\n raise TypeError(\n \"numpy.array did not return the data type we \"\n f\"asked for ({dtype} {dtype.str} #{dtype.num}), instead it returned type \"\n f\"{rval.dtype} {rval.str} #{rval.dtype.num}: function \"\n \"_asarray may need to be modified to handle this \"\n \"data type.\"\n )\n else:\n return rval\n", "\"\"\"\nContains an Op for convolving input images with a set of filters. This was\ndeveloped especially for Convolutional Neural Networks.\n\nFor related ops, including downsampling and subsampling, see\ntensor.signal and tensor.signal.pool.\n\nSee especially conv2d().\n\"\"\"\n\n\nimport logging\nimport warnings\n\nimport numpy as np\nfrom scipy.signal.signaltools import _bvalfromboundary, _valfrommode\nfrom scipy.signal.sigtools import _convolve2d\n\nimport aesara\nfrom aesara.graph.basic import Apply\nfrom aesara.graph.op import OpenMPOp\nfrom aesara.tensor import blas\nfrom aesara.tensor.basic import (\n as_tensor_variable,\n get_scalar_constant_value,\n patternbroadcast,\n)\nfrom aesara.tensor.exceptions import NotScalarConstantError\nfrom aesara.tensor.nnet.abstract_conv import get_conv_output_shape, get_conv_shape_1axis\nfrom aesara.tensor.type import discrete_dtypes, tensor\n\n\n__docformat__ = \"restructuredtext en\"\n_logger = logging.getLogger(\"aesara.tensor.nnet.conv\")\n\n\ndef conv2d(\n input,\n filters,\n image_shape=None,\n filter_shape=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n **kargs,\n):\n \"\"\"\n Deprecated, old conv2d interface.\n This function will build the symbolic graph for convolving a stack of\n input images with a set of filters. The implementation is modelled after\n Convolutional Neural Networks (CNN). 
It is simply a wrapper to the ConvOp\n but provides a much cleaner interface.\n\n Parameters\n ----------\n input : symbolic 4D tensor\n Mini-batch of feature map stacks, of shape\n (batch size, stack size, nb row, nb col)\n see the optional parameter image_shape\n filters: symbolic 4D tensor\n Set of filters used in CNN layer of shape\n (nb filters, stack size, nb row, nb col)\n see the optional parameter filter_shape\n border_mode : {'valid', 'full'}\n 'valid'only apply filter to complete patches of the image. Generates\n output of shape: image_shape - filter_shape + 1.\n 'full' zero-pads image to multiple of filter shape to generate output\n of shape: image_shape + filter_shape - 1.\n subsample: tuple of len 2\n Factor by which to subsample the output. Also called strides elsewhere.\n image_shape: None, tuple/list of len 4 of int, None or Constant variable\n The shape of the input parameter.\n Optional, used for optimization like loop unrolling\n You can put None for any element of the list to tell that this element\n is not constant.\n filter_shape : None, tuple/list of len 4 of int, None or Constant variable\n Optional, used for optimization like loop unrolling\n You can put None for any element of the list\n to tell that this element is not constant.\n kwargs\n Kwargs are passed onto ConvOp. Can be used to set the following:\n unroll_batch, unroll_kern, unroll_patch, openmp (see ConvOp doc).\n\n openmp: By default have the same value as\n config.openmp. For small image, filter,\n batch size, nkern and stack size, it can be\n faster to disable manually openmp. A fast and\n incomplete test show that with image size\n 6x6, filter size 4x4, batch size==1,\n n kern==1 and stack size==1, it is faster\n to disable it in valid mode. But if we\n grow the batch size to 10, it is faster\n with openmp on a core 2 duo.\n\n Returns\n -------\n symbolic 4D tensor\n Set of feature maps generated by convolutional layer. Tensor is\n of shape (batch size, nb filters, output row, output col).\n\n \"\"\"\n\n warnings.warn(\n \"aesara.tensor.nnet.conv.conv2d is deprecated.\"\n \" Use aesara.tensor.nnet.conv2d instead.\"\n )\n\n # accept Constant value for image_shape and filter_shape.\n if image_shape is not None:\n image_shape = list(image_shape)\n for i in range(len(image_shape)):\n if image_shape[i] is not None:\n try:\n image_shape[i] = get_scalar_constant_value(\n as_tensor_variable(image_shape[i])\n )\n except NotScalarConstantError:\n raise NotScalarConstantError(\n \"The convolution need that the shape\"\n \" information are constant values. We got\"\n \" {image_shape[i]} for the image_shape parameter\"\n )\n assert image_shape[i].dtype in discrete_dtypes\n image_shape[i] = int(image_shape[i])\n if filter_shape is not None:\n filter_shape = list(filter_shape)\n for i in range(len(filter_shape)):\n if filter_shape[i] is not None:\n try:\n filter_shape[i] = get_scalar_constant_value(\n as_tensor_variable(filter_shape[i])\n )\n except NotScalarConstantError:\n raise NotScalarConstantError(\n \"The convolution need that the shape\"\n \" information are constant values. 
We got\"\n \" {filter_shape[i]} for the filter_shape \"\n \"parameter\"\n )\n assert filter_shape[i].dtype in discrete_dtypes\n filter_shape[i] = int(filter_shape[i])\n\n if image_shape and filter_shape:\n try:\n if image_shape[1] is not None and filter_shape[1] is not None:\n assert image_shape[1] == filter_shape[1]\n except Exception:\n print(\"image \", image_shape, \" filters \", filter_shape)\n raise\n\n if filter_shape is not None:\n nkern = filter_shape[0]\n kshp = filter_shape[2:]\n else:\n nkern, kshp = None, None\n\n if image_shape is not None:\n bsize = image_shape[0]\n imshp = image_shape[1:]\n else:\n bsize, imshp = None, None\n\n op = ConvOp(\n output_mode=border_mode,\n dx=subsample[0],\n dy=subsample[1],\n imshp=imshp,\n kshp=kshp,\n nkern=nkern,\n bsize=bsize,\n **kargs,\n )\n\n return op(input, filters)\n\n\nclass ConvOp(OpenMPOp):\n r\"\"\"\n This Op serves a dual purpose: it can implement a vanilla 2D convolution\n (as taught in any signal processing class) or implement the\n convolutional layers found in Convolutional Neural Networks.\n\n In this setting, a set of 3D images is convolved with a set of 3D kernels,\n with the particularity that their leading dimensions are of equal length.\n Vanilla 2D convolution is treated as a special case of this.\n\n The input parameter represents a mini-batch of multiple images. Its shape is:\n batch size x num. input feature maps x image height x image width\n\n The kernel parameter represents a set of 3D kernels. Its shape is:\n number of filters x num. input images x filter height x filter width\n\n The output of ConvOp is a 4D tensor, generated as follows:\n output[b,k,:,:] = \\sum_i input[b,i,:,:] * filter[k,i,:,:] \\forall b,k\n where b is the mini-batch index, k the filter index and * is the\n convolution operator.\n\n The constructor initializes a ConvOp with given output_mode (full/valid).\n All other parameters are optional and are only used to generate more\n optimized c code, or to enable graph optimizers to optimally replace the\n ConvOp.\n\n NOTES ON OPTIMIZATION:\n There are two types of optimization. The first is the selection of the\n fastest algo when bsize and nkern are provided with imshp and kshp.\n By default we try to select the fastest version. You can specify it\n with the unroll_batch, unroll_kern, and unroll_patch parameter.\n\n The second type of optimization is hardcoding some dimensions into the\n code when all shape are know.\n This make a significant difference for the 'full' output_mode.\n\n Sometimes, the fastest implementation on x86-64 uses\n {unroll_batch=4, unroll_kern=4, unroll_patch=False}\n with all other shape parameters being provided.\n\n For optimizing other architectures, see:\n Kazushige Goto and Robert A. Van De Geijn, Anatomy of High-Performance\n Matrix Multiplication, (mr x nr). ACM Transactions on Mathematical\n Software, May 2008.\n Figure 12: (mr x nr). 
For x86 use 2x4, itanium 8x8, etc.\n\n Parameters\n ----------\n output_mode : {'valid', 'full'}\n 'valid' gives an output smaller then the image.\n 'full' gives an output bigger then the image.\n See 'border_mode' in conv2d's doc.\n\n Optional parameters: (will generate more optimal c code)\n\n imshp : tuple of len 2 or 3: 2 for 2d image, 3 for a stack of 2d images.\n Stacksize, nb image row, nb image col.\n kshp : tuple of len 2\n Nb kernel row, nb kernel col.\n nkern : int\n The number of kernel.\n bsize : int\n The size of the minibatch.\n dx : int\n Patch stride rows.\n dy : int\n Patch stride cols\n\n Params which select the version of code used:\n\n unroll_patch : bool\n Use a version of c_code that unroll the patch loop that don't\n request all shape information to work, but if all shape information\n are present, will use it to hardcode the value in the code for\n faster code.\n unroll_batch : int\n Use a version of c_code that unroll the batch (by unroll_batch)\n and the nkern (by unroll_kern) loop. The size must by a multiple\n of bsize or nkern respectively.\n unroll_kern : int\n Use a version of c_code that unroll the batch\n (by unroll_batch) and the nkern(by unroll_kern) loop. The size\n must by a multiple of bsize or nkern respectively.\n verbose : int\n Passed to GpuConv.\n version: int or str\n Passed to GpuConv, if version='no_fft', fft\n optimization will be deactivated at the op level.\n direction_hint: {'forward', 'bprop weights', 'bprop inputs'}\n Passed to GpuConv, used by graph optimizers to aid algorithm choice.\n\n The 3 following parameters are used internally when we generate\n the gradient when dx!=1 or dy!=1.\n\n imshp_logical\n Default None. None value is equivalent to imshp value.\n When imshp_logical != imshp, it tell we need to insert 0 in\n the image before we do the convolution. For example, when dx==dy==2\n and the image is [[1, 2], [3, 4]], we should make as if the image\n was [[1, 0, 2, 0], [0, 0, 0, 0], [3, 0, 4, 0], [0, 0, 0, 0]].\n Our python code insert the zero, but the c code optimize it.\n imshp_logical != imshp when taking the grad again the weights or\n the image when the output_mode is full and `dx != 1` or `dy != 1`.\n kshp_logical\n Idem but for kshp and used for the grad again the\n weights when the output_mode is valid and `dx != 1` or `dy != 1`.\n kshp_logical_top_aligned\n Used in the same case. Default to True.\n Set to False in the grad again the weight when the\n output_mode is full.\n\n \"\"\"\n\n __attrnames = [\n \"imshp\",\n \"kshp\",\n \"nkern\",\n \"bsize\",\n \"dx\",\n \"dy\",\n \"out_mode\",\n \"unroll_batch\",\n \"unroll_kern\",\n \"unroll_patch\",\n \"imshp_logical\",\n \"kshp_logical\",\n \"kshp_logical_top_aligned\",\n ]\n \"\"\"These attributes uniquely identify the behaviour of this op for\n given inputs. Do not set openmp here.\n \"\"\"\n\n # the value of speed_unroll_batch_kern,speed_unroll_patch_noshape,speed_unroll_patch_shape\n # have bean calculated on maggie36 when their is only 1 session logged on and only this was running.\n # It is an Intel(R) Xeon(R) CPU E5430 @ 2.66GHz. 
It is computer with aesara/tensor/nnet/tests/speed_test_conv.py\n # and took 5 minutes to run.\n # TODO: we should compute this table for each computer/os as this can change.\n # I saw on one computer that the speed with the shape can be slower than without!\n # using the real shape and the same dtype could also help.\n\n # unroll_batch, unroll_kern, valid time, full time\n speed_unroll_batch_kern = [\n (1, 1, 2.4661250114440918, 6.5472931861877441),\n (1, 2, 1.5869178771972656, 5.1499760150909424),\n (1, 3, 1.4270510673522949, 3.6593470573425293),\n (1, 4, 1.3373479843139648, 3.3451821804046631),\n (1, 5, 1.2818830013275146, 3.1444568634033203),\n (1, 6, 1.2521560192108154, 3.0256359577178955),\n (1, 10, 1.2134110927581787, 2.9174180030822754),\n (2, 1, 1.657214879989624, 4.5261678695678711),\n (2, 2, 1.2123160362243652, 2.9747390747070312),\n (2, 3, 1.0758891105651855, 2.5690360069274902),\n (2, 4, 1.0683329105377197, 2.4233770370483398),\n (2, 5, 1.0955719947814941, 2.3999948501586914),\n (2, 6, 1.5935721397399902, 2.6878271102905273),\n (2, 10, 1.8511250019073486, 3.2417428493499756),\n (3, 1, 1.5948119163513184, 3.631148099899292),\n (3, 2, 1.0761330127716064, 2.6011371612548828),\n (3, 3, 1.0551531314849854, 2.4200370311737061),\n (3, 4, 1.3930759429931641, 2.5211219787597656),\n (3, 5, 1.4330689907073975, 2.5704989433288574),\n (3, 6, 1.362138032913208, 2.5964410305023193),\n (3, 10, 1.6582000255584717, 2.9907989501953125),\n (4, 1, 1.4793620109558105, 3.3473429679870605),\n (4, 2, 1.0671560764312744, 2.4171769618988037),\n (4, 3, 1.2569692134857178, 2.2807950973510742),\n (4, 4, 1.3456289768218994, 2.6219108104705811),\n (4, 5, 1.4055080413818359, 2.4606490135192871),\n (4, 6, 1.372107982635498, 2.551663875579834),\n (4, 10, 1.599470853805542, 2.9172940254211426),\n (5, 1, 1.4115700721740723, 3.2077109813690186),\n (5, 2, 1.0635769367218018, 2.2648060321807861),\n (5, 3, 1.3842809200286865, 2.6135518550872803),\n (5, 4, 1.3470511436462402, 2.3852400779724121),\n (5, 5, 1.3539440631866455, 2.5245928764343262),\n (5, 6, 1.4037849903106689, 2.5985310077667236),\n (5, 10, 1.6120610237121582, 2.8127608299255371),\n (6, 1, 1.3623628616333008, 3.021122932434082),\n (6, 2, 1.1697649955749512, 2.6285450458526611),\n (6, 3, 1.2980999946594238, 2.4746189117431641),\n (6, 4, 1.3739941120147705, 2.5579929351806641),\n (6, 5, 1.3967819213867188, 2.5522029399871826),\n (6, 6, 1.4279270172119141, 2.6127138137817383),\n (6, 10, 1.605496883392334, 2.864037036895752),\n (10, 1, 1.6401121616363525, 2.970099925994873),\n (10, 2, 1.46710205078125, 2.7231831550598145),\n (10, 3, 1.4193780422210693, 2.6087639331817627),\n (10, 4, 1.4657118320465088, 2.6246678829193115),\n (10, 5, 1.5052611827850342, 2.6542458534240723),\n (10, 6, 1.5214400291442871, 2.7243161201477051),\n (10, 10, 1.6116268634796143, 2.956165075302124),\n ]\n\n # valid time, full time\n speed_unroll_patch_noshape = [2.0109100341796875, 5.8175678253173828]\n # valid time, full time\n speed_unroll_patch_shape = [1.2967290878295898, 5.5283889770507812]\n\n @staticmethod\n def has_all_shape(imshp, kshp, nkern=1, bsize=1):\n return (\n nkern is not None\n and bsize is not None\n and all(shp is not None for shp in imshp)\n and all(shp is not None for shp in kshp)\n )\n\n @staticmethod\n def getOutputShape(inshp, kshp, stride=(1, 1), mode=\"valid\"):\n \"\"\"\n Computes the output dimensions of convolving an image of shape \"inshp\"\n with kernels of shape \"kshp\". 
Accepts symbolic or integer shapes.\n Propagates `None`s (for unknown shapes).\n\n Parameters\n ----------\n inshp\n (rows,cols) of input image.\n kshp\n (rows,cols) of filters.\n mode: {'valid', 'full'}\n See 'border_mode' in conv2d's doc.\n\n Returns\n -------\n object\n (rows,cols) of output image.\n\n \"\"\"\n # The formula would be ceil((i + s * k - s * 1) / float(d)),\n # with s=1 for mode=='full' and s=-1 for mode=='valid'.\n # To support symbolic shapes, we express this with integer arithmetic.\n warnings.warn(\n \"The method `getOutputShape` is deprecated use\"\n \"`get_conv_output_shape` instead.\",\n stacklevel=2,\n )\n return tuple(\n get_conv_shape_1axis(i, k, mode, d) for i, k, d in zip(inshp, kshp, stride)\n )\n\n def __init__(\n self,\n imshp=None,\n kshp=None,\n nkern=None,\n bsize=None,\n dx=1,\n dy=1,\n output_mode=\"valid\",\n unroll_batch=None,\n unroll_kern=None,\n unroll_patch=None,\n imshp_logical=None,\n kshp_logical=None,\n kshp_logical_top_aligned=True,\n verbose=0,\n version=-1,\n direction_hint=\"forward\",\n openmp=None,\n ):\n # Deactivate fft_optimization at the op level if specified\n if version == \"no_fft\":\n self.fft_opt = False\n version = -1\n else:\n self.fft_opt = True\n\n # Expand unknown image / kernel shapes into tuples of Nones\n if imshp is None:\n imshp = (None, None, None)\n else:\n imshp = tuple(imshp)\n if kshp is None:\n kshp = (None, None)\n else:\n kshp = tuple(kshp)\n\n # Check imshp and kshp dimensionality\n if len(imshp) == 2:\n imshp = (1,) + imshp\n elif len(imshp) != 3:\n raise ValueError(f\"len(imshp) must be 2 or 3, got {len(imshp)}\")\n if len(kshp) != 2:\n raise ValueError(f\"len(kshp) must be 2, got {len(kshp)}\")\n\n # We must continue to consider None as 1 for backward compatibility.\n if dx is None:\n dx = 1\n if dy is None:\n dy = 1\n\n if int(dx) != dx:\n raise TypeError(\"ConvOp.__init__ param dx must be an int\", dx)\n dx = int(dx)\n\n if int(dy) != dy:\n raise TypeError(\"ConvOp.__init__ param dy must be an int\", dy)\n dy = int(dy)\n\n all_shape = self.has_all_shape(imshp, kshp, nkern, bsize)\n if (unroll_batch or unroll_kern) and not all_shape:\n raise ValueError(\n \"In ConvOp, when using unroll_batch and\"\n \" unroll_nkern, all shape are needed\"\n )\n\n # Init the openmp attribute\n super().__init__(openmp=openmp)\n if not all_shape or self.openmp:\n # Only this version is parallelized\n unroll_patch = True\n\n self.imshp = imshp\n self.kshp = kshp\n self.nkern = nkern\n self.bsize = bsize\n self.dx = dx\n self.dy = dy\n self.verbose = verbose\n self.version = version\n self.direction_hint = direction_hint\n\n # a triple\n if imshp_logical is None:\n self.imshp_logical = self.imshp\n else:\n imshp_logical = tuple(imshp_logical)\n if len(imshp_logical) != 3:\n raise ValueError(\n f\"len(imshp_logical) must be 3, got {len(imshp_logical)}\"\n )\n self.imshp_logical = imshp_logical\n\n # a pair\n if kshp_logical is None:\n self.kshp_logical = self.kshp\n else:\n kshp_logical = tuple(kshp_logical)\n if len(kshp_logical) != 2:\n raise ValueError(\n f\"len(kshp_logical) must be 2, got {len(kshp_logical)}\"\n )\n self.kshp_logical = kshp_logical\n\n # a bool\n self.kshp_logical_top_aligned = kshp_logical_top_aligned\n\n self.unroll_batch = unroll_batch\n self.unroll_kern = unroll_kern\n self.unroll_patch = unroll_patch\n\n if self.unroll_batch and not self.unroll_kern:\n self.unroll_kern = 1\n if self.unroll_kern and not self.unroll_batch:\n self.unroll_batch = 1\n\n # downcast unroll_batch if not a divisor of batch 
size\n if (\n self.unroll_batch is not None\n and self.unroll_batch > 0\n and self.bsize % self.unroll_batch != 0\n ):\n\n if self.bsize <= self.unroll_batch:\n self.unroll_batch = self.bsize\n else:\n # find the maximum value under unroll_batch that would work\n new = self.unroll_batch\n assert new >= 1\n while self.bsize % new != 0:\n new -= 1\n\n warnstr = (\n \"In ConvOp.__init__(): \"\n f\"unroll_batch({self.unroll_batch}) must be 0 or a divisor of\"\n f\" bsize({self.bsize}). We revert it to {new}. This\"\n \" won't change the result, but may make it slower.\"\n )\n _logger.warning(warnstr)\n\n self.unroll_batch = new\n\n # downcast unroll_kern if not a divisor of nb of kernel\n if (\n self.unroll_kern is not None\n and self.unroll_kern > 0\n and self.nkern % self.unroll_kern != 0\n ):\n\n if self.nkern <= self.unroll_kern:\n self.unroll_kern = self.nkern\n else:\n # find the maximum value under unroll_kern that would work\n new = self.unroll_kern\n assert new >= 1\n while self.nkern % new != 0:\n new -= 1\n\n warnstr = (\n \"In ConvOp.__init__(): \"\n f\"unroll_kern({self.unroll_kern}) must be 0 or a divisor of\"\n f\" nkern({self.nkern}). We revert it to {new}. This\"\n \" won't change the result, but may make it slower.\"\n )\n _logger.warning(warnstr)\n self.unroll_kern = new\n\n self.outshp = get_conv_output_shape(\n (None,) + self.imshp_logical,\n (\n None,\n None,\n )\n + self.kshp_logical,\n output_mode,\n (dx, dy),\n )[2:]\n self.fulloutshp = get_conv_output_shape(\n (None,) + self.imshp_logical,\n (\n None,\n None,\n )\n + self.kshp_logical,\n output_mode,\n (1, 1),\n )[2:]\n\n self.out_mode = output_mode\n\n if self.out_mode not in [\"valid\", \"full\"]:\n raise NotImplementedError(f\"Mode {self.out_mode} not implemented\")\n\n if any((shp is not None) and (shp <= 0) for shp in self.outshp):\n raise ValueError(\n \"Bad size for the output shape. Verify that [post-\"\n f\"supersampling] input shape ({self.imshp_logical}) and kern\"\n f\" shape({self.kshp_logical}) are ok. (Hint: kerns must fit inside\"\n \" image in valid mode)\"\n )\n\n if (\n self.unroll_kern is None\n and self.unroll_batch is None\n and self.unroll_patch is None\n ):\n # no version specified. 
Find the fastest version we have\n if self.bsize is None and self.nkern is None:\n self.unroll_patch = True\n elif self.bsize is not None and self.nkern is not None:\n bsize = self.bsize\n nkern = self.nkern\n mode_idx = 0\n if self.out_mode != \"valid\":\n mode_idx = 1\n if self.has_all_shape(self.imshp, self.kshp):\n time_unroll_patch = self.speed_unroll_patch_shape[mode_idx]\n else:\n time_unroll_patch = self.speed_unroll_patch_noshape[mode_idx]\n time_unroll_batch_kern = 9999999\n for i in range(len(self.speed_unroll_batch_kern)):\n if (\n bsize % self.speed_unroll_batch_kern[i][0] == 0\n and nkern % self.speed_unroll_batch_kern[i][1] == 0\n ):\n if (\n self.speed_unroll_batch_kern[i][2 + mode_idx]\n < time_unroll_batch_kern\n ):\n time_unroll_batch_kern = self.speed_unroll_batch_kern[i][\n 2 + mode_idx\n ]\n time_unroll_batch_kern_idx = i\n if time_unroll_patch < time_unroll_batch_kern:\n self.unroll_patch = True\n else:\n self.unroll_batch = self.speed_unroll_batch_kern[\n time_unroll_batch_kern_idx\n ][0]\n self.unroll_kern = self.speed_unroll_batch_kern[\n time_unroll_batch_kern_idx\n ][1]\n self.unroll_patch = False\n\n _logger.debug(\n \"AUTO FIND VERSION OF C_CODE OF CONV OP \" \"%s %s %s %s %s %s %s\",\n self.unroll_batch,\n self.unroll_kern,\n self.unroll_patch,\n self.bsize,\n self.nkern,\n time_unroll_patch,\n time_unroll_batch_kern,\n )\n\n self._rehash()\n\n def __eq__(self, other):\n if type(self) != type(other):\n return False\n for a in self.__attrnames:\n if getattr(self, a) != getattr(other, a):\n return False\n return True\n\n def __setstate__(self, d):\n super().__setstate__(d)\n self.direction_hint = d.get(\"direction_hint\", None)\n self._rehash()\n\n def _rehash(self):\n hashval = hash(type(self))\n for a in self.__attrnames:\n hashval = hashval ^ hash(getattr(self, a))\n self.__hashval = hashval\n\n def __hash__(self):\n return self.__hashval\n\n def __str__(self):\n return (\n \"ConvOp{\"\n + \",\".join(str((a, getattr(self, a))) for a in self.__attrnames)\n + \"}\"\n )\n\n def flops(self, inputs, outputs):\n \"\"\"\n Useful with the hack in profiling to print the MFlops.\n\n \"\"\"\n images, kerns = inputs\n (out,) = outputs\n assert images[1] == kerns[1]\n flops = 0\n if self.out_mode == \"valid\":\n # number of multiplications and additions per output pixel\n flops = kerns[2] * kerns[3] * 2\n # number of flops per output image\n flops *= out[2] * out[3]\n # number of patches multiplied\n flops *= images[1] * kerns[0] * images[0]\n else:\n flops = (\n images[0]\n * kerns[0]\n * images[1]\n * kerns[2]\n * kerns[3]\n * images[2]\n * images[3]\n * 2\n )\n return flops\n\n def make_node(self, inputs, kerns):\n # TODO: find a way to make ConvOp work for N-D (after NIPS09)\n \"\"\"\n Parameters\n ----------\n inputs\n 4 dim: batches x stacksize x rows x cols.\n kerns\n 4 dim: nkern x stackidx x rows x cols.\n\n \"\"\"\n _inputs = as_tensor_variable(inputs)\n _kerns = as_tensor_variable(kerns)\n # TODO: lift this restriction by upcasting either inputs or kerns\n if _inputs.ndim != 4:\n raise TypeError(\n \"ConvOp (make_node) requires input to be a 4D tensor;\"\n f' received \"{inputs}\" ({_inputs.ndim} dims)'\n )\n if _kerns.ndim != 4:\n raise TypeError(\"make_node requires a 4D tensor of kernels\")\n if _inputs.type.dtype != _kerns.type.dtype:\n raise NotImplementedError(\n \"The image and the kernel must have the same type. \"\n f\"inputs({_inputs.dtype}), kerns({_kerns.dtype})\"\n )\n bcastable23 = [self.outshp[0] == 1, self.outshp[1] == 1]\n output = tensor(\n dtype=_inputs.type.dtype,\n 
broadcastable=[_inputs.broadcastable[0], _kerns.broadcastable[0]]\n + bcastable23,\n )\n\n return Apply(self, [_inputs, _kerns], [output])\n\n def infer_shape(self, fgraph, node, input_shapes):\n imshp = input_shapes[0] # 4D image shape\n kshp = input_shapes[1] # 4D filter shape\n bsize, imshp = imshp[0], list(imshp[1:])\n nkern, kshp = kshp[0], list(kshp[2:])\n # replace symbolic shapes with known shapes\n if self.bsize is not None:\n bsize = self.bsize\n for i in [0, 1, 2]:\n if self.imshp_logical[i] is not None:\n imshp[i] = self.imshp_logical[i]\n if self.nkern is not None:\n nkern = self.nkern\n for i in [0, 1]:\n if self.kshp_logical[i] is not None:\n kshp[i] = self.kshp_logical[i]\n # infer output shape from what we have\n res = get_conv_output_shape(\n (bsize,) + tuple(imshp),\n (\n nkern,\n None,\n )\n + tuple(kshp),\n self.out_mode,\n (self.dx, self.dy),\n )\n return [res]\n\n def perform(self, node, inp, out):\n \"\"\"\n By default if len(img2d.shape)==3, we TODO\n\n \"\"\"\n img2d, filtersflipped = inp\n (z,) = out\n\n # TODO: move these back out to global scope when they no longer\n # cause an atexit error\n imshp = self.imshp\n if any(x is None for x in imshp):\n imshp = tuple(img2d.shape[1:])\n if imshp != img2d.shape[1:]:\n raise ValueError(\n \"The image shape provided at build time \"\n \"is different from the one passed at run time\",\n imshp,\n img2d.shape[1:],\n )\n kshp = self.kshp\n if any(x is None for x in kshp):\n kshp = tuple(filtersflipped.shape[2:])\n if kshp != filtersflipped.shape[2:]:\n raise ValueError(\n \"The filter shape provided at build time \"\n \"is different from the one passed at run time\",\n kshp,\n filtersflipped.shape[2:],\n )\n bsize = self.bsize\n if bsize is None:\n bsize = img2d.shape[0]\n elif bsize != img2d.shape[0]:\n raise ValueError(\n \"The batch size provided at build time \"\n \"is different from the one passed at run time\",\n bsize,\n img2d.shape[0],\n )\n nkern = self.nkern\n if nkern is None:\n nkern = filtersflipped.shape[0]\n elif nkern != filtersflipped.shape[0]:\n raise ValueError(\n \"The number of filters provided at build time \"\n \"is different from the one passed at run time\",\n nkern,\n filtersflipped.shape[0],\n )\n\n imshp_logical = self.imshp_logical\n if imshp_logical[0] is None:\n imshp_logical = (imshp[0],) + imshp_logical[1:]\n if imshp_logical[1] is None:\n imshp_logical = (imshp_logical[0], imshp[1], imshp_logical[2])\n if imshp_logical[2] is None:\n imshp_logical = imshp_logical[:2] + (imshp[2],)\n assert all(x is not None for x in imshp_logical)\n\n kshp_logical = self.kshp_logical\n if kshp_logical[0] is None:\n kshp_logical = (kshp[0], kshp_logical[1])\n if kshp_logical[1] is None:\n kshp_logical = (kshp_logical[0], kshp[1])\n assert all(x is not None for x in kshp_logical)\n\n if all(shp is not None for shp in self.fulloutshp):\n fulloutshp = tuple(self.fulloutshp)\n else:\n fulloutshp = get_conv_output_shape(\n (None,) + imshp_logical,\n (\n None,\n None,\n )\n + kshp_logical,\n self.out_mode,\n (1, 1),\n )[2:]\n\n if (\n z[0] is None\n or z[0].shape\n != (\n bsize,\n nkern,\n )\n + fulloutshp\n ):\n z[0] = np.zeros(\n (\n bsize,\n nkern,\n )\n + fulloutshp,\n dtype=img2d.dtype,\n )\n zz = z[0]\n\n stacklen = imshp[0]\n\n img2d = img2d.reshape((bsize,) + imshp)\n filtersflipped = filtersflipped.reshape((nkern, stacklen) + kshp)\n\n if self.imshp != self.imshp_logical:\n # assuming that to get from imshp to imshp logical we insert zeros in missing spots\n rstride = int(np.ceil(imshp_logical[1] / 
float(imshp[1])))\n cstride = int(np.ceil(imshp_logical[2] / float(imshp[2])))\n buf = np.zeros((bsize,) + imshp_logical, dtype=img2d.dtype)\n buf[:, :, ::rstride, ::cstride] = img2d\n img2d = buf\n del buf, rstride, cstride\n\n if kshp != kshp_logical:\n rstride = int(np.ceil(kshp_logical[0] / float(kshp[0])))\n cstride = int(np.ceil(kshp_logical[1] / float(kshp[1])))\n buf = np.zeros(\n (nkern, stacklen) + self.kshp_logical, dtype=filtersflipped.dtype\n )\n if self.kshp_logical_top_aligned:\n roffset = coffset = 0\n else:\n roffset = (\n kshp_logical[0] - (kshp[0] * rstride) - 1 + rstride\n ) % rstride\n coffset = (\n kshp_logical[1] - (kshp[1] * cstride) - 1 + cstride\n ) % cstride\n assert roffset >= 0\n assert coffset >= 0\n buf[:, :, roffset::rstride, coffset::cstride] = filtersflipped\n filtersflipped = buf\n del buf, rstride, cstride\n\n val = _valfrommode(self.out_mode)\n bval = _bvalfromboundary(\"fill\")\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n for b in range(bsize):\n for n in range(nkern):\n zz[b, n, ...].fill(0)\n for im0 in range(stacklen):\n # some cast generates a warning here\n zz[b, n, ...] += _convolve2d(\n img2d[b, im0, ...],\n filtersflipped[n, im0, ...],\n 1,\n val,\n bval,\n 0,\n )\n\n if False:\n if False and self.out_mode == \"full\":\n img2d2 = np.zeros(\n (\n bsize,\n stacklen,\n imshp[1] + 2 * kshp[0] - 2,\n imshp[2] + 2 * kshp[1] - 2,\n )\n )\n img2d2[\n :,\n :,\n kshp[0] - 1 : kshp[0] - 1 + imshp[1],\n kshp[1] - 1 : kshp[1] - 1 + imshp[2],\n ] = img2d\n img2d = img2d2\n # N_image_shape = image_data.shape\n\n for b in range(bsize):\n for n in range(nkern):\n zz[b, n, ...].fill(0)\n for im0 in range(stacklen):\n for row in range(0, zz.shape[2], self.dx):\n for col in range(0, zz.shape[3], self.dy):\n zz[b, n, row, col] += (\n img2d[\n b, im0, row : row + kshp[0], col : col + kshp[1]\n ]\n * filtersflipped[n, im0, ::-1, ::-1]\n ).sum()\n\n # We copy it to remove the Stride mismatch warning from DEBUG_MODE.\n # The copy make that we return an object with the same stride as the c version.\n # The copy don't affect the performance during our experience as in that case we\n # execute the c version which is much faster.\n if self.dx > 1 or self.dy > 1:\n zz = zz[:, :, 0 :: self.dx, 0 :: self.dy].copy()\n z[0] = zz\n\n def R_op(self, inputs, eval_points):\n rval = None\n if eval_points[0] is not None:\n rval = self.make_node(eval_points[0], inputs[1]).outputs[0]\n if eval_points[1] is not None:\n if rval is None:\n rval = self.make_node(inputs[0], eval_points[1]).outputs[0]\n else:\n rval += self.make_node(inputs[0], eval_points[1]).outputs[0]\n return [rval]\n\n def grad(self, inp, grads):\n inputs, kerns = inp\n (gz,) = grads\n\n if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:\n raise NotImplementedError(\"todo\")\n\n if self.out_mode == \"valid\" and (self.dx, self.dy) != (1, 1):\n raise NotImplementedError(\n \"ERROR: ConvOp.grad is now disabled for 'valid' convolutions with\"\n \" stride != (1, 1); call aesara.tensor.nnet.conv2d() instead.\"\n )\n\n if self.dx not in (1, 2) or self.dy not in (1, 2):\n raise NotImplementedError(\n \"ERROR: We disable ConvOp.grad now when output_mode is not\"\n \" 'valid' and dx or dy are greater than 2, as there is a bug\"\n \" in it. 
See `abstract_conv2d <>`_ for a version that supports this.\"\n )\n\n all_shape = self.has_all_shape(self.imshp, self.kshp, self.nkern, self.bsize)\n\n if not all_shape and (self.dx != 1 or self.dy != 1):\n raise ValueError(\n \"ConvOp.grad requires all the optional shape \"\n \"information when dx != 1 or dy != 1\"\n )\n\n # Determine gradient on kernels ########\n assert inputs.ndim == 4 and kerns.ndim == 4\n\n newin = inputs.dimshuffle((1, 0, 2, 3))\n newgz = gz.dimshuffle((1, 0, 2, 3))\n\n if self.out_mode == \"valid\":\n (img, filters) = (newin, newgz)\n kshp_logical = self.fulloutshp\n kshp_logical_top_aligned = False\n imshp_logical = None\n (bsize, nkern) = (self.imshp[0], self.nkern)\n imshp = (self.bsize, self.imshp[1], self.imshp[2])\n kshp = self.outshp\n elif self.out_mode == \"full\":\n (img, filters) = (newgz, newin)\n kshp_logical = None\n kshp_logical_top_aligned = True\n imshp_logical = (self.bsize, self.fulloutshp[0], self.fulloutshp[1])\n (bsize, nkern) = (self.nkern, self.imshp[0])\n imshp = (self.bsize, self.outshp[0], self.outshp[1])\n kshp = self.imshp[1:]\n else:\n raise NotImplementedError(\n \"Only [full,valid] modes are currently supported.\"\n )\n\n filters = filters[:, :, ::-1, ::-1] # flip them\n\n dw = ConvOp(\n imshp,\n kshp,\n nkern,\n bsize,\n 1,\n 1,\n output_mode=\"valid\",\n unroll_batch=None,\n unroll_kern=None,\n unroll_patch=None,\n imshp_logical=imshp_logical,\n kshp_logical=kshp_logical,\n kshp_logical_top_aligned=kshp_logical_top_aligned,\n version=self.version,\n direction_hint=\"bprop weights\",\n verbose=self.verbose,\n )\n\n dw = dw(img, filters)\n\n if all_shape:\n assert all(o == k for o, k in zip(dw.owner.op.outshp, self.kshp))\n if self.out_mode == \"valid\":\n # before DimShuffle, dw is of shape visdim x nkern x kshp[0] x kshp[1]\n dw = dw.dimshuffle((1, 0, 2, 3))\n dw = dw[:, :, ::-1, ::-1]\n\n # Determine gradient on inputs ########\n mode = \"valid\"\n if self.out_mode != \"full\":\n mode = \"full\"\n\n filters = kerns.dimshuffle((1, 0, 2, 3))\n filters = filters[:, :, ::-1, ::-1]\n\n nkern = self.imshp[0]\n imshp = (self.nkern, self.outshp[0], self.outshp[1])\n imshp_logical = (self.nkern, self.fulloutshp[0], self.fulloutshp[1])\n\n din = ConvOp(\n imshp,\n self.kshp,\n nkern,\n self.bsize,\n 1,\n 1,\n output_mode=mode,\n unroll_batch=None,\n unroll_kern=None,\n unroll_patch=None,\n imshp_logical=imshp_logical,\n kshp_logical=None,\n version=-1, # since we change the mode, we don't forward the version.\n direction_hint=\"bprop inputs\",\n verbose=self.verbose,\n )\n\n din = din(gz, filters)\n\n assert all(\n o is None or o == i for o, i in zip(din.owner.op.outshp, self.imshp[1:])\n )\n\n # din and dw should have the same broadcasting pattern as the\n # parameters they are the gradient of (resp. 
inputs and kerns).\n din = patternbroadcast(din, inputs.broadcastable)\n dw = patternbroadcast(dw, kerns.broadcastable)\n return [din, dw]\n\n def c_headers(self, **kwargs):\n return [\"<numpy/noprefix.h>\", \"<iostream>\", \"<sstream>\"]\n\n def c_code_cache_version(self):\n return (15, self.openmp, blas.blas_header_version())\n\n def c_support_code(self, **kwargs):\n return (\n \"\"\"\n#define STRIDES(arr) (PyArray_STRIDES(arr))\n#define FULL 2\n#define SAME 1\n#define VALID 0\n#define MOD %\nusing namespace std;\n\"\"\"\n + blas.blas_header_text()\n )\n\n def use_blas(self):\n \"\"\"Return True if we will generate code that use gemm.\"\"\"\n # the gemm version only support that case\n if self.out_mode == \"valid\" and self.dx == 0 and self.dy == 0:\n # We use a faster version in those case.\n if (\n self.imshp != self.imshp_logical\n or self.kshp != self.kshp_logical\n or self.unroll_patch\n or self.unroll_batch > 0\n or self.unroll_kern > 0\n ):\n return False\n return True\n return False\n\n def c_libraries(self, **kwargs):\n if self.use_blas():\n return blas.ldflags()\n return []\n\n def c_no_compile_args(self, **kwargs):\n # when the ksph==(1,1) gcc 4.3.0 segfault during the\n # compilation with -O3. This don't happen at -O2\n if aesara.link.c.cmodule.gcc_version() in [\"4.3.0\"] and self.kshp == (1, 1):\n return [\"-O3\"]\n else:\n return []\n\n def c_compile_args(self, **kwargs):\n ret = []\n\n if self.use_blas():\n ret = blas.ldflags(libs=False, flags=True)\n if aesara.link.c.cmodule.gcc_version() in [\"4.3.0\"] and self.kshp == (1, 1):\n ret += [\"-O2\"]\n # Add the -fopenmp flags\n ret += super().c_compile_args(**kwargs)\n\n return ret\n\n def c_lib_dirs(self, **kwargs):\n if self.use_blas():\n return blas.ldflags(libs=False, libs_dir=True)\n return []\n\n def c_header_dirs(self, **kwargs):\n if self.use_blas():\n return blas.ldflags(libs=False, include_dir=True)\n return []\n\n def c_code(self, node, name, inp, out, sub):\n img2d, filtersflipped = inp\n (z,) = out\n if node.inputs[0].type.dtype != node.inputs[1].type.dtype:\n raise NotImplementedError()\n assert node.inputs[0].type.dtype == node.inputs[1].type.dtype\n d = locals()\n d.update(sub)\n\n all_shape = self.has_all_shape(\n self.imshp, self.kshp, self.nkern, self.bsize\n ) and self.has_all_shape(self.imshp_logical, self.kshp_logical)\n\n d[\"self_out_mode\"] = self.out_mode\n d[\"self_dx\"] = self.dx\n d[\"self_dy\"] = self.dy\n d[\"mode\"] = self.out_mode.upper()\n d[\"affectation\"] = \"=\"\n\n # Default values, will be overridden if the shape info is provided\n d[\"self_bsize\"] = f\"PyArray_DIMS({d['img2d']})[0]\"\n d[\"self_nkern\"] = f\"PyArray_DIMS({d['filtersflipped']})[0]\"\n d[\"self_outshp0\"] = \"-1\"\n d[\"self_outshp1\"] = \"-1\"\n d[\"self_imshp0\"] = f\"PyArray_DIMS({d['img2d']})[1]\"\n d[\"self_imshp1\"] = f\"PyArray_DIMS({d['img2d']})[2]\"\n d[\"self_imshp2\"] = f\"PyArray_DIMS({d['img2d']})[3]\"\n d[\"self_kshp0\"] = f\"PyArray_DIMS({d['filtersflipped']})[2]\"\n d[\"self_kshp1\"] = f\"PyArray_DIMS({d['filtersflipped']})[3]\"\n d[\"assert_size\"] = \"\"\n\n # Override the default value if we have it\n if self.kshp[0] is not None:\n expected = d[\"self_kshp0\"]\n value = self.kshp[0]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the number of rows in the filter \"\n \"(%%ld) isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, 
**sub\n )\n d[\"self_kshp0\"] = self.kshp[0]\n if self.kshp[1] is not None:\n expected = d[\"self_kshp1\"]\n value = self.kshp[1]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the number of columns in the filter \"\n \"(%%ld) isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_kshp1\"] = self.kshp[1]\n if self.outshp[0] is not None:\n expected = \"dim_zz[0]\"\n value = self.outshp[0]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the number of rows in the output \"\n \"(%%ld) isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_outshp0\"] = self.outshp[0]\n if self.outshp[1] is not None:\n expected = \"dim_zz[1]\"\n value = self.outshp[1]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the number of columns in the output \"\n \"(%%ld) isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_outshp1\"] = self.outshp[1]\n if self.imshp[0] is not None:\n expected = d[\"self_imshp0\"]\n value = self.imshp[0]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the image stack size (%%ld) \"\n \"isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n expected = \"kerns_dim[1]\"\n value = self.imshp[0]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the kernel stack size (%%ld) \"\n \"isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_imshp0\"] = self.imshp[0]\n if self.imshp[1] is not None:\n expected = d[\"self_imshp1\"]\n value = self.imshp[1]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the number of rows in the image \"\n \"(%%ld) isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_imshp1\"] = self.imshp[1]\n if self.imshp[2] is not None:\n expected = d[\"self_imshp2\"]\n value = self.imshp[2]\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the number of columns in the image \"\n \"(%%ld) isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_imshp2\"] = self.imshp[2]\n if self.bsize is not None:\n expected = d[\"self_bsize\"]\n value = self.bsize\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the batch size (%%ld) \"\n \"isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_bsize\"] = 
self.bsize\n if self.nkern is not None:\n expected = d[\"self_nkern\"]\n value = self.nkern\n d[\n \"assert_size\"\n ] += \"\"\"\nif(%(value)s != %(expected)s){\n PyErr_Format(PyExc_ValueError,\n \"The hardcoded shape for the number of kernels in the filter \"\n \"(%%ld) isn't the run time shape (%%ld).\",\n (long)%(value)s, (long)%(expected)s);\n %(fail)s;\n}\n \"\"\" % dict(\n expected=expected, value=value, **sub\n )\n d[\"self_nkern\"] = self.nkern\n\n # Other hard coded stuff only if we have all shapes\n if all_shape:\n d[\"self_kshp_logical_r\"] = self.kshp_logical[0]\n d[\"self_kshp_logical_c\"] = self.kshp_logical[1]\n d[\"self_kshp_logical_stride_r\"] = int(\n np.ceil(self.kshp_logical[0] / float(self.kshp[0]))\n )\n d[\"self_kshp_logical_stride_c\"] = int(\n np.ceil(self.kshp_logical[1] / float(self.kshp[1]))\n )\n d[\"self_imshp_logical_r\"] = self.imshp_logical[1]\n # numpy.B. 1 not 0\n d[\"self_imshp_logical_c\"] = self.imshp_logical[2]\n # numpy.B. 2 not 1\n d[\"self_imshp_logical_stride_r\"] = int(\n np.ceil(self.imshp_logical[1] / float(self.imshp[1]))\n )\n d[\"self_imshp_logical_stride_c\"] = int(\n np.ceil(self.imshp_logical[2] / float(self.imshp[2]))\n )\n if not self.imshp[0] == 1:\n d[\"affectation\"] = \"+=\"\n d[\"all_shape\"] = \"1\"\n d[\"dim_zz_const\"] = \"const\"\n d[\"dim_zz_affect\"] = \"\"\n else:\n d[\"affectation\"] = \"+=\"\n d[\"all_shape\"] = \"0\"\n d[\"dim_zz_const\"] = \"\"\n d[\"dim_zz_affect\"] = (\n \"\"\"\n if (mode == FULL) {\n dim_zz[0] = (int)ceil((dim_im[0]+dim_ker0-1)/float(%(self_dx)s));\n dim_zz[1] = (int)ceil((dim_im[1]+dim_ker1-1)/float(%(self_dy)s));\n } else {\n dim_zz[0] = (int)ceil((dim_im[0]-dim_ker0+1)/float(%(self_dx)s));\n dim_zz[1] = (int)ceil((dim_im[1]-dim_ker1+1)/float(%(self_dy)s));\n }\n\"\"\"\n % d\n )\n d[\"assert_size\"] += (\n \"\"\"\n// Check the stack size of the filter and images are equals\nif(kerns_dim[1] != img2d_dim[1]){\n PyErr_Format(PyExc_ValueError,\n \"the filter stack size (%%ld) and image stack size (%%ld) differ\",\n (long)kerns_dim[1], (long)img2d_dim[1]);\n %(fail)s;\n}\n \"\"\"\n % sub\n )\n\n if self.kshp_logical_top_aligned:\n d[\"self_kshp_logical_offset_r\"] = 0\n d[\"self_kshp_logical_offset_c\"] = 0\n elif all_shape:\n rstride = d[\"self_kshp_logical_stride_r\"]\n cstride = d[\"self_kshp_logical_stride_c\"]\n d[\"self_kshp_logical_offset_r\"] = (\n self.kshp_logical[0] - (self.kshp[0] * rstride) - 1 + rstride\n ) % rstride\n d[\"self_kshp_logical_offset_c\"] = (\n self.kshp_logical[1] - (self.kshp[1] * cstride) - 1 + cstride\n ) % cstride\n del rstride, cstride\n\n if node.inputs[0].type.dtype == \"float32\":\n d[\"type\"] = \"float\"\n elif node.inputs[0].type.dtype == \"float64\":\n d[\"type\"] = \"double\"\n else:\n raise NotImplementedError(\n f\"Type {node.inputs[0].type.dtype} not implemented\"\n )\n d[\"gemm\"] = \"dgemm_\"\n if not d[\"type\"] == \"double\":\n d[\"gemm\"] = \"sgemm_\"\n\n if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:\n if self.verbose:\n _logger.debug(\n \"return imshp!=imshp_logical or\"\n \" self.kshp != self.kshp_logical shape version\"\n )\n return _conv_op_code_a % d\n\n if self.unroll_patch:\n if self.verbose:\n _logger.debug(\"return unroll patch version. 
all_shape=%s\", all_shape)\n return _conv_op_code_unroll_patch % d\n if (self.unroll_batch is not None and self.unroll_batch > 0) or (\n self.unroll_kern is not None and self.unroll_kern > 0\n ):\n assert self.unroll_batch > 0\n assert self.unroll_kern > 0\n if self.verbose:\n _logger.debug(\n \"return unrolled batch (%s) and kern code (%s)\",\n str(self.unroll_batch),\n str(self.unroll_kern),\n )\n return gen_conv_code_unroll_batch_kern(\n d, self.unroll_batch, self.unroll_kern\n )\n\n # TODO: should we choose the unroll size automatically with the bigger divisor under 5?\n if self.out_mode == \"valid\" and self.dx == 0 and self.dy == 0:\n if self.verbose:\n _logger.debug(\"return gemm version\")\n return _conv_op_code_valid_gemm % d\n else:\n if self.verbose:\n _logger.debug(\"return no gemm version\")\n return _conv_op_code_a % d\n\n\n_conv_op_code_a = \"\"\"\nconst int mode=%(mode)s;\nint typenum=0, typenum_f=0;\nPyArrayObject *ain1=NULL, *ain2=NULL;\nPyArrayObject *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;\nconst %(type)s fill_value = 0;\n\nint type_im=PyArray_TYPE(%(img2d)s);\nint type_ker=PyArray_TYPE(%(filtersflipped)s);\n\nnpy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};\nnpy_intp dim_im_phys[2]={%(self_imshp1)s,%(self_imshp2)s};\nnpy_intp dim_im_log[2]={%(self_imshp_logical_r)s,%(self_imshp_logical_c)s};\nnpy_intp dim_ker_phys[2]={%(self_kshp0)s,%(self_kshp1)s};\nnpy_intp dim_ker_log[2]={%(self_kshp_logical_r)s,%(self_kshp_logical_c)s};\n\nPyArray_Dims img2d_shape;\nnpy_intp img2d_dim[4]={1,1,0,0};\nimg2d_shape.ptr=img2d_dim;\nimg2d_shape.len=4;\n\nPyArray_Dims kerns_shape;\nnpy_intp kerns_dim[4]={1,1,0,0};\nkerns_shape.ptr=kerns_dim;\nkerns_shape.len=4;\nPyObject *img2d=NULL, *contig, *filtersflipped=NULL;\n\n\nif(PyArray_NDIM(%(img2d)s)==2){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==3){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==4){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else {\n PyErr_SetString(PyExc_ValueError, \"img don't have a good shape\");\n %(fail)s;\n}\n\nif(PyArray_NDIM(%(filtersflipped)s)==3){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else if(PyArray_NDIM(%(filtersflipped)s)==4){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else{\n std::stringstream temp;\n temp << \"nddim=\"<<PyArray_NDIM(%(filtersflipped)s);\n std::string param = temp.str();\n PyErr_SetString(PyExc_ValueError,\n (\"kernel don't have a good shape. 
\" + param).c_str());\n %(fail)s;\n}\n\n%(assert_size)s\n\nimg2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);\nimg2d_arr = (PyArrayObject*)img2d;\nif ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))\n || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){\n contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));\n Py_DECREF(img2d);\n img2d = contig;\n img2d_arr = (PyArrayObject*)img2d;\n if (!PyArray_ISCONTIGUOUS(img2d_arr)){\n PyErr_SetString(PyExc_ValueError, \"img2d isn't contiguous\");\n %(fail)s;\n }\n}\n\nfiltersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);\nfiltersflipped_arr = (PyArrayObject*)filtersflipped;\nif ((PyArray_STRIDES(filtersflipped_arr)[3] != (npy_intp)sizeof(%(type)s))\n || (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*(npy_intp)sizeof(%(type)s))){\n contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));\n Py_DECREF(filtersflipped);\n filtersflipped = contig;\n filtersflipped_arr = (PyArrayObject*)filtersflipped;\n if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){\n PyErr_SetString(PyExc_ValueError, \"filtersflipped isn't contiguous\");\n %(fail)s;\n }\n}\n\nif(mode != VALID && mode != FULL){\n PyErr_SetString(PyExc_ValueError,\n \"invalid mode, only full and valid are supported\");\n %(fail)s;\n}\ntypenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);\ntypenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);\nif (typenum < 0) {PyErr_SetString(PyExc_ValueError, \"Invalid type\"); %(fail)s;}\nif (typenum != typenum_f) {\n PyErr_SetString(PyExc_ValueError, \"Input types must match\");\n %(fail)s;\n}\n\nif (!img2d)\n{\n PyErr_SetString(PyExc_AssertionError, \"!img2d\");\n %(fail)s;\n}\nif (!filtersflipped)\n{\n PyErr_SetString(PyExc_AssertionError, \"!filtersflipped\");\n %(fail)s;\n}\n\nif ((!%(z)s)\n || *PyArray_DIMS(%(z)s)!=4\n ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)\n ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)\n ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])\n ||(PyArray_DIMS(%(z)s)[3] != dim_zz[1])\n ||!PyArray_ISCONTIGUOUS(%(z)s)\n )\n{\n {Py_XDECREF(%(z)s);}\n npy_intp dims[4] = {0,0,0,0};\n dims[0]=%(self_bsize)s;\n dims[1]=%(self_nkern)s;\n dims[2]=dim_zz[0];\n dims[3]=dim_zz[1];\n %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);\n}else{\n //PyArray_FILLWBYTE((PyObject*)%(z)s,0);\n}\nz_arr = (PyArrayObject*) %(z)s;\n\nint Os[2];\nOs[0]=%(self_outshp0)s;\nOs[1]=%(self_outshp1)s;\n\n//assertions\nif (!PyArray_ISCONTIGUOUS(%(z)s))\n{\n PyErr_SetString(PyExc_AssertionError, \"Output (%(z)s) not contiguous\");\n %(fail)s;\n}\n\nfor(int b=0;b< %(self_bsize)s;b++){\n for(int n_kern=0;n_kern<%(self_nkern)s;n_kern++){\n\n %(type)s * __restrict__ out=(%(type)s *)(PyArray_GETPTR2(z_arr,b,n_kern));\n for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out[i] = 0;\n\n for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){\n\n const %(type)s * __restrict__ in=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b,stack_size));\n const %(type)s * __restrict__ hvals=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern,stack_size));\n\n\n for (int iter_m=0; iter_m < Os[0]; iter_m++) {\n // Reposition index into input image based on requested output size\n //row position in logical output image\n int pos_m = iter_m*%(self_dx)s;\n //row anchor in logical input image (we will loop upward from here)\n int new_m;\n if (mode == FULL) new_m = pos_m ;\n else new_m = (pos_m+dim_ker_log[0]-1);\n\n for (int iter_n=0; iter_n < 
Os[1]; iter_n++) { // loop over columns\n // current col position in logical output image\n int pos_n=iter_n*%(self_dy)s;\n %(type)s sum=0;\n\n // Sum over kernel, if index into image is out of bounds\n // fill with the value\n // loop over logical rows in kernel\n for (int j_log=0; j_log < %(self_kshp_logical_r)s; j_log++) {\n // ind0_log: row position in logical input image\n int ind0_log = (new_m-j_log);\n\n if ((j_log < %(self_kshp_logical_offset_r)s) ||\n (j_log - %(self_kshp_logical_offset_r)s) MOD %(self_kshp_logical_stride_r)s)\n continue;\n\n if (ind0_log MOD %(self_imshp_logical_stride_r)s)\n continue;\n\n int j_phys = ((j_log- %(self_kshp_logical_offset_r)s) /\n %(self_kshp_logical_stride_r)s);\n int ind0_phys = (ind0_log / %(self_imshp_logical_stride_r)s);\n //std::cerr <<\"j_log\" << j_log << \" j_phys \" << j_phys << \" \" << ind0_phys << \"\\\\n\";\n\n if(mode==FULL){\n //This is a pointer to the current row of the kernel\n const %(type)s * idx_hvals=&hvals[j_phys*dim_ker_phys[1]];\n if(ind0_log < 0 || ind0_log >= dim_im_log[0]){\n // the current row of the kernel is off the image\n }else{\n int k = max((int)(pos_n-dim_im_log[1])+1,0);\n int max_k=min(pos_n+1,(int)dim_ker_log[1]);\n const %(type)s * idx_in=&in[ind0_phys*dim_im_phys[1]];\n for (int ind1_log=pos_n-k; k<max_k; k++,ind1_log--) {\n if (1)\n {\n if ((k < %(self_kshp_logical_offset_c)s) ||\n (k - %(self_kshp_logical_offset_c)s) MOD\n %(self_kshp_logical_stride_c)s)\n continue;\n\n if (ind1_log MOD\n %(self_imshp_logical_stride_c)s)\n continue;\n }\n sum += idx_hvals[(k-%(self_kshp_logical_offset_c)s) /\n %(self_kshp_logical_stride_c)s] *\n idx_in[ind1_log / %(self_imshp_logical_stride_c)s];\n }\n }\n }else{ // mode==VALID\n //JB: should be dim_im[1] right? (was dim_im[0])\n const %(type)s* idx_in=&in[ind0_phys*dim_im_phys[1]];\n const %(type)s* idx_hvals=&hvals[j_phys*dim_ker_phys[1]];\n int new_n = (pos_n+dim_ker_log[1]-1);\n if (%(self_imshp_logical_stride_c)s != 1) // a general loop\n {\n for (int k=0,last=new_n; k < dim_ker_log[1]; k++,last--) {\n if ((k < %(self_kshp_logical_offset_c)s) ||\n (k - %(self_kshp_logical_offset_c)s) MOD\n %(self_kshp_logical_stride_c)s)\n continue;\n\n else if (last MOD %(self_imshp_logical_stride_c)s)\n continue;\n else\n {\n sum+=idx_hvals[(k-%(self_kshp_logical_offset_c)s) /\n %(self_kshp_logical_stride_c)s] *\n idx_in[last/%(self_imshp_logical_stride_c)s];\n }\n }\n }\n else // self_imshp_stride_c == 1\n {\n int offset = %(self_kshp_logical_offset_c)s;\n int k_phys=0;\n for (int k_log=offset,last=new_n-offset;\n k_log < dim_ker_log[1]; ) {\n sum += idx_hvals[k_phys]*idx_in[last];\n ++k_phys;\n last -= %(self_kshp_logical_stride_c)s;\n k_log += %(self_kshp_logical_stride_c)s;\n }\n }\n }\n }//for j_log\n out[iter_m*dim_zz[1]+iter_n] %(affectation)s sum;\n }//for iter_n\n }//for iter_m\n }//for stack_size\n if (0 && (mode==FULL)){\n for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i)\n std::cout << \" \" << out[i];\n std::cout << \"\\\\n\";\n }\n }//for n_kern\n}//for b\nPy_XDECREF(img2d);\nPy_XDECREF(filtersflipped);\n\"\"\"\n\n\n#########\n# ConvOp c_code for valid mode (uses gemm)\n#########\n\n_conv_op_code_valid_gemm = \"\"\"\nint typenum=0, typenum_f=0;\nPyArrayObject *ain1=NULL, *ain2=NULL, *img2d_arr=NULL, *z_arr=NULL;\nconst int NKERN = %(self_nkern)s;\n\nint type_im=PyArray_TYPE(%(img2d)s);\nint type_ker=PyArray_TYPE(%(filtersflipped)s);\n\nnpy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};\nnpy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};\nconst npy_intp 
dim_ker0=%(self_kshp0)s;\nconst npy_intp dim_ker1=%(self_kshp1)s;\n\nPyArray_Dims img2d_shape;\nnpy_intp img2d_dim[4]={1,1,0,0};\nimg2d_shape.ptr=img2d_dim;\nimg2d_shape.len=4;\n\nPyArray_Dims kerns_shape;\nnpy_intp kerns_dim[4]={1,1,0,0};\nkerns_shape.ptr=kerns_dim;\nkerns_shape.len=4;\nPyObject *img2d=NULL, *contig;\n\nif(PyArray_NDIM(%(img2d)s)==2){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==3){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==4){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else {\n PyErr_SetString(PyExc_ValueError, \"img don't have a good shape\");\n %(fail)s;\n}\n\nif(PyArray_NDIM(%(filtersflipped)s)==3){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else if(PyArray_NDIM(%(filtersflipped)s)==4){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else{\n std::stringstream temp;\n temp << \"nddim=\"<<PyArray_NDIM(%(filtersflipped)s);\n std::string param = temp.str();\n PyErr_SetString(PyExc_ValueError,\n (\"kernel don't have a good shape. \" + param).c_str());\n %(fail)s;\n}\nif (NKERN != kerns_dim[0])\n{\n PyErr_SetString(PyExc_NotImplementedError, \"nonsense nkern\");\n %(fail)s;\n}\n\nimg2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);\nimg2d_arr = (PyArrayObject*)img2d;\nif ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))\n || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){\n contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));\n Py_DECREF(img2d);\n img2d = contig;\n img2d_arr = (PyArrayObject*)img2d;\n if (!PyArray_ISCONTIGUOUS(img2d_arr)){\n PyErr_SetString(PyExc_ValueError, \"img2d isn't contiguous\");\n %(fail)s;\n }\n}\n\ntypenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);\ntypenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);\nif (typenum < 0) {PyErr_SetString(PyExc_ValueError, \"Invalid type\"); %(fail)s;}\nif (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, \"Input types must match\"); %(fail)s;}\n\nif (!img2d) {\n PyErr_SetString(PyExc_ValueError, \"Null argument img2d\");\n %(fail)s;\n}\nif ((!%(z)s)\n || *PyArray_DIMS(%(z)s)!=4\n ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)\n ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)\n ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])\n || (PyArray_DIMS(%(z)s)[3] != dim_zz[1])\n )\n{\n {Py_XDECREF(%(z)s);}\n npy_intp dims[4] = {0,0,0,0};\n dims[0]=%(self_bsize)s;\n dims[1]=%(self_nkern)s;\n dims[2]=dim_zz[0];\n dims[3]=dim_zz[1];\n %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);\n}else{\n PyArray_FILLWBYTE((PyObject*)%(z)s,0);\n}\nz_arr = (PyArrayObject*) %(z)s;\n\n%(assert_size)s\n\nint Os[2];\nOs[0] = dim_im[0]-dim_ker0+1;\nOs[1] = dim_im[1]-dim_ker1+1;\n\n// allocate a temporary buffer for storing the inner product of each nth kernel row\n// with each row of an image\n{\n%(type)s * kbuf = (%(type)s *)malloc((Os[0] * NKERN + PyArray_Size((PyObject*)%(filtersflipped)s))* (npy_intp)sizeof(%(type)s));\nint kbufstride = 
NKERN;\n%(type)s * myfilters = kbuf + Os[0] * NKERN;\n\n//copy out filtersflipped into filters un-flipped format\n//std::cerr << \"__filling myfilters__\\\\n\";\nfor(int i=0;i < kerns_dim[0];++i){\n for(int j=0;j < kerns_dim[1];++j){\n for(int k=0;k < kerns_dim[2];++k){\n for(int l=0;l < kerns_dim[3];++l){\n %(type)s * ff = ((PyArray_NDIM(%(filtersflipped)s)) == 3)\n ? (%(type)s *)PyArray_GETPTR3(%(filtersflipped)s, i, kerns_dim[2]-1-k, kerns_dim[3]-1-l)\n : (%(type)s *)PyArray_GETPTR4(%(filtersflipped)s, i, j, kerns_dim[2]-1-k, kerns_dim[3]-1-l);\n myfilters[i * (kerns_dim[1]*kerns_dim[2]*kerns_dim[3])\n + j * (kerns_dim[2]*kerns_dim[3])\n + k * (kerns_dim[3])\n + l] = ff[0];\n //std::cerr << \" \" << ff[0];\n }\n //std::cerr << \"\\\\n\";\n }\n //std::cerr << \"(end of stack/batch \" <<j << \"/\" << i << \" ) \\\\n\";\n }\n}\n\n//std::cerr << \"-----new loop ----\\\\n\";\nfor(int b=0;b< %(self_bsize)s;b++){\n for (int img_col = 0; img_col < Os[1]; ++img_col){\n for (int filter_row = 0; filter_row < kerns_dim[2]; ++filter_row){\n for (int stackidx = 0; stackidx < %(self_imshp0)s; ++stackidx){\n %(type)s * img_colview =\n (%(type)s *)(PyArray_GETPTR4(img2d, b, stackidx, filter_row, img_col));\n %(type)s * filter_rows = myfilters + stackidx * (kerns_dim[2]*kerns_dim[3]) +\n filter_row * kerns_dim[3];\n //std::cerr << \"filterview offset: \" << filter_rows - myfilters << \"\\\\n\";\n\n char N = 'N'; char T = 'T';\n int Nz0 = Os[0];\n int Nz1 = NKERN;\n int K = kerns_dim[3];\n %(type)s alpha = 1.0;\n %(type)s beta = stackidx ? 1.0 : 0.0;\n int imgview_stride = dim_im[1];\n int filter_rows_stride =kerns_dim[1]*kerns_dim[2]*kerns_dim[3];\n //remember, Fortran wants a column-major interpretation\n assert(PyArray_STRIDES(img2d)[3] == (npy_intp)sizeof(%(type)s));\n\n if (0){\n std::cerr << \"b \" << b << \" img_col \" << img_col << \" filterrow \" << filter_row << \" stackidx \" <<stackidx << \"\\\\n\";\n std::cerr << \"colview (physical layout) stride: \" << imgview_stride << \"\\\\n\";\n for (int ii = 0; ii < Nz0; ++ii){\n for (int jj = 0; jj < K; ++jj){\n std::cerr << \" \" << img_colview[ii * imgview_stride + jj];\n }\n std::cerr << \"\\\\n\";\n }\n std::cerr << \"filterview (\"<<filter_row<<\"'th rows) stride: \" << filter_rows_stride << \"\\\\n\";\n for (int ii = 0; ii < Nz1; ++ii){\n for (int jj = 0; jj < K; ++jj){\n std::cerr << \" \" << filter_rows[ii * filter_rows_stride + jj];\n }\n std::cerr << \"\\\\n\";\n }\n\n std::cerr << Nz1 << \" \" << Nz0 << \" \" << K << \"\\\\n\" ;\n }\n\n %(gemm)s(&T, &N,\n &Nz1, &Nz0, &K,\n &alpha,\n filter_rows, &filter_rows_stride,\n img_colview, &imgview_stride,\n &beta, kbuf, &kbufstride);\n\n if (0){\n std::cerr << \"z (logical layout) beta\" << beta << \"\\\\n\";\n for (int ii = 0; ii < Nz0; ++ii){\n for (int jj = 0; jj < Nz1; ++jj){\n std::cerr << \" \" << kbuf[ii * kbufstride + jj];\n }\n std::cerr << \"\\\\n\";\n }\n }\n }\n // now kbuf the sum over the stack, put it into the outbuf\n for (int img_row = 0; img_row < Os[0]; ++img_row) {\n for (int kernel_idx = 0; kernel_idx < NKERN; ++kernel_idx) {\n %(type)s * z_p = (%(type)s *)PyArray_GETPTR4(%(z)s, b, kernel_idx, img_row, img_col);\n if (0)\n {\n if (b >= PyArray_DIMS(%(z)s)[0]) %(fail)s;\n if (kernel_idx >= PyArray_DIMS(%(z)s)[1]) %(fail)s;\n if (img_row >= PyArray_DIMS(%(z)s)[2]) %(fail)s;\n if (img_col >= PyArray_DIMS(%(z)s)[3]) %(fail)s;\n }\n z_p[0] += kbuf[img_row * kbufstride + kernel_idx];\n }\n }\n }\n }\n}\nfree(kbuf);\n}\nPy_XDECREF(img2d);\n\"\"\"\n\n\ndef 
gen_conv_code_unroll_batch_kern(d, unroll_bsize=1, unroll_ksize=1):\n \"\"\"\n c_code for ConvOp that unroll the batch size loop.\n\n \"\"\"\n assert unroll_bsize > 0 and unroll_ksize > 0\n if (\n \"unroll_bsize\" in d\n or \"unroll_ksize\" in d\n or \"unroll_iter\" in d\n or \"unroll_biter\" in d\n or \"unroll_kiter\" in d\n ):\n raise ValueError(\n \"We can't use this dictionary as we will overwrite some of its content\"\n )\n d = d.copy()\n\n d[\"unroll_bsize\"] = unroll_bsize\n d[\"unroll_ksize\"] = unroll_ksize\n\n def my_dup(st, size):\n s = \"\"\n for i in range(size):\n d[\"unroll_iter\"] = i\n s += st % d\n return s + \"\\n\"\n\n def my_dup2(st):\n s = \"\"\n iter = 0\n for i in range(unroll_bsize):\n d[\"unroll_biter\"] = i\n for j in range(unroll_ksize):\n d[\"unroll_kiter\"] = j\n d[\"unroll_iter\"] = iter\n iter += 1\n s += st % d\n return s + \"\\n\"\n\n ret = (\n \"\"\"\nconst int mode=%(mode)s;\nint typenum=0, typenum_f=0;\nPyArrayObject *ain1=NULL, *ain2=NULL, *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;;\nconst %(type)s fill_value = 0;\n\nint type_im=PyArray_TYPE(%(img2d)s);\nint type_ker=PyArray_TYPE(%(filtersflipped)s);\n\nnpy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};\nnpy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};\nconst npy_intp dim_ker0=%(self_kshp0)s;\nconst npy_intp dim_ker1=%(self_kshp1)s;\n\nPyArray_Dims img2d_shape;\nnpy_intp img2d_dim[4]={1,1,0,0};\nimg2d_shape.ptr=img2d_dim;\nimg2d_shape.len=4;\n\nPyArray_Dims kerns_shape;\nnpy_intp kerns_dim[4]={1,1,0,0};\nkerns_shape.ptr=kerns_dim;\nkerns_shape.len=4;\nPyObject *img2d=NULL, *contig, *filtersflipped=NULL;\n\nif(PyArray_NDIM(%(img2d)s)==2){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==3){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==4){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else {\n std::stringstream temp;\n temp << \"nddim=\"<<PyArray_NDIM(%(img2d)s);\n std::string param = temp.str();\n PyErr_SetString(PyExc_ValueError,\n (\"img don't have a good shape. 
\" + param).c_str());\n %(fail)s;\n}\n\nif(PyArray_NDIM(%(filtersflipped)s)==3){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else if(PyArray_NDIM(%(filtersflipped)s)==4){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else{\n PyErr_SetString(PyExc_ValueError, \"kernel don't have a good shape\");\n %(fail)s;\n}\n\n%(assert_size)s\n\nimg2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);\nimg2d_arr = (PyArrayObject*)img2d;\nif ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))\n || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){\n contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));\n Py_DECREF(img2d);\n img2d = contig;\n img2d_arr = (PyArrayObject*)img2d;\n if (!PyArray_ISCONTIGUOUS(img2d_arr)){\n PyErr_SetString(PyExc_ValueError, \"img2d isn't contiguous\");\n %(fail)s;\n }\n}\n\nfiltersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);\nfiltersflipped_arr = (PyArrayObject*)filtersflipped;\nif ((PyArray_STRIDES(filtersflipped_arr)[3] != (npy_intp)sizeof(%(type)s))\n || (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*(npy_intp)sizeof(%(type)s))){\n contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));\n Py_DECREF(filtersflipped);\n filtersflipped = contig;\n filtersflipped_arr = (PyArrayObject*)filtersflipped;\n if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){\n PyErr_SetString(PyExc_ValueError, \"filtersflipped isn't contiguous\");\n %(fail)s;\n }\n}\n\nif(mode != VALID && mode != FULL){\n PyErr_SetString(PyExc_ValueError, \"invalid mode, only full and valid are supported\"); %(fail)s;\n}\ntypenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);\ntypenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);\nif (typenum < 0) {PyErr_SetString(PyExc_ValueError, \"Invalid type\"); %(fail)s;}\nif (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, \"Input types must match\"); %(fail)s;}\n\nif (!img2d)\n{\n PyErr_SetString(PyExc_AssertionError, \"!img2d\");\n %(fail)s;\n}\nif (!filtersflipped)\n{\n PyErr_SetString(PyExc_AssertionError, \"!filtersflipped\");\n %(fail)s;\n}\n\nif ((!%(z)s)\n || *PyArray_DIMS(%(z)s)!=4\n ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)\n ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)\n ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])\n ||(PyArray_DIMS(%(z)s)[3] != dim_zz[1])\n ||!PyArray_ISCONTIGUOUS(%(z)s)\n )\n{\n {Py_XDECREF(%(z)s);}\n npy_intp dims[4] = {0,0,0,0};\n dims[0]=%(self_bsize)s;\n dims[1]=%(self_nkern)s;\n dims[2]=dim_zz[0];\n dims[3]=dim_zz[1];\n %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);\n}else{\n //PyArray_FILLWBYTE((PyObject*)%(z)s,0);\n}\nz_arr = (PyArrayObject*) %(z)s;\n\nint Os[2];\nOs[0]=%(self_outshp0)s;\nOs[1]=%(self_outshp1)s;\n\n//assertions\nif (!PyArray_ISCONTIGUOUS(%(z)s))\n{\n PyErr_SetString(PyExc_AssertionError, \"Output (%(z)s) not contiguous\");\n %(fail)s;\n}\n\nfor(int b=0;b< %(self_bsize)s ;b+=%(unroll_bsize)s){\n for(int n_kern=0;n_kern<%(self_nkern)s;n_kern+=%(unroll_ksize)s){\n\n\"\"\"\n % d\n )\n ret += my_dup2(\n \"%(type)s * __restrict__ out%(unroll_iter)s=(%(type)s *)(PyArray_GETPTR2(z_arr,b+%(unroll_biter)s,n_kern+%(unroll_kiter)s));\"\n )\n ret += my_dup(\n \"for (int i = 0; i < 
dim_zz[0]*dim_zz[1]; ++i) out%(unroll_iter)s[i] = 0;\",\n unroll_bsize * unroll_ksize,\n )\n ret += (\n \"\"\"\n for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){\n\"\"\"\n % d\n )\n ret += my_dup(\n \"const %(type)s * __restrict__ in%(unroll_iter)d=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b+%(unroll_iter)s,stack_size));\",\n unroll_bsize,\n )\n ret += my_dup(\n \"const %(type)s * __restrict__ hvals%(unroll_iter)s=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern+%(unroll_iter)s,stack_size));\",\n unroll_ksize,\n )\n ret += (\n \"\"\"\n\n int new_m;\n\n for (int iter_m=0; iter_m < Os[0]; iter_m++) {\n // Reposition index into input image based on requested output size\n int pos_m = iter_m*%(self_dx)s;//The position of the patch in the image\n if (mode == FULL) new_m = pos_m ;\n else new_m = (pos_m+dim_ker0-1);\n\n for (int iter_n=0; iter_n < Os[1]; iter_n++) { // loop over columns\n int pos_n=iter_n*%(self_dy)s;\n \"\"\"\n % d\n )\n ret += my_dup(\"%(type)s sum%(unroll_iter)s=0;\", unroll_bsize * unroll_ksize)\n ret += (\n \"\"\"\n\n // Sum over kernel, if index into image is out of bounds\n // fill with the value\n for (int j=0; j < dim_ker0; j++) {\n int ind0 = (new_m-j);\n\n if(mode==FULL){\n\"\"\"\n % d\n )\n ret += my_dup(\n \"const %(type)s * idx_hvals%(unroll_iter)s=&hvals%(unroll_iter)s[j*dim_ker1];\",\n unroll_ksize,\n )\n ret += (\n \"\"\"\n if(ind0 < 0 || ind0 >= dim_im[0]){\n if(fill_value!=0)\n for (int k=0; k < dim_ker1; k++) {\n\"\"\"\n % d\n )\n ret += my_dup2(\"sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;\")\n ret += (\n \"\"\"\n }\n }else{\n //do the part where kernel is to the right of the img\n\n int k=0,max_k=max((int)(pos_n-dim_im[1])+1,0);\n if(fill_value!=0){\n\n for(k=0;k<max_k;k++){\n\"\"\"\n % d\n )\n ret += my_dup2(\"sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;\")\n ret += (\n \"\"\"\n }\n }else {k=max_k;}\n\n //do the part where the kernel is on the img\n max_k=min(pos_n+1,(int)dim_ker1);\n\"\"\"\n % d\n )\n ret += my_dup(\n \"const %(type)s * idx_in%(unroll_iter)s=&in%(unroll_iter)s[ind0*dim_im[1]];\",\n unroll_bsize,\n )\n ret += (\n \"\"\"\n for (int ind1=pos_n-k; k<max_k; k++,ind1--) {\n\n\"\"\"\n % d\n )\n ret += my_dup2(\n \"sum%(unroll_iter)s+= idx_hvals%(unroll_kiter)s[k] * idx_in%(unroll_biter)s[ind1];\"\n )\n ret += (\n \"\"\"\n }\n //do the part to the left of the img\n if(fill_value!=0)\n for(;k<dim_ker1;k++){\n\"\"\"\n % d\n )\n ret += my_dup2(\"sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;\")\n ret += (\n \"\"\"\n }\n }\n }else{//valid mode\n\"\"\"\n % d\n )\n ret += my_dup(\n \"const %(type)s* idx_in%(unroll_iter)s=&in%(unroll_iter)s[ind0*dim_im[1]];\",\n unroll_bsize,\n )\n ret += my_dup(\n \"const %(type)s* idx_hvals%(unroll_iter)s=&hvals%(unroll_iter)s[j*dim_ker1];\",\n unroll_ksize,\n )\n ret += (\n \"\"\"\n int new_n = (pos_n+dim_ker1-1);\n\n for (int k=0,last=new_n; k < dim_ker1; k++,last--) {\n\"\"\"\n % d\n )\n ret += my_dup2(\n \"sum%(unroll_iter)s+=idx_hvals%(unroll_kiter)s[k]*idx_in%(unroll_biter)s[last];\"\n )\n ret += (\n \"\"\"\n }\n }\n\n }//for j\n\"\"\"\n % d\n )\n ret += my_dup(\n \"out%(unroll_iter)s[iter_m*dim_zz[1]+iter_n] %(affectation)s sum%(unroll_iter)s;\",\n unroll_bsize * unroll_ksize,\n )\n ret += \"\"\"\n }//for n\n }//for m\n }//for stack_size\n }//for n_kern\n}//for b\nPy_XDECREF(img2d);\nPy_XDECREF(filtersflipped);\n\"\"\"\n return ret\n\n\n_conv_op_code_unroll_patch = \"\"\"\nconst int mode=%(mode)s;\nint typenum=0, 
typenum_f=0;\nPyArrayObject *ain1=NULL, *ain2=NULL, *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;\nconst %(type)s fill_value = 0;//only value of 0 are currently tested and correctly implemented\n\nint type_im=PyArray_TYPE(%(img2d)s);\nint type_ker=PyArray_TYPE(%(filtersflipped)s);\n\nconst npy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};\n//The following line caused gcc 4.3.0 20080428 (Red Hat 4.3.0-8) to crash\n//const npy_intp dim_ker[2]={%(self_kshp0)s,%(self_kshp1)s};\n// The next line had gcc don't crash.\nconst npy_intp dim_ker0=%(self_kshp0)s;\nconst npy_intp dim_ker1=%(self_kshp1)s;\n%(dim_zz_const)s npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};\n\n%(dim_zz_affect)s\nPyArray_Dims img2d_shape;\nnpy_intp img2d_dim[4]={1,1,0,0};\nimg2d_shape.ptr=img2d_dim;\nimg2d_shape.len=4;\n\nPyArray_Dims kerns_shape;\nnpy_intp kerns_dim[4]={1,1,0,0};\nkerns_shape.ptr=kerns_dim;\nkerns_shape.len=4;\nPyObject *img2d=NULL, *contig, *filtersflipped=NULL;\n\nif(PyArray_NDIM(%(img2d)s)==2){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==3){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else if(PyArray_NDIM(%(img2d)s)==4){\n img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];\n img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];\n img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];\n img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];\n}else {\n PyErr_Format(PyExc_ValueError,\n \"image don't have a good number of dimensions %%d. \", PyArray_NDIM(%(filtersflipped)s));\n %(fail)s;\n}\n\nif(PyArray_NDIM(%(filtersflipped)s)==3){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else if(PyArray_NDIM(%(filtersflipped)s)==4){\n kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];\n kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];\n kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];\n kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];\n}else{\n PyErr_Format(PyExc_ValueError,\n \"kernel don't have a good number of dimensions %%d. 
\", PyArray_NDIM(%(filtersflipped)s));\n %(fail)s;\n}\n\n%(assert_size)s\n\nimg2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);\nimg2d_arr = (PyArrayObject*)img2d;\nif ((PyArray_STRIDES(img2d_arr)[3] != sizeof(%(type)s))\n || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*sizeof(%(type)s))){\n contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));\n Py_DECREF(img2d);\n img2d = contig;\n img2d_arr = (PyArrayObject*)img2d;\n if (!PyArray_ISCONTIGUOUS(img2d_arr)){\n PyErr_SetString(PyExc_ValueError, \"img2d isn't contiguous\");\n %(fail)s;\n }\n}\n\nfiltersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);\nfiltersflipped_arr = (PyArrayObject*)filtersflipped;\nif ((PyArray_STRIDES(filtersflipped_arr)[3] != sizeof(%(type)s))\n || (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*sizeof(%(type)s))){\n contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));\n Py_DECREF(filtersflipped);\n filtersflipped = contig;\n filtersflipped_arr = (PyArrayObject*)filtersflipped;\n if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){\n PyErr_SetString(PyExc_ValueError, \"filtersflipped isn't contiguous\");\n %(fail)s;\n }\n}\n\nif(mode != VALID && mode != FULL){\n PyErr_SetString(PyExc_ValueError, \"invalid mode, only full and valid are supported\"); %(fail)s;\n}\n\nif(dim_zz[0]<=0 || dim_zz[1]<=0){\nPyErr_Format(PyExc_ValueError,\n \"Output dimensions are not valid %%ldx%%ld\",(long int)dim_zz[0],(long int)dim_zz[1]);\n %(fail)s;\n}\n\ntypenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);\ntypenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);\nif (typenum < 0) {PyErr_SetString(PyExc_ValueError, \"Invalid type\"); %(fail)s;}\nif (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, \"Input types must match\"); %(fail)s;}\n\nif (!img2d) %(fail)s;\nif (!filtersflipped) %(fail)s;\nif ((!%(z)s)\n || *PyArray_DIMS(%(z)s)!=4\n ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)\n ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)\n ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])\n || (PyArray_DIMS(%(z)s)[3] != dim_zz[1])\n )\n{\n if (%(z)s) Py_DECREF(%(z)s);\n npy_intp dims[4] = {0,0,0,0};\n if(!dims) %(fail)s;\n dims[0]=%(self_bsize)s;\n dims[1]=%(self_nkern)s;\n dims[2]=dim_zz[0];\n dims[3]=dim_zz[1];\n %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);\n}else{\n //PyArray_FILLWBYTE((PyObject*)%(z)s,0);\n}\nz_arr = (PyArrayObject*) %(z)s;\n\n// assert the output is C-contiguous\nif (!PyArray_ISCONTIGUOUS(%(z)s))\n{\n PyErr_SetString(PyExc_AssertionError, \"Output (%(z)s) not contiguous\");\n %(fail)s;\n}\n\n//The if on the number of loop make a speed up for small array.\n//with g++ 4.5.1. 
The compiler should be smart enough to do this himself!\n#pragma omp parallel for schedule(static) if(%(self_bsize)s * %(self_nkern)s > 1)\n// We merge the 2 loop into one to make it easier to parallelize on both\n// This is the equivalent of those 2 lines.\n//for(int b=0;b< %(self_bsize)s;b++){\n// for(int n_kern=0;n_kern<%(self_nkern)s;n_kern++){\nfor(int batch_kern_idx=0;\n batch_kern_idx < %(self_bsize)s * %(self_nkern)s;\n batch_kern_idx++){\n int b = batch_kern_idx / %(self_nkern)s;\n int n_kern = batch_kern_idx %% %(self_nkern)s;\n\n %(type)s * __restrict__ out=(%(type)s *)(PyArray_GETPTR2(z_arr,b,n_kern));\n for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out[i] = 0;\n\n for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){\n\n const %(type)s * __restrict__ in=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b,stack_size));\n const %(type)s * __restrict__ hvals=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern,stack_size));\n\n int new_m;\n\n for (int iter_m=0; iter_m < dim_zz[0]; iter_m++) {\n // Reposition index into input image based on requested output size\n int pos_m = iter_m*%(self_dx)s;//The position of the patch in the image\n if (mode == FULL) new_m = pos_m ;\n else new_m = (pos_m+dim_ker0-1);\n\n for (int iter_n=0; iter_n < dim_zz[1]; iter_n++) { // loop over columns\n int pos_n=iter_n*%(self_dy)s;\n %(type)s sum=0;\n %(type)s sum2=0;\n %(type)s sum3=0;\n %(type)s sum4=0;\n int nb_sum=0;\n // Sum over kernel, if index into image is out of bounds\n // fill with the value\n for (int j=0; j < dim_ker0; j++) {\n int ind0 = (new_m-j);\n\n if(mode==FULL){\n const %(type)s * idx_hvals=&hvals[j*dim_ker1];\n if(ind0 < 0 || ind0 >= dim_im[0]){\n if(fill_value!=0)\n for (int k=0; k < dim_ker1; k++) {\n sum+= idx_hvals[k] * fill_value;\n }\n }else{\n //do the part where kernel is to the right of the img\n int k=0,max_k=max((int)(pos_n-dim_im[1])+1,0);\n if(fill_value!=0){\n\n for(k=0;k<max_k;k++){\n sum+= idx_hvals[k]*fill_value;\n }\n }else {k=max_k;}\n\n //do the part where the kernel is on the img\n max_k=min(pos_n+1,(int)dim_ker1);\n const %(type)s * idx_in=&in[ind0*dim_im[1]];\n\n if(iter_n + 4*%(self_dy)s < dim_zz[1]\n && iter_n>dim_ker1-1\n && iter_n<dim_im[1]-dim_ker1+1-3){\n nb_sum=4;\n for (int ind1=pos_n-k; k<max_k; k++,ind1--) {\n sum+=idx_hvals[k]*idx_in[ind1];\n sum2+=idx_hvals[k]*idx_in[ind1+%(self_dy)s];\n sum3+=idx_hvals[k]*idx_in[ind1+2*%(self_dy)s];\n sum4+=idx_hvals[k]*idx_in[ind1+3*%(self_dy)s];\n }\n }else if(iter_n + 2*%(self_dy)s < dim_zz[1]\n && iter_n>dim_ker1-1\n && iter_n<dim_im[1]-dim_ker1+1){\n nb_sum=2;\n for (int ind1=pos_n-k; k<max_k; k++,ind1--) {\n sum+=idx_hvals[k]*idx_in[ind1];\n sum2+=idx_hvals[k]*idx_in[ind1+%(self_dy)s];\n }\n }else{\n nb_sum=1;\n /*\n %(type)s sum_=0;\n if((k-max_k) & 0x1 != 0){\n sum+= idx_hvals[k] * idx_in[pos_n-k];\n }\n for (int ind1=pos_n-k; k<max_k; k+=2,ind1-=2) {\n sum+= idx_hvals[k] * idx_in[ind1];\n sum_+= idx_hvals[k+1] * idx_in[ind1-1];\n }\n sum+=sum_;\n */\n for (int ind1=pos_n-k; k<max_k; k++,ind1--) {\n sum+=idx_hvals[k]*idx_in[ind1];\n }\n }\n //do the part to the left of the img\n if(fill_value!=0)\n for(;k<dim_ker1;k++) sum+= idx_hvals[k]*fill_value;\n }\n }else{//valid mode\n const %(type)s* idx_in=&in[ind0*dim_im[1]];\n const %(type)s* idx_hvals=&hvals[j*dim_ker1];\n if(iter_n + 4*%(self_dy)s < dim_zz[1]){\n nb_sum=4;\n for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {\n sum+=idx_hvals[k]*idx_in[im_idx];\n sum2+=idx_hvals[k]*idx_in[im_idx+%(self_dy)s];\n 
sum3+=idx_hvals[k]*idx_in[im_idx+2*%(self_dy)s];\n sum4+=idx_hvals[k]*idx_in[im_idx+3*%(self_dy)s];\n }\n }else if(iter_n + 2*%(self_dy)s < dim_zz[1]){\n nb_sum=2;\n for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {\n sum+=idx_hvals[k]*idx_in[im_idx];\n sum2+=idx_hvals[k]*idx_in[im_idx+%(self_dy)s];\n }\n }else{\n nb_sum=1;\n for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {\n sum+=idx_hvals[k]*idx_in[im_idx];\n }\n }\n }//else valid mode\n }//for j\n switch(nb_sum){\n case 4: out[iter_m*dim_zz[1]+iter_n+3] %(affectation)s sum4;\n case 3: out[iter_m*dim_zz[1]+iter_n+2] %(affectation)s sum3;\n case 2: out[iter_m*dim_zz[1]+iter_n+1] %(affectation)s sum2;\n case 1: out[iter_m*dim_zz[1]+iter_n] %(affectation)s sum;\n }\n iter_n+=nb_sum-1;\n }//for iter_n\n }//for iter_m\n }//for stack_size\n}//for b and n_kern\n\nPy_XDECREF(img2d);\nPy_XDECREF(filtersflipped);\n\"\"\"\n", "import numpy as np\n\nimport aesara\nfrom aesara.graph.type import Type\n\n\ngen_states_keys = {\n \"MT19937\": ([\"state\"], [\"key\", \"pos\"]),\n \"PCG64\": ([\"state\", \"has_uint32\", \"uinteger\"], [\"state\", \"inc\"]),\n \"Philox\": (\n [\"state\", \"buffer\", \"buffer_pos\", \"has_uint32\", \"uinteger\"],\n [\"counter\", \"key\"],\n ),\n \"SFC64\": ([\"state\", \"has_uint32\", \"uinteger\"], [\"state\"]),\n}\n\n# We map bit generators to an integer index so that we can avoid using strings\nnumpy_bit_gens = {0: \"MT19937\", 1: \"PCG64\", 2: \"Philox\", 3: \"SFC64\"}\n\n\nclass RandomType(Type):\n r\"\"\"A Type wrapper for `numpy.random.Generator` and `numpy.random.RandomState`.\"\"\"\n\n @classmethod\n def filter(cls, data, strict=False, allow_downcast=None):\n if cls.is_valid_value(data, strict):\n return data\n else:\n raise TypeError()\n\n @staticmethod\n def may_share_memory(a, b):\n return a._bit_generator is b._bit_generator\n\n\nclass RandomStateType(RandomType):\n r\"\"\"A Type wrapper for `numpy.random.RandomState`.\n\n The reason this exists (and `Generic` doesn't suffice) is that\n `RandomState` objects that would appear to be equal do not compare equal\n with the ``==`` operator.\n\n This `Type` also works with a ``dict`` derived from\n `RandomState.get_state(legacy=False)`, unless the ``strict`` argument to `Type.filter`\n is explicitly set to ``True``.\n\n \"\"\"\n\n def __repr__(self):\n return \"RandomStateType\"\n\n @staticmethod\n def is_valid_value(a, strict):\n if isinstance(a, np.random.RandomState):\n return True\n\n if not strict and isinstance(a, dict):\n gen_keys = [\"bit_generator\", \"gauss\", \"has_gauss\", \"state\"]\n state_keys = [\"key\", \"pos\"]\n\n for key in gen_keys:\n if key not in a:\n return False\n\n for key in state_keys:\n if key not in a[\"state\"]:\n return False\n\n state_key = a[\"state\"][\"key\"]\n if state_key.shape == (624,) and state_key.dtype == np.uint32:\n return True\n\n return False\n\n @staticmethod\n def values_eq(a, b):\n sa = a if isinstance(a, dict) else a.get_state(legacy=False)\n sb = b if isinstance(b, dict) else b.get_state(legacy=False)\n\n def _eq(sa, sb):\n for key in sa:\n if isinstance(sa[key], dict):\n if not _eq(sa[key], sb[key]):\n return False\n elif isinstance(sa[key], np.ndarray):\n if not np.array_equal(sa[key], sb[key]):\n return False\n else:\n if sa[key] != sb[key]:\n return False\n\n return True\n\n return _eq(sa, sb)\n\n def __eq__(self, other):\n return type(self) == type(other)\n\n def __hash__(self):\n return hash(type(self))\n\n\n# Register `RandomStateType`'s C code for 
`ViewOp`.\naesara.compile.register_view_op_c_code(\n RandomStateType,\n \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n \"\"\",\n 1,\n)\n\nrandom_state_type = RandomStateType()\n\n\nclass RandomGeneratorType(RandomType):\n r\"\"\"A Type wrapper for `numpy.random.Generator`.\n\n The reason this exists (and `Generic` doesn't suffice) is that\n `Generator` objects that would appear to be equal do not compare equal\n with the ``==`` operator.\n\n This `Type` also works with a ``dict`` derived from\n `Generator.__get_state__`, unless the ``strict`` argument to `Type.filter`\n is explicitly set to ``True``.\n\n \"\"\"\n\n def __repr__(self):\n return \"RandomGeneratorType\"\n\n @staticmethod\n def is_valid_value(a, strict):\n if isinstance(a, np.random.Generator):\n return True\n\n if not strict and isinstance(a, dict):\n if \"bit_generator\" not in a:\n return False\n else:\n bit_gen_key = a[\"bit_generator\"]\n\n if hasattr(bit_gen_key, \"_value\"):\n bit_gen_key = int(bit_gen_key._value)\n bit_gen_key = numpy_bit_gens[bit_gen_key]\n\n gen_keys, state_keys = gen_states_keys[bit_gen_key]\n\n for key in gen_keys:\n if key not in a:\n return False\n\n for key in state_keys:\n if key not in a[\"state\"]:\n return False\n\n return True\n\n return False\n\n @staticmethod\n def values_eq(a, b):\n sa = a if isinstance(a, dict) else a.__getstate__()\n sb = b if isinstance(b, dict) else b.__getstate__()\n\n def _eq(sa, sb):\n for key in sa:\n if isinstance(sa[key], dict):\n if not _eq(sa[key], sb[key]):\n return False\n elif isinstance(sa[key], np.ndarray):\n if not np.array_equal(sa[key], sb[key]):\n return False\n else:\n if sa[key] != sb[key]:\n return False\n\n return True\n\n return _eq(sa, sb)\n\n def __eq__(self, other):\n return type(self) == type(other)\n\n def __hash__(self):\n return hash(type(self))\n\n\n# Register `RandomGeneratorType`'s C code for `ViewOp`.\naesara.compile.register_view_op_c_code(\n RandomGeneratorType,\n \"\"\"\n Py_XDECREF(%(oname)s);\n %(oname)s = %(iname)s;\n Py_XINCREF(%(oname)s);\n \"\"\",\n 1,\n)\n\nrandom_generator_type = RandomGeneratorType()\n", "\"\"\"\nIf you have two expressions containing unification variables, these expressions\ncan be \"unified\" if there exists an assignment to all unification variables\nsuch that the two expressions are equal.\n\nFor instance, [5, A, B] and [A, C, 9] can be unified if A=C=5 and B=9,\nyielding [5, 5, 9].\n[5, [A, B]] and [A, [1, 2]] cannot be unified because there is no value for A\nthat satisfies the constraints. 
That's useful for pattern matching.\n\n\"\"\"\n\nfrom collections.abc import Mapping\nfrom numbers import Number\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nfrom cons.core import ConsError, _car, _cdr\nfrom etuples import apply, etuple, etuplize\nfrom etuples.core import ExpressionTuple\nfrom unification.core import _unify, assoc\nfrom unification.utils import transitive_get as walk\nfrom unification.variable import Var, isvar, var\n\nfrom aesara.graph.basic import Constant, Variable\nfrom aesara.graph.op import Op\nfrom aesara.graph.type import Type\n\n\ndef eval_if_etuple(x):\n if isinstance(x, ExpressionTuple):\n return x.evaled_obj\n return x\n\n\nclass ConstrainedVar(Var):\n \"\"\"A logical variable with a constraint.\n\n These will unify with other `Var`s regardless of the constraints.\n \"\"\"\n\n __slots__ = (\"constraint\",)\n\n def __new__(cls, constraint, token=None, prefix=\"\"):\n if token is None:\n token = f\"{prefix}_{Var._id}\"\n Var._id += 1\n\n key = (token, constraint)\n obj = cls._refs.get(key, None)\n\n if obj is None:\n obj = object.__new__(cls)\n obj.token = token\n obj.constraint = constraint\n cls._refs[key] = obj\n\n return obj\n\n def __eq__(self, other):\n if type(self) == type(other):\n return self.token == other.token and self.constraint == other.constraint\n return NotImplemented\n\n def __hash__(self):\n return hash((type(self), self.token, self.constraint))\n\n def __str__(self):\n return f\"~{self.token} [{self.constraint}]\"\n\n def __repr__(self):\n return f\"ConstrainedVar({repr(self.constraint)}, {self.token})\"\n\n\ndef car_Variable(x):\n if x.owner:\n return x.owner.op\n else:\n raise ConsError(\"Not a cons pair.\")\n\n\n_car.add((Variable,), car_Variable)\n\n\ndef cdr_Variable(x):\n if x.owner:\n x_e = etuple(_car(x), *x.owner.inputs, evaled_obj=x)\n else:\n raise ConsError(\"Not a cons pair.\")\n\n return x_e[1:]\n\n\n_cdr.add((Variable,), cdr_Variable)\n\n\ndef car_Op(x):\n if hasattr(x, \"__props__\"):\n return type(x)\n\n raise ConsError(\"Not a cons pair.\")\n\n\n_car.add((Op,), car_Op)\n\n\ndef cdr_Op(x):\n if not hasattr(x, \"__props__\"):\n raise ConsError(\"Not a cons pair.\")\n\n x_e = etuple(\n _car(x),\n *[getattr(x, p) for p in getattr(x, \"__props__\", ())],\n evaled_obj=x,\n )\n return x_e[1:]\n\n\n_cdr.add((Op,), cdr_Op)\n\n\ndef car_Type(x):\n return type(x)\n\n\n_car.add((Type,), car_Type)\n\n\ndef cdr_Type(x):\n x_e = etuple(\n _car(x), *[getattr(x, p) for p in getattr(x, \"__props__\", ())], evaled_obj=x\n )\n return x_e[1:]\n\n\n_cdr.add((Type,), cdr_Type)\n\n\ndef apply_Op_ExpressionTuple(op, etuple_arg):\n res = op.make_node(*etuple_arg)\n\n try:\n return res.default_output()\n except ValueError:\n return res.outputs\n\n\napply.add((Op, ExpressionTuple), apply_Op_ExpressionTuple)\n\n\ndef _unify_etuplize_first_arg(u, v, s):\n try:\n u_et = etuplize(u, shallow=True)\n yield _unify(u_et, v, s)\n except TypeError:\n yield False\n return\n\n\n_unify.add((Op, ExpressionTuple, Mapping), _unify_etuplize_first_arg)\n_unify.add(\n (ExpressionTuple, Op, Mapping), lambda u, v, s: _unify_etuplize_first_arg(v, u, s)\n)\n\n_unify.add((Type, ExpressionTuple, Mapping), _unify_etuplize_first_arg)\n_unify.add(\n (ExpressionTuple, Type, Mapping), lambda u, v, s: _unify_etuplize_first_arg(v, u, s)\n)\n\n\ndef _unify_Variable_Variable(u, v, s):\n # Avoid converting to `etuple`s, when possible\n if u == v:\n yield s\n return\n\n if not u.owner and not v.owner:\n yield False\n return\n\n yield _unify(\n etuplize(u, 
shallow=True) if u.owner else u,\n etuplize(v, shallow=True) if v.owner else v,\n s,\n )\n\n\n_unify.add((Variable, Variable, Mapping), _unify_Variable_Variable)\n\n\ndef _unify_Constant_Constant(u, v, s):\n # XXX: This ignores shape and type differences. It's only implemented this\n # way for backward compatibility\n if np.array_equiv(u.data, v.data):\n yield s\n else:\n yield False\n\n\n_unify.add((Constant, Constant, Mapping), _unify_Constant_Constant)\n\n\ndef _unify_Variable_ExpressionTuple(u, v, s):\n # `Constant`s are \"atomic\"\n if not u.owner:\n yield False\n return\n\n yield _unify(etuplize(u, shallow=True), v, s)\n\n\n_unify.add(\n (Variable, ExpressionTuple, Mapping),\n _unify_Variable_ExpressionTuple,\n)\n_unify.add(\n (ExpressionTuple, Variable, Mapping),\n lambda u, v, s: _unify_Variable_ExpressionTuple(v, u, s),\n)\n\n\n@_unify.register(ConstrainedVar, (ConstrainedVar, Var, object), Mapping)\ndef _unify_ConstrainedVar_object(u, v, s):\n u_w = walk(u, s)\n\n if isvar(v):\n v_w = walk(v, s)\n else:\n v_w = v\n\n if u_w == v_w:\n yield s\n elif isvar(u_w):\n if (\n not isvar(v_w)\n and isinstance(u_w, ConstrainedVar)\n and not u_w.constraint(eval_if_etuple(v_w))\n ):\n yield False\n return\n yield assoc(s, u_w, v_w)\n elif isvar(v_w):\n if (\n not isvar(u_w)\n and isinstance(v_w, ConstrainedVar)\n and not v_w.constraint(eval_if_etuple(u_w))\n ):\n yield False\n return\n yield assoc(s, v_w, u_w)\n else:\n yield _unify(u_w, v_w, s)\n\n\n_unify.add((object, ConstrainedVar, Mapping), _unify_ConstrainedVar_object)\n\n\ndef convert_strs_to_vars(\n x: Union[Tuple, str, Dict], var_map: Optional[Dict[str, Var]] = None\n) -> Union[ExpressionTuple, Var]:\n r\"\"\"Convert tuples and strings to `etuple`\\s and logic variables, respectively.\n\n Constrained logic variables are specified via `dict`s with the keys\n `\"pattern\"`, which specifies the logic variable as a string, and\n `\"constraint\"`, which provides the `Callable` constraint.\n \"\"\"\n if var_map is None:\n var_map = {}\n\n def _convert(y):\n if isinstance(y, str):\n v = var_map.get(y, var(y))\n var_map[y] = v\n return v\n elif isinstance(y, dict):\n pattern = y[\"pattern\"]\n if not isinstance(pattern, str):\n raise TypeError(\n \"Constraints can only be assigned to logic variables (i.e. 
strings)\"\n )\n constraint = y[\"constraint\"]\n v = var_map.get(pattern, ConstrainedVar(constraint, pattern))\n var_map[pattern] = v\n return v\n elif isinstance(y, tuple):\n return etuple(*tuple(_convert(e) for e in y))\n elif isinstance(y, (Number, np.ndarray)):\n from aesara.tensor import as_tensor_variable\n\n return as_tensor_variable(y)\n return y\n\n return _convert(x)\n", "import numpy as np\n\nimport aesara\nimport aesara.tensor.basic as basic\nfrom aesara import function\nfrom aesara.compile.io import In\nfrom aesara.misc.safe_asarray import _asarray\nfrom aesara.tensor.basic import (\n _convert_to_float32,\n _convert_to_float64,\n _convert_to_int8,\n _convert_to_int16,\n _convert_to_int32,\n _convert_to_int64,\n cast,\n)\nfrom aesara.tensor.type import (\n TensorType,\n bvector,\n dmatrix,\n dvector,\n fvector,\n ivector,\n zmatrix,\n)\n\n\nclass TestCasting:\n def test_0(self):\n for op_fn in [_convert_to_int32, _convert_to_float32, _convert_to_float64]:\n for type_fn in bvector, ivector, fvector, dvector:\n x = type_fn()\n f = function([x], op_fn(x))\n\n xval = _asarray(np.random.rand(10) * 10, dtype=type_fn.dtype)\n yval = f(xval)\n assert (\n str(yval.dtype)\n == op_fn.scalar_op.output_types_preference.spec[0].dtype\n )\n\n def test_illegal(self):\n try:\n x = zmatrix()\n function([x], cast(x, \"float64\"))(np.ones((2, 3), dtype=\"complex128\"))\n except TypeError:\n return\n assert 0\n\n def test_basic(self):\n for type1 in [\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ]:\n x = TensorType(dtype=type1, broadcastable=(False,))()\n for type2, converter in zip(\n [\"int8\", \"int16\", \"int32\", \"int64\", \"float32\", \"float64\"],\n [\n _convert_to_int8,\n _convert_to_int16,\n _convert_to_int32,\n _convert_to_int64,\n _convert_to_float32,\n _convert_to_float64,\n ],\n ):\n y = converter(x)\n f = function([In(x, strict=True)], y)\n a = np.arange(10, dtype=type1)\n b = f(a)\n assert np.all(b == np.arange(10, dtype=type2))\n\n def test_convert_to_complex(self):\n val64 = np.ones(3, dtype=\"complex64\") + 0.5j\n val128 = np.ones(3, dtype=\"complex128\") + 0.5j\n\n vec64 = TensorType(\"complex64\", (False,))()\n vec128 = TensorType(\"complex128\", (False,))()\n\n f = function([vec64], basic._convert_to_complex128(vec64))\n # we need to compare with the same type.\n assert vec64.type.values_eq_approx(val128, f(val64))\n\n f = function([vec128], basic._convert_to_complex128(vec128))\n assert vec64.type.values_eq_approx(val128, f(val128))\n\n f = function([vec64], basic._convert_to_complex64(vec64))\n assert vec64.type.values_eq_approx(val64, f(val64))\n\n f = function([vec128], basic._convert_to_complex64(vec128))\n assert vec128.type.values_eq_approx(val64, f(val128))\n\n # upcasting to complex128\n for t in [\"int8\", \"int16\", \"int32\", \"int64\", \"float32\", \"float64\"]:\n a = aesara.shared(np.ones(3, dtype=t))\n b = aesara.shared(np.ones(3, dtype=\"complex128\"))\n f = function([], basic._convert_to_complex128(a))\n assert a.type.values_eq_approx(b.get_value(), f())\n\n # upcasting to complex64\n for t in [\"int8\", \"int16\", \"int32\", \"int64\", \"float32\"]:\n a = aesara.shared(np.ones(3, dtype=t))\n b = aesara.shared(np.ones(3, dtype=\"complex64\"))\n f = function([], basic._convert_to_complex64(a))\n assert a.type.values_eq_approx(b.get_value(), f())\n\n # downcast to complex64\n for t in [\"float64\"]:\n a = aesara.shared(np.ones(3, dtype=t))\n b = aesara.shared(np.ones(3, 
dtype=\"complex64\"))\n f = function([], basic._convert_to_complex64(a))\n assert a.type.values_eq_approx(b.get_value(), f())\n\n def test_bug_complext_10_august_09(self):\n v0 = dmatrix()\n v1 = basic._convert_to_complex128(v0)\n\n inputs = [v0]\n outputs = [v1]\n f = function(inputs, outputs)\n i = np.zeros((2, 2))\n assert (f(i) == np.zeros((2, 2))).all()\n", "import numpy as np\nimport pytest\n\nfrom aesara import config, function\nfrom aesara.compile.mode import Mode\nfrom aesara.graph.optdb import OptimizationQuery\nfrom aesara.tensor.random.utils import RandomStream, broadcast_params\nfrom aesara.tensor.type import matrix, tensor\nfrom tests import unittest_tools as utt\n\n\[email protected](scope=\"module\", autouse=True)\ndef set_aesara_flags():\n opts = OptimizationQuery(include=[None], exclude=[])\n py_mode = Mode(\"py\", opts)\n with config.change_flags(mode=py_mode, compute_test_value=\"warn\"):\n yield\n\n\ndef test_broadcast_params():\n\n ndims_params = [0, 0]\n\n mean = np.array([0, 1, 2])\n cov = np.array(1e-6)\n params = [mean, cov]\n res = broadcast_params(params, ndims_params)\n assert np.array_equal(res[0], mean)\n assert np.array_equal(res[1], np.broadcast_to(cov, (3,)))\n\n ndims_params = [1, 2]\n\n mean = np.r_[1, 2, 3]\n cov = np.stack([np.eye(3) * 1e-5, np.eye(3) * 1e-4])\n params = [mean, cov]\n res = broadcast_params(params, ndims_params)\n assert np.array_equal(res[0], np.broadcast_to(mean, (2, 3)))\n assert np.array_equal(res[1], cov)\n\n mean = np.stack([np.r_[0, 0, 0], np.r_[1, 1, 1]])\n cov = np.arange(3 * 3).reshape((3, 3))\n params = [mean, cov]\n res = broadcast_params(params, ndims_params)\n assert np.array_equal(res[0], mean)\n assert np.array_equal(res[1], np.broadcast_to(cov, (2, 3, 3)))\n\n mean = np.stack([np.r_[0, 0, 0], np.r_[1, 1, 1]])\n cov = np.stack(\n [np.arange(3 * 3).reshape((3, 3)), np.arange(3 * 3).reshape((3, 3)) * 10]\n )\n params = [mean, cov]\n res = broadcast_params(params, ndims_params)\n assert np.array_equal(res[0], mean)\n assert np.array_equal(res[1], cov)\n\n mean = np.array([[1, 2, 3]])\n cov = np.stack([np.eye(3) * 1e-5, np.eye(3) * 1e-4])\n params = [mean, cov]\n res = broadcast_params(params, ndims_params)\n assert np.array_equal(res[0], np.array([[1, 2, 3], [1, 2, 3]]))\n assert np.array_equal(res[1], cov)\n\n mean = np.array([[0], [10], [100]])\n cov = np.diag(np.array([1e-6]))\n params = [mean, cov]\n res = broadcast_params(params, ndims_params)\n assert np.array_equal(res[0], mean)\n assert np.array_equal(res[1], np.broadcast_to(cov, (3, 1, 1)))\n\n # Try it in Aesara\n with config.change_flags(compute_test_value=\"raise\"):\n mean = tensor(config.floatX, [False, True])\n mean.tag.test_value = np.array([[0], [10], [100]], dtype=config.floatX)\n cov = matrix()\n cov.tag.test_value = np.diag(np.array([1e-6], dtype=config.floatX))\n params = [mean, cov]\n res = broadcast_params(params, ndims_params)\n assert np.array_equal(res[0].get_test_value(), mean.get_test_value())\n assert np.array_equal(\n res[1].get_test_value(), np.broadcast_to(cov.get_test_value(), (3, 1, 1))\n )\n\n\nclass TestSharedRandomStream:\n def test_tutorial(self):\n srng = RandomStream(seed=234)\n rv_u = srng.uniform(0, 1, size=(2, 2))\n rv_n = srng.normal(0, 1, size=(2, 2))\n\n f = function([], rv_u)\n # Disabling `default_updates` means that we have to pass\n # `srng.state_updates` to `function` manually, if we want the shared\n # state to change\n g = function([], rv_n, no_default_updates=True)\n nearly_zeros = function([], rv_u + rv_u - 2 * 
rv_u)\n\n assert np.all(f() != f())\n assert np.all(g() == g())\n assert np.all(abs(nearly_zeros()) < 1e-5)\n assert isinstance(rv_u.rng.get_value(borrow=True), np.random.Generator)\n\n @pytest.mark.parametrize(\"rng_ctor\", [np.random.RandomState, np.random.default_rng])\n def test_basics(self, rng_ctor):\n random = RandomStream(seed=utt.fetch_seed(), rng_ctor=rng_ctor)\n\n with pytest.raises(ValueError):\n random.uniform(0, 1, size=(2, 2), rng=np.random.default_rng(23))\n\n with pytest.raises(AttributeError):\n random.blah\n\n # test if standard_normal is available in the namespace, See: GH issue #528\n random.standard_normal\n\n with pytest.raises(AttributeError):\n np_random = RandomStream(namespace=np, rng_ctor=rng_ctor)\n np_random.ndarray\n\n fn = function([], random.uniform(0, 1, size=(2, 2)), updates=random.updates())\n\n fn_val0 = fn()\n fn_val1 = fn()\n\n rng_seed = np.random.default_rng(utt.fetch_seed()).integers(2 ** 30)\n rng = rng_ctor(int(rng_seed)) # int() is for 32bit\n\n numpy_val0 = rng.uniform(0, 1, size=(2, 2))\n numpy_val1 = rng.uniform(0, 1, size=(2, 2))\n\n assert np.allclose(fn_val0, numpy_val0)\n assert np.allclose(fn_val1, numpy_val1)\n\n @pytest.mark.parametrize(\"rng_ctor\", [np.random.RandomState, np.random.default_rng])\n def test_seed(self, rng_ctor):\n init_seed = 234\n random = RandomStream(init_seed, rng_ctor=rng_ctor)\n\n ref_state = np.random.default_rng(init_seed).__getstate__()\n random_state = random.gen_seedgen.__getstate__()\n assert random.default_instance_seed == init_seed\n assert random_state[\"bit_generator\"] == ref_state[\"bit_generator\"]\n assert random_state[\"state\"] == ref_state[\"state\"]\n\n new_seed = 43298\n random.seed(new_seed)\n\n ref_state = np.random.default_rng(new_seed).__getstate__()\n random_state = random.gen_seedgen.__getstate__()\n assert random_state[\"bit_generator\"] == ref_state[\"bit_generator\"]\n assert random_state[\"state\"] == ref_state[\"state\"]\n\n random.seed()\n ref_state = np.random.default_rng(init_seed).__getstate__()\n random_state = random.gen_seedgen.__getstate__()\n assert random.default_instance_seed == init_seed\n assert random_state[\"bit_generator\"] == ref_state[\"bit_generator\"]\n assert random_state[\"state\"] == ref_state[\"state\"]\n\n # Reset the seed\n random.seed(new_seed)\n\n # Check state updates\n _ = random.normal()\n\n # Now, change the seed when there are state updates\n random.seed(new_seed)\n\n update_seed = np.random.default_rng(new_seed).integers(2 ** 30)\n ref_rng = rng_ctor(update_seed)\n state_rng = random.state_updates[0][0].get_value(borrow=True)\n\n if hasattr(state_rng, \"get_state\"):\n ref_state = ref_rng.get_state()\n random_state = state_rng.get_state()\n assert np.array_equal(random_state[1], ref_state[1])\n assert random_state[0] == ref_state[0]\n assert random_state[2:] == ref_state[2:]\n else:\n ref_state = ref_rng.__getstate__()\n random_state = state_rng.__getstate__()\n assert random_state[\"bit_generator\"] == ref_state[\"bit_generator\"]\n assert random_state[\"state\"] == ref_state[\"state\"]\n\n @pytest.mark.parametrize(\"rng_ctor\", [np.random.RandomState, np.random.default_rng])\n def test_uniform(self, rng_ctor):\n # Test that RandomStream.uniform generates the same results as numpy\n # Check over two calls to see if the random state is correctly updated.\n random = RandomStream(utt.fetch_seed(), rng_ctor=rng_ctor)\n fn = function([], random.uniform(-1, 1, size=(2, 2)))\n fn_val0 = fn()\n fn_val1 = fn()\n\n rng_seed = 
np.random.default_rng(utt.fetch_seed()).integers(2 ** 30)\n rng = rng_ctor(int(rng_seed)) # int() is for 32bit\n numpy_val0 = rng.uniform(-1, 1, size=(2, 2))\n numpy_val1 = rng.uniform(-1, 1, size=(2, 2))\n\n assert np.allclose(fn_val0, numpy_val0)\n assert np.allclose(fn_val1, numpy_val1)\n\n @pytest.mark.parametrize(\"rng_ctor\", [np.random.RandomState, np.random.default_rng])\n def test_default_updates(self, rng_ctor):\n # Basic case: default_updates\n random_a = RandomStream(utt.fetch_seed(), rng_ctor=rng_ctor)\n out_a = random_a.uniform(0, 1, size=(2, 2))\n fn_a = function([], out_a)\n fn_a_val0 = fn_a()\n fn_a_val1 = fn_a()\n assert not np.all(fn_a_val0 == fn_a_val1)\n\n nearly_zeros = function([], out_a + out_a - 2 * out_a)\n assert np.all(abs(nearly_zeros()) < 1e-5)\n\n # Explicit updates #1\n random_b = RandomStream(utt.fetch_seed(), rng_ctor=rng_ctor)\n out_b = random_b.uniform(0, 1, size=(2, 2))\n fn_b = function([], out_b, updates=random_b.updates())\n fn_b_val0 = fn_b()\n fn_b_val1 = fn_b()\n assert np.all(fn_b_val0 == fn_a_val0)\n assert np.all(fn_b_val1 == fn_a_val1)\n\n # Explicit updates #2\n random_c = RandomStream(utt.fetch_seed(), rng_ctor=rng_ctor)\n out_c = random_c.uniform(0, 1, size=(2, 2))\n fn_c = function([], out_c, updates=[out_c.update])\n fn_c_val0 = fn_c()\n fn_c_val1 = fn_c()\n assert np.all(fn_c_val0 == fn_a_val0)\n assert np.all(fn_c_val1 == fn_a_val1)\n\n # No updates at all\n random_d = RandomStream(utt.fetch_seed(), rng_ctor=rng_ctor)\n out_d = random_d.uniform(0, 1, size=(2, 2))\n fn_d = function([], out_d, no_default_updates=True)\n fn_d_val0 = fn_d()\n fn_d_val1 = fn_d()\n assert np.all(fn_d_val0 == fn_a_val0)\n assert np.all(fn_d_val1 == fn_d_val0)\n\n # No updates for out\n random_e = RandomStream(utt.fetch_seed(), rng_ctor=rng_ctor)\n out_e = random_e.uniform(0, 1, size=(2, 2))\n fn_e = function([], out_e, no_default_updates=[out_e.rng])\n fn_e_val0 = fn_e()\n fn_e_val1 = fn_e()\n assert np.all(fn_e_val0 == fn_a_val0)\n assert np.all(fn_e_val1 == fn_e_val0)\n\n @pytest.mark.parametrize(\"rng_ctor\", [np.random.RandomState, np.random.default_rng])\n def test_multiple_rng_aliasing(self, rng_ctor):\n # Test that when we have multiple random number generators, we do not alias\n # the state_updates member. `state_updates` can be useful when attempting to\n # copy the (random) state between two similar aesara graphs. The test is\n # meant to detect a previous bug where state_updates was initialized as a\n # class-attribute, instead of the __init__ function.\n\n rng1 = RandomStream(1234, rng_ctor=rng_ctor)\n rng2 = RandomStream(2392, rng_ctor=rng_ctor)\n assert rng1.state_updates is not rng2.state_updates\n assert rng1.gen_seedgen is not rng2.gen_seedgen\n\n @pytest.mark.parametrize(\"rng_ctor\", [np.random.RandomState, np.random.default_rng])\n def test_random_state_transfer(self, rng_ctor):\n # Test that random state can be transferred from one aesara graph to another.\n\n class Graph:\n def __init__(self, seed=123):\n self.rng = RandomStream(seed, rng_ctor=rng_ctor)\n self.y = self.rng.uniform(0, 1, size=(1,))\n\n g1 = Graph(seed=123)\n f1 = function([], g1.y)\n g2 = Graph(seed=987)\n f2 = function([], g2.y)\n\n for (su1, su2) in zip(g1.rng.state_updates, g2.rng.state_updates):\n su2[0].set_value(su1[0].get_value())\n\n np.testing.assert_array_almost_equal(f1(), f2(), decimal=6)\n" ]
[ [ "numpy.asarray", "numpy.dtype" ], [ "scipy.signal.signaltools._bvalfromboundary", "numpy.zeros", "scipy.signal.signaltools._valfrommode", "scipy.signal.sigtools._convolve2d" ], [ "numpy.array_equal" ], [ "numpy.array_equiv" ], [ "numpy.arange", "numpy.zeros", "numpy.random.rand", "numpy.ones" ], [ "numpy.allclose", "numpy.array_equal", "numpy.arange", "numpy.eye", "numpy.stack", "numpy.all", "numpy.broadcast_to", "numpy.array", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
synicalsyntax/eeg-notebooks
[ "1edcaf24d55eabe076f6ba98645fcc09b3459613", "1edcaf24d55eabe076f6ba98645fcc09b3459613" ]
[ "notebooks/stimulus_presentation/spatial_gratings.py", "notebooks/stimulus_presentation/vep.py" ]
[ "\"\"\"\nGenerate spatial gratings\n=========================\n\nStimulus presentation based on gratings of different spatial frequencies\nfor generating ERPs, high frequency oscillations, and alpha reset.\n\nInspired from:\n\n> Hermes, Dora, K. J. Miller, B. A. Wandell, and Jonathan Winawer. \"Stimulus\ndependence of gamma oscillations in human visual cortex.\" Cerebral Cortex 25,\nno. 9 (2015): 2951-2959.\n\n\"\"\"\n\nfrom time import time\nfrom optparse import OptionParser\n\nimport numpy as np\nimport pandas as pd\nfrom psychopy import visual, core, event\nfrom pylsl import StreamInfo, StreamOutlet\n\n\ndef present(duration=120):\n\n # Create markers stream outlet\n info = StreamInfo('Markers', 'Markers', 3, 0, 'float32', 'myuidw43536')\n channels = info.desc().append_child(\"channels\")\n\n for c in ['Frequency', 'Contrast', 'Orientation']:\n channels.append_child(\"channel\") \\\n .append_child_value(\"label\", c)\n\n outlet = StreamOutlet(info)\n\n start = time()\n\n # Set up trial parameters\n n_trials = 2010\n iti = 1.0\n soa = 1.5\n jitter = 0.5\n record_duration = np.float32(duration)\n\n # Setup trial list\n frequency = np.random.binomial(1, 0.5, n_trials)\n contrast = np.ones(n_trials, dtype=int)\n orientation = np.random.randint(0, 4, n_trials) * 45\n\n trials = pd.DataFrame(dict(frequency=frequency,\n contrast=contrast,\n orientation=orientation))\n\n # graphics\n mywin = visual.Window([1920, 1080], monitor=\"testMonitor\", units=\"deg\",\n fullscr=True)\n grating = visual.GratingStim(win=mywin, mask='circle', size=40, sf=4)\n fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,\n rgb=[1, 0, 0])\n\n rs = np.random.RandomState(42)\n\n core.wait(2)\n\n for ii, trial in trials.iterrows():\n\n # onset\n fre = trials['frequency'].iloc[ii]\n contrast = trials['contrast'].iloc[ii]\n ori = trials['orientation'].iloc[ii]\n grating.sf = 4 * fre + 0.1\n grating.ori = ori\n grating.contrast = contrast\n grating.draw()\n fixation.draw()\n\n # Send marker\n outlet.push_sample([fre + 1, contrast, ori], time())\n mywin.flip()\n\n # offset\n core.wait(soa)\n fixation.draw()\n outlet.push_sample([fre + 3, contrast, ori], time())\n mywin.flip()\n\n if len(event.getKeys()) > 0 or (time() - start) > record_duration:\n break\n event.clearEvents()\n\n # Intertrial interval\n core.wait(iti + np.random.rand() * jitter)\n\n # Cleanup\n mywin.close()\n\n\ndef main():\n parser = OptionParser()\n\n parser.add_option(\"-d\", \"--duration\",\n dest=\"duration\", type='int', default=120,\n help=\"duration of the recording in seconds.\")\n\n (options, args) = parser.parse_args()\n present(options.duration)\n\n\nif __name__ == '__main__':\n main()\n", "import numpy as np\nfrom pandas import DataFrame\nfrom psychopy import visual, core, event\nfrom time import time, strftime, gmtime\nfrom optparse import OptionParser\nfrom pylsl import StreamInfo, StreamOutlet\n\n\ndef present(duration=120):\n\n # create\n info = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')\n\n # next make an outlet\n outlet = StreamOutlet(info)\n\n markernames = [1, 2]\n\n start = time()\n\n n_trials = 2000\n iti = .2\n jitter = .1\n soa = 0.2\n record_duration = np.float32(duration)\n\n # Setup log\n position = np.random.randint(0, 2, n_trials)\n trials = DataFrame(dict(position=position,\n timestamp=np.zeros(n_trials)))\n\n # graphics\n mywin = visual.Window([1920, 1080], monitor=\"testMonitor\", units=\"deg\",\n fullscr=True)\n grating = visual.GratingStim(win=mywin, mask='circle', size=20, sf=4)\n 
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,\n rgb=[1, 0, 0])\n\n for ii, trial in trials.iterrows():\n # inter trial interval\n core.wait(iti + np.random.rand() * jitter)\n\n # onset\n grating.phase += np.random.rand()\n pos = trials['position'].iloc[ii]\n grating.pos = [25*(pos-0.5), 0]\n grating.draw()\n fixation.draw()\n outlet.push_sample([markernames[pos]], time())\n mywin.flip()\n\n # offset\n core.wait(soa)\n fixation.draw()\n mywin.flip()\n if len(event.getKeys()) > 0 or (time() - start) > record_duration:\n break\n event.clearEvents()\n # Cleanup\n mywin.close()\n\n\ndef main():\n parser = OptionParser()\n\n parser.add_option(\"-d\", \"--duration\",\n dest=\"duration\", type='int', default=120,\n help=\"duration of the recording in seconds.\")\n\n (options, args) = parser.parse_args()\n present(options.duration)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.ones", "numpy.random.rand", "numpy.float32", "numpy.random.binomial", "numpy.random.RandomState", "numpy.random.randint" ], [ "numpy.zeros", "numpy.random.rand", "numpy.float32", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
flaport/gdsfactory
[ "1f2e844c1fe27b9c6340e2d51500fd3358fa16e5" ]
[ "pp/components/waveguide_heater.py" ]
[ "from typing import Callable, Dict, List, Tuple\n\nimport numpy as np\n\nimport pp\nfrom pp.cell import cell\nfrom pp.component import Component\nfrom pp.components.electrical.tlm import tlm\nfrom pp.components.extension import line\nfrom pp.components.hline import hline\nfrom pp.components.waveguide import waveguide\nfrom pp.layers import LAYER\nfrom pp.port import Port, deco_rename_ports\n\n\n@cell\ndef heater(\n length: float = 10.0,\n width: float = 0.5,\n layers_heater: List[Tuple[int, int]] = [LAYER.HEATER],\n) -> Component:\n \"\"\" straight heater\n \"\"\"\n c = pp.Component()\n for layer in layers_heater:\n _ref = c.add_ref(hline(length=length, width=width, layer=layer))\n c.ports = _ref.ports # Use ports from latest layer as heater ports\n c.absorb(_ref)\n return c\n\n\ndef add_trenches(\n c: Component,\n sstw: float = 2.0,\n trench_width: float = 0.5,\n trench_keep_out: float = 2.0,\n trenches: List[Dict[str, int]] = [\n {\"nb_segments\": 2, \"lane\": 1, \"x_start_offset\": 0},\n {\"nb_segments\": 2, \"lane\": -1, \"x_start_offset\": 0},\n ],\n layer_trench: Tuple[int, int] = LAYER.DEEPTRENCH,\n) -> Component:\n \"\"\"\n Add trenches to a waveguide-heater-like component\n \"\"\"\n\n heater_width = c.settings[\"heater_width\"]\n heater_spacing = c.settings[\"heater_spacing\"]\n width = c.settings[\"width\"]\n length = c.settings[\"length\"]\n\n a = heater_spacing + (width + heater_width) / 2\n\n # Add trenches\n if trench_width and trench_width > 0:\n tko = trench_keep_out\n\n for trench in trenches:\n lane = trench[\"lane\"]\n td = tko + a + (trench_width + heater_width) / 2\n y = np.sign(lane) * (td + (abs(lane) - 1) * (trench_width + tko))\n x_start_offset = trench[\"x_start_offset\"]\n\n if \"segments\" not in trench:\n nb_segments = trench[\"nb_segments\"]\n trench_length = (length - (nb_segments - 1) * sstw) / nb_segments\n segments = [trench_length] * nb_segments\n else:\n segments = trench[\"segments\"]\n x = x_start_offset\n for i, trench_length in enumerate(segments):\n trench = hline(\n length=trench_length, width=trench_width, layer=layer_trench\n )\n _trench = trench.ref(\n port_id=\"W0\", position=c.ports[\"W0\"].position + (x, y)\n )\n c.add(_trench)\n c.absorb(_trench)\n x += trench_length + sstw\n\n return c\n\n\n@cell\ndef waveguide_heater(\n length: float = 10.0,\n width: float = 0.5,\n heater_width: float = 0.5,\n heater_spacing: float = 1.2,\n sstw: float = 2.0,\n trench_width: float = 0.5,\n trench_keep_out: float = 2.0,\n trenches: List[Dict[str, int]] = [\n {\"nb_segments\": 2, \"lane\": 1, \"x_start_offset\": 0},\n {\"nb_segments\": 2, \"lane\": -1, \"x_start_offset\": 0},\n ],\n layers_heater: List[Tuple[int, int]] = [LAYER.HEATER],\n waveguide_factory: Callable = waveguide,\n layer_trench: Tuple[int, int] = LAYER.DEEPTRENCH,\n) -> Component:\n \"\"\" waveguide with heater\n\n .. code::\n\n TTTTTTTTTTTTT TTTTTTTTTTTTT <-- trench\n\n HHHHHHHHHHHHHHHHHHHHHHHHHHHHHH <-- heater\n\n ------------------------------ <-- waveguide\n\n HHHHHHHHHHHHHHHHHHHHHHHHHHHHHH <-- heater\n\n TTTTTTTTTTTTT TTTTTTTTTTTTT <-- trench\n\n .. 
plot::\n :include-source:\n\n import pp\n\n c = pp.c.waveguide_heater()\n pp.plotgds(c)\n\n \"\"\"\n c = Component()\n\n _heater = heater(length=length, width=heater_width, layers_heater=layers_heater)\n\n y_heater = heater_spacing + (width + heater_width) / 2\n heater_top = c << _heater\n heater_bot = c << _heater\n\n heater_top.movey(+y_heater)\n heater_bot.movey(-y_heater)\n\n wg = c << waveguide_factory(length=length, width=width)\n\n for i in [heater_top, heater_bot, wg]:\n c.absorb(i)\n\n # Add wg ports\n for p in wg.ports.values():\n c.add_port(name=p.name, port=p)\n\n # Add heater ports\n for p in heater_top.ports.values():\n c.add_port(name=\"HT\" + p.name, port=p)\n\n for p in heater_bot.ports.values():\n c.add_port(name=\"HB\" + p.name, port=p)\n\n c.settings[\"width\"] = width\n c.settings[\"heater_width\"] = heater_width\n c.settings[\"heater_spacing\"] = heater_spacing\n c.settings[\"length\"] = length\n add_trenches(\n c, sstw, trench_width, trench_keep_out, trenches, layer_trench=layer_trench\n )\n\n return c\n\n\n@cell\ndef wg_heater_connector(\n heater_ports: List[Port],\n metal_width: float = 10.0,\n tlm_layers: List[Tuple[int, int]] = [\n LAYER.VIA1,\n LAYER.M1,\n LAYER.VIA2,\n LAYER.M2,\n LAYER.VIA3,\n LAYER.M3,\n ],\n) -> Component:\n \"\"\"\n Connects together a pair of wg heaters and connect to a M3 port\n \"\"\"\n\n cmp = Component()\n assert len(heater_ports) == 2\n assert (\n heater_ports[0].orientation == heater_ports[1].orientation\n ), \"both ports should be facing in the same direction\"\n angle = heater_ports[0].orientation\n angle = angle % 360\n assert angle in [0, 180], \"angle should be 0 or 180, got {}\".format(angle)\n\n dx = 0.0\n dy = 0.0\n\n angle_to_dps = {0: [(-dx, -dy), (-dx, dy)], 180: [(dx, -dy), (dx, dy)]}\n ports = heater_ports\n hw = heater_ports[0].width\n\n if angle in [0, 180]:\n ports.sort(key=lambda p: p.y)\n else:\n ports.sort(key=lambda p: p.x)\n\n _heater_to_metal = tlm(width=0.5, height=0.5, layers=tlm_layers, vias=[])\n\n tlm_positions = []\n for port, dp in zip(ports, angle_to_dps[angle]):\n # Extend heater\n p = port.midpoint\n\n # Add via/metal transitions\n tlm_pos = p + dp\n hm = _heater_to_metal.ref(position=tlm_pos)\n tlm_positions += [tlm_pos]\n cmp.add(hm)\n\n ss = 1 if angle == 0 else -1\n\n # Connect both sides with top metal\n edge_metal_piece_width = 7.0\n x = ss * edge_metal_piece_width / 2\n top_metal_layer = tlm_layers[-1]\n cmp.add_polygon(\n line(\n tlm_positions[0] + (x, -hw / 2),\n tlm_positions[1] + (x, hw / 2),\n edge_metal_piece_width,\n ),\n layer=top_metal_layer,\n )\n\n # Add metal port\n cmp.add_port(\n name=\"0\",\n midpoint=0.5 * sum(tlm_positions) + (ss * edge_metal_piece_width / 2, 0),\n orientation=angle,\n width=metal_width,\n layer=top_metal_layer,\n port_type=\"dc\",\n )\n\n return cmp\n\n\n@deco_rename_ports\n@cell\ndef wg_heater_connected(\n waveguide_heater: Callable = waveguide_heater,\n wg_heater_connector: Callable = wg_heater_connector,\n tlm_layers: List[Tuple[int, int]] = [\n LAYER.VIA1,\n LAYER.M1,\n LAYER.VIA2,\n LAYER.M2,\n LAYER.VIA3,\n LAYER.M3,\n ],\n **kwargs\n) -> Component:\n \"\"\"\n .. 
plot::\n :include-source:\n\n import pp\n\n c = pp.c.wg_heater_connected()\n pp.plotgds(c)\n\n \"\"\"\n wg_heater = waveguide_heater(**kwargs)\n # print(wg_heater.ports.keys())\n conn1 = wg_heater_connector(\n heater_ports=[wg_heater.ports[\"HBE0\"], wg_heater.ports[\"HTE0\"]],\n tlm_layers=tlm_layers,\n )\n\n conn2 = wg_heater_connector(\n heater_ports=[wg_heater.ports[\"HBW0\"], wg_heater.ports[\"HTW0\"]],\n tlm_layers=tlm_layers,\n )\n\n cmp = Component()\n for c in [wg_heater, conn1, conn2]:\n _c = cmp.add_ref(c)\n cmp.absorb(_c)\n\n for port_name, p in wg_heater.ports.items():\n cmp.add_port(name=port_name, port=p)\n\n cmp.add_port(name=1, port=conn1.ports[\"0\"])\n cmp.add_port(name=2, port=conn2.ports[\"0\"])\n\n return cmp\n\n\ndef _demo_waveguide_heater():\n c = waveguide_heater(width=0.5)\n pp.write_gds(c)\n\n\nif __name__ == \"__main__\":\n # print(c.get_optical_ports())\n\n c = waveguide_heater()\n # c = wg_heater_connector(heater_ports=[c.ports[\"HBW0\"], c.ports[\"W0\"]])\n # c = wg_heater_connected(length=100.0, width=0.5)\n print(c.ports.keys())\n for p in c.ports.values():\n print(p.name, p.port_type)\n\n pp.show(c)\n" ]
[ [ "numpy.sign" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LeBenchmark/Interspeech2021
[ "1b368c6461a9a56a4337f9ee86888e286a55f2f9" ]
[ "SLU/slu_models.py" ]
[ "# coding: utf8\n\nimport os\nimport sys\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n#sys.path.append( os.environ['RNNTAGGERPATH'] )\nfrom fairseq.globals import *\n#import utils_classes as Cl\n\n# ---------- Decoders from LD-RNN tool ----------\n# This part is from my first system coded from scratch with pytorch, so be comprehensive if you see bullshits reading it :-)\n\nclass SimpleDecoder(nn.Module):\n\n def __init__(self, nn_params, input_size, direction):\n\n super(SimpleDecoder, self).__init__()\n\n # TMP FOR DEBUG\n self.debug_flag = False\n\n self.attention_heads = nn_params.attention_heads\n self.start_tag_idx = nn_params.start_tag_idx\n self.end_tag_idx = nn_params.end_tag_idx\n self.batch_size = nn_params.batch_size\n self.vocab_size = nn_params.word_vocab_size\n self.char_vocab_size = nn_params.char_vocab_size\n self.tagset_size = nn_params.tag_vocab_size\n self.hidden_dim = 2*nn_params.hidden_dim\n self.label_embed_dim = nn_params.label_embed_dim # NEW\n self.char_embed_dim = nn_params.char_embed_dim\n self.char_hidden_dim = nn_params.char_hidden_dim\n self.label_context_size = nn_params.label_context_size\n self.lex_hidden_layers = nn_params.lex_hidden_layers\n self.lab_hidden_layers = nn_params.lab_hidden_layers\n \n # TMP FOR DEBUG\n #self.word_dict = nn_params.word_dict\n #self.label_dict = nn_params.label_dict\n #self.ix_to_sublabel = nn_params.ix_to_sublabel\n \n print(' - SimpleDecoder init:')\n print(' - start_tag_idx: {}'.format(self.start_tag_idx))\n print(' - end_tag_idx: {}'.format(self.end_tag_idx))\n print(' - batch_size: {}'.format(self.batch_size))\n print(' - vocab_size: {}'.format(self.vocab_size))\n print(' - char_vocab_size: {}'.format(self.char_vocab_size))\n print(' - tagset_size: {}'.format(self.tagset_size))\n print(' - hidden_dim: {}'.format(self.hidden_dim))\n print(' - label_embed_dim: {}'.format(self.label_embed_dim))\n print(' - char_embed_dim: {}'.format(self.char_embed_dim))\n print(' - char_hidden_dim: {}'.format(self.char_hidden_dim))\n print(' - label_context_size: {}'.format(self.label_context_size))\n print(' - lex_hidden_layers: {}'.format(self.lex_hidden_layers))\n print(' - lab_hidden_layers: {}'.format(self.lab_hidden_layers))\n print(' ----------')\n \n self.n_subparts = nn_params.n_subparts\n self.sl_batch_size = 1\n if self.n_subparts > 0:\n self.tag_to_subparts = nn_params.tag_to_subparts\n self.num_directions = 1\n self.CUDA = nn_params.CUDA\n self.TEST = 0\n self.TeachingSignal = True\n self.dtype = nn_params.dtype\n self.ltype = nn_params.ltype\n self.direction = direction\n self.output_length_factor = nn_params.output_length_factor\n if self.direction == 0 or self.direction == 1:\n self.output_length_factor = 1.0\n print(' *** SimpleDecoder, output-length-factor: {}'.format(self.output_length_factor))\n sys.stdout.flush()\n \n self.bw_label_embeddings = nn.Embedding(self.tagset_size, nn_params.label_embed_dim, sparse=False)\n self.emb_dropout_p = nn_params.embed_dropout # NEW\n self.embed_dropout = nn.Dropout(p=nn_params.embed_dropout)\n\n self.attention_size = input_size # TMP\n attention_size = input_size\n #self.hidden_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim + nn_params.attention_heads * attention_size\n #if self.n_subparts > 0:\n # self.hidden_dim = self.hidden_dim + nn_params.sublabel_hidden_dim\n self.input_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim\n #if 
self.n_subparts > 0:\n # self.input_dim = self.input_dim + nn_params.sublabel_hidden_dim\n \n self.BWInputNorm = nn.LayerNorm(self.input_dim)\n self.HiddenSizeMap = nn.Linear(self.input_dim, self.hidden_dim)\n if self.attention_heads > 0:\n print(' *** SimpleDecoder: using gated attention context')\n sys.stdout.flush()\n self.h_lin = nn.Linear(input_size, input_size)\n self.a_lin = nn.Linear(attention_size, attention_size)\n self.LexAttention = ga.GlobalAttention([self.hidden_dim, attention_size, attention_size], attention_size)\n #self.LexAttention = MultiHeadAttention(attention_size, self.hidden_dim, attention_size, nn_params.attention_heads, nn_params.attention_type, self.dtype) # NEW\n #self.SemAttention = AttentionModule(self.hidden_dim, self.hidden_dim, self.hidden_dim, nn_params.attention_heads, nn_params.attention_type, self.dtype) # NEW\n self.SLM = SubLabelModule(nn_params, input_size)\n \n self.RNNInputNorm = nn.LayerNorm(self.hidden_dim)\n #self.bw_RNN = nn.GRU(self.hidden_dim, self.hidden_dim, bidirectional=False)\n self.bw_RNN = ContextualFeatureEncoder(self.hidden_dim, self.hidden_dim, self.batch_size, 1, False, nn_params.dtype, nn_params.contextual_encoder_type)\n #self.bw_RNN.flatten_parameters()\n \n self.MLPInputNorm = nn.LayerNorm(self.hidden_dim)\n self.BWOutputNorm = nn.LayerNorm(self.tagset_size)\n self.output_mlp = ReLU_MLP( [2,self.hidden_dim, self.hidden_dim] )\n output_dim = self.hidden_dim\n if self.n_subparts > 0:\n output_dim = output_dim + nn_params.sublabel_hidden_dim\n self.bw_hidden2tag = nn.Linear(output_dim, self.tagset_size)\n self.hid_dropout_p = nn_params.hidden_dropout # NEW\n self.hidden_dropout = nn.Dropout(p=nn_params.hidden_dropout)\n \n #self.dir_hidden = self.bw_RNN.get_hidden_state()\n \n def init_hidden(self):\n #self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n self.bw_RNN.init_hidden()\n self.SLM.init_hidden()\n \n def resize_embeddings(self, nn_params):\n\n if nn_params.tag_vocab_size > self.tagset_size:\n old_embeddings = self.bw_label_embeddings\n self.bw_label_embeddings = nn.Embedding(nn_params.tag_vocab_size, nn_params.label_embed_dim, sparse=False)\n self.bw_label_embeddings.weight[:self.tagset_size,:] = old_embeddings.weight\n \n old_lin = self.bw_hidden2tag\n output_dim = self.hidden_dim\n if self.n_subparts > 0:\n output_dim = output_dim + nn_params.sublabel_hidden_dim\n self.bw_hidden2tag = nn.Linear(output_dim, nn_params.tag_vocab_size)\n self.bw_hidden2tag.weight[:self.tagset_size,:] = old_lin.weight\n \n old_norm = self.BWOutputNorm\n self.BWOutputNorm = nn.LayerNorm(nn_params.tag_vocab_size)\n self.BWOutputNorm.weight[:self.tagset_size] = old_norm.weight\n \n self.tagset_size = nn_params.tag_vocab_size\n\n\n def train_forward(self, input, bw_streams):\n \n dims = input[0].size()\n sequence_length = dims[0]\n batch_size = self.batch_size\n bw_label_streams = bw_streams[0]\n next_sublabels = bw_streams[1]\n \n indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)\n source_length = sequence_length\n sequence_length = len(indeces)\n gold_sequence_length = bw_label_streams[0].size(0)\n gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n \n source_idxs = [int( i / self.output_length_factor ) for i in indeces]\n target_idxs = [int( i * gold_to_hyp_length_factor ) for i in indeces]\n \n # NEW: TEST IT!!!\n bin_size = 1\n if self.output_length_factor < 1.0:\n bin_size = int(1 / self.output_length_factor) + 1\n\n input_tsr = 
torch.cat( input, 2 )[source_idxs,:,:]\n local_input = [input_tsr]\n local_input.append( self.embed_dropout( self.bw_label_embeddings(bw_label_streams[0][target_idxs,:]) ) )\n\n # TMP FOR DEBUG\n if self.debug_flag:\n print('')\n print(' ************************************************')\n print(' * SimpleDecoder.train_forward -')\n print('')\n print(' *** indeces ({}): {}'.format(len(indeces), list(indeces)))\n print(' *** source_idxs ({}): {}'.format(len(source_idxs), source_idxs))\n print(' *** target_idxs ({}): {}'.format(len(target_idxs), target_idxs))\n print('*')\n print(' * Size of input: {}'.format( torch.cat(input, 2).size() ))\n print(' * Size of local_input: {}'.format( torch.cat(local_input, 2).size() ))\n print(' * Size of bw_label_streams: {}'.format(bw_label_streams[0].size()))\n print(' *')\n print(' * SimpleDecoder.train_forward, backward sublabels and labels:')\n for tgt_idx in target_idxs:\n # print(' {}'.format([self.ix_to_sublabel[sl.item()] for sl in next_sublabels[tgt_idx,:,0]]))\n print(' -----')\n print('@{}, {}'.format(tgt_idx, self.label_dict.index2token(bw_label_streams[0][tgt_idx,0])))\n print('')\n print(' * SimpleDecoder.train_forward, len of local_input: {}'.format(len(local_input)))\n for debug_idx in range(len(local_input)):\n print(' * {}'.format(local_input[debug_idx].size()))\n print(' ---')\n #print(' * SimpleDecoder.train_forward, size of next_sublabels: {}'.format(next_sublabels.size()))\n print(' * SimpleDecoder.train_forward, size of bw_label_streams[0]: {}'.format(bw_label_streams[0].size()))\n print('')\n # END TMP FOR DEBUG\n\n bw_sublabels_rep = []\n if self.n_subparts > 0:\n bw_sublabels_rep = self.SLM( input_tsr, next_sublabels[target_idxs,:,:], 1 )\n\n # TMP FOR DEBUG\n if self.debug_flag:\n #print(' * SimpleDecoder.train_forward, size of bw_sublabels_rep: {}'.format(bw_sublabels_rep[0].size()))\n print(' ***********************************************************')\n sys.stdout.flush()\n\n #local_input = local_input + bw_sublabels_rep\n bw_total_input = self.BWInputNorm( torch.cat( local_input, 2 ) )\n \n #self.bw_RNN.flatten_parameters()\n #idxs = range(bw_total_input.size(0),-1,-1)\n rnn_input = self.RNNInputNorm( self.HiddenSizeMap( bw_total_input ) )\n bw_hidden_state, self.dir_hidden = self.bw_RNN( rnn_input )\n \n bw_mlp_input = rnn_input + self.hidden_dropout( bw_hidden_state )\n deep_reps = self.output_mlp( self.MLPInputNorm( bw_mlp_input ) )\n \n #bw_final_input = [bw_mlp_input + self.hidden_dropout( deep_reps )] + bw_sublabels_rep\n bw_final_input = torch.cat( [bw_mlp_input + self.hidden_dropout(deep_reps)] + bw_sublabels_rep, -1 )\n bw_scores = F.log_softmax( self.BWOutputNorm( self.bw_hidden2tag( bw_final_input ) ), dim=2 )\n \n return (bw_hidden_state, bw_scores)\n\n # NOTE: we assume \"input\" is a list of all inputs given to this layer, \"bw_label_stream\" is the stream of backward labels, so that accessing the i-th position of bw_label_stream when predicting label at position i, gives the label on the right of the current position.\n def fast_forward(self, input, bw_streams):\n \n vflag = (self.TEST == 1)\n if self.TeachingSignal and (not vflag):\n return self.train_forward(input, bw_streams)\n else:\n return self.test_forward(input, bw_streams)\n\n def test_forward(self, input, bw_streams):\n \n # NOTE: we assume the first element of input is the lexical-level representation computed by the encoder, that is its hidden state.\n #lex_rnn_out = input[0]\n vflag = (self.TEST == 1)\n dims = input[0].size()\n sequence_length = 
dims[0]\n batch_size = self.batch_size\n bw_label_streams = bw_streams[0]\n\n #print(' - SimpleDecoder.forward, input size: {}'.format(input[0].size()))\n #sys.stdout.flush()\n\n indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)\n source_length = sequence_length\n sequence_length = len(indeces)\n gold_sequence_length = bw_label_streams[0].size(0)\n gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n \n embedding_mask = dropout_mask_dims( [1, batch_size, self.label_embed_dim], self.emb_dropout_p, self.dtype)\n hidden_layer_mask = dropout_mask_dims( [batch_size, self.hidden_dim], self.hid_dropout_p, self.dtype)\n if vflag:\n embedding_mask = torch.ones( [1, batch_size, self.label_embed_dim] ).type(self.dtype)\n hidden_layer_mask = torch.ones( [batch_size, self.hidden_dim] ).type(self.dtype)\n \n hidden_state = torch.zeros(sequence_length, batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n scores = torch.zeros(sequence_length, batch_size, self.tagset_size).type(self.dtype) #VARIABLE\n start_idx = 0\n if self.direction == 1 or self.direction == 3:\n start_idx = -1\n next_labels = bw_label_streams[0][start_idx,:]\n prev_input = torch.cat( input, 2 )\n next_sublabels = bw_streams[1] #VARIABLE\n \n # NEW: TEST IT!!!\n bin_size = 1\n if self.output_length_factor < 1.0:\n bin_size = int(1 / self.output_length_factor) + 1\n\n for i in indeces:\n source_idx = int( i / self.output_length_factor )\n bin_bound = min(source_length,source_idx+bin_size) # NEW: TEXT IT!!!\n target_idx = int( i * gold_to_hyp_length_factor )\n\n if self.TeachingSignal and (not vflag):\n next_labels = bw_label_streams[0][target_idx,:]\n if self.n_subparts > 0:\n next_sublabels = bw_streams[1][target_idx,:,:] #VARIABLE #GRAPHCHECKPOINT\n\n curr_lex_input = torch.sum( prev_input[source_idx:bin_bound,:,:], 0 ) # SOURCE INDEXING ## This is ~different in 'train_forward'\n #curr_lex_input = prev_input[source_idx,:,:] # TMP, SOURCE INDEXING ...\n bw_sublabels_rep = self.SLM( curr_lex_input, next_sublabels, 0 )\n bw_total_input_lst = [curr_lex_input.view(1, batch_size, -1)] # SOURCE INDEXING # NEW: TEST IT!!!\n bw_total_input_lst.append( self.embed_dropout( self.bw_label_embeddings( next_labels ).view(1, batch_size, -1) ) )\n \n if self.attention_heads > 0:\n #print(' xxx SimpleDecoder, applying attention: {}, {}'.format(hidden_state[i,:,:].size(), prev_input.size()))\n #sys.stdout.flush()\n c, alphas = self.LexAttention( hidden_state[i,:,:].clone().detach().view(batch_size, 1, -1), prev_input.transpose(0,1).contiguous().detach() )\n #bw_total_input_lst.append( c )\n # We gate-mix the original input and the attention vector\n g_lambda = F.sigmoid( self.h_lin( bw_total_input_lst[0] ) + self.a_lin(c) )\n bw_total_input_lst[0] = g_lambda * bw_total_input_lst[0] + (1.0 - g_lambda) * c\n\n bw_total_input = self.BWInputNorm( torch.cat( bw_total_input_lst, 2 ) )\n rnn_input = self.RNNInputNorm( self.hidden_dropout( self.HiddenSizeMap( bw_total_input ) ) ) # NEW: hidden_dropout !\n _, dec_hidden_state = self.bw_RNN( rnn_input )\n #hidden_state[i,:,:] = dec_hidden_state[0,:,:]\n\n bw_mlp_input = self.MLPInputNorm( rnn_input[0] + self.hidden_dropout( dec_hidden_state[0,:,:] ) )\n deep_reps = self.output_mlp( bw_mlp_input )\n \n final_dec_state = bw_mlp_input + self.hidden_dropout( deep_reps )\n hidden_state[i,:,:] = final_dec_state\n\n bw_final_input = torch.cat( [final_dec_state] + bw_sublabels_rep, -1 )\n scores[i,:,:] = F.log_softmax( self.BWOutputNorm( 
self.bw_hidden2tag( bw_final_input ) ), dim=1 )\n\n (max_scores, max_indeces) = torch.max(scores[i,:,:], 1)\n max_indeces = max_indeces.squeeze()\n \n if vflag:\n next_labels = max_indeces\n next_labels = next_labels.view(self.batch_size)\n max_indeces = max_indeces.unsqueeze(0)\n\n if self.n_subparts > 0:\n next_sublabels = torch.LongTensor(self.tag_to_subparts[max_indeces].transpose(0,1)).type(self.ltype) #VARIABLE #GRAPHCHECKPOINT\n\n return (hidden_state, scores)\n\n def forward(self, input, bw_streams):\n\n return self.test_forward(input, bw_streams)\n\n def set_batch_size(self, val):\n self.batch_size = val\n if self.n_subparts > 0:\n self.sl_batch_size = val\n self.bw_RNN.set_batch_size( val )\n self.SLM.set_batch_size(val)\n\n def set_test_mode(self, val):\n self.TEST = val\n self.bw_RNN.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n\n self.TeachingSignal = val\n\n\nclass BidirectionalDecoder(nn.Module):\n\n def __init__(self, nn_params, input_size, direction):\n \n super(BidirectionalDecoder, self).__init__()\n \n # TMP FOR DEBUG\n self.debug_flag = False\n \n self.attention_heads = nn_params.attention_heads\n self.start_tag_idx = nn_params.start_tag_idx\n self.end_tag_idx = nn_params.end_tag_idx\n self.batch_size = nn_params.batch_size\n self.vocab_size = nn_params.word_vocab_size\n self.char_vocab_size = nn_params.char_vocab_size\n self.tagset_size = nn_params.tag_vocab_size\n self.hidden_dim = 2*nn_params.hidden_dim\n self.label_embed_dim = nn_params.label_embed_dim # NEW\n self.char_embed_dim = nn_params.char_embed_dim\n self.char_hidden_dim = nn_params.char_hidden_dim\n self.label_context_size = nn_params.label_context_size\n self.lex_hidden_layers = nn_params.lex_hidden_layers\n self.lab_hidden_layers = nn_params.lab_hidden_layers\n self.n_subparts = nn_params.n_subparts\n self.sl_batch_size = 1\n if self.n_subparts > 0:\n self.tag_to_subparts = nn_params.tag_to_subparts\n self.num_directions = 1\n self.CUDA = nn_params.CUDA\n self.TEST = 0\n self.TeachingSignal = True\n self.dtype = nn_params.dtype\n self.ltype = nn_params.ltype\n self.direction = direction\n self.output_length_factor = nn_params.output_length_factor\n if self.direction == 0 or self.direction == 1:\n self.output_length_factor = 1.0\n \n # TMP FOR DEBUG\n #self.word_dict = nn_params.word_dict\n #self.label_dict = nn_params.label_dict\n #self.ix_to_sublabel = nn_params.ix_to_sublabel\n\n self.fw_label_embeddings = nn.Embedding(self.tagset_size, nn_params.label_embed_dim, sparse=False)\n self.emb_dropout_p = nn_params.embed_dropout # NEW\n self.embed_dropout = nn.Dropout(p=nn_params.embed_dropout)\n \n attention_size = input_size\n sem_attention_size = self.hidden_dim\n self.input_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim\n \n self.FWInputNorm = nn.LayerNorm( self.input_dim )\n self.HiddenSizeMap = nn.Linear(self.input_dim, self.hidden_dim)\n if self.attention_heads > 0:\n self.h_lin = nn.Linear(attention_size, attention_size)\n self.a_lin = nn.Linear(attention_size, attention_size)\n self.LexAttention = ga.GlobalAttention([self.hidden_dim, attention_size, attention_size], attention_size)\n self.SemAttention = ga.GlobalAttention([self.hidden_dim, self.hidden_dim, self.hidden_dim], sem_attention_size)\n self.SLM = SubLabelModule(nn_params, input_size)\n\n self.RNNInputNorm = nn.LayerNorm( self.hidden_dim )\n self.fw_RNN = ContextualFeatureEncoder(self.hidden_dim, self.hidden_dim, self.batch_size, 1, False, nn_params.dtype, 
nn_params.contextual_encoder_type)\n self.hid_dropout_p = nn_params.hidden_dropout # NEW\n self.hidden_dropout = nn.Dropout(p=nn_params.hidden_dropout)\n \n self.MLPInputNorm = nn.LayerNorm( self.hidden_dim )\n self.FWOutputNorm = nn.LayerNorm( self.tagset_size )\n self.output_mlp = ReLU_MLP( [2,self.hidden_dim, self.hidden_dim] )\n output_dim = self.hidden_dim\n if self.n_subparts > 0:\n output_dim = output_dim + nn_params.sublabel_hidden_dim\n output_dim = output_dim + nn_params.attention_heads * sem_attention_size\n self.hidden2tag = nn.Linear(output_dim, self.tagset_size)\n\n #self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n \n def init_hidden(self):\n #self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n self.fw_RNN.init_hidden()\n self.SLM.init_hidden()\n \n def resize_embeddings(self, nn_params):\n\n if nn_params.tag_vocab_size > self.tagset_size:\n old_embeddings = self.fw_label_embeddings\n self.fw_label_embeddings = nn.Embedding(nn_params.tag_vocab_size, nn_params.label_embed_dim, sparse=False)\n self.fw_label_embeddings.weight[:self.tagset_size,:] = old_embeddings.weight\n \n old_lin = self.hidden2tag\n output_dim = self.hidden_dim\n if self.n_subparts > 0:\n output_dim = output_dim + nn_params.sublabel_hidden_dim\n self.hidden2tag = nn.Linear(output_dim, nn_params.tag_vocab_size)\n self.hidden2tag.weight[:self.tagset_size,:] = old_lin.weight\n \n old_norm = self.FWOutputNorm\n self.FWOutputNorm = nn.LayerNorm(nn_params.tag_vocab_size)\n self.FWOutputNorm.weight[:self.tagset_size] = old_norm.weight\n \n self.tagset_size = nn_params.tag_vocab_size\n \n def train_forward(self, input, fw_streams, bw_states):\n\n dims = input[0].size()\n sequence_length = dims[0]\n batch_size = self.batch_size\n fw_label_streams = fw_streams[0]\n prev_sublabels = fw_streams[1]\n \n indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)\n source_length = sequence_length\n sequence_length = len(indeces)\n gold_sequence_length = fw_label_streams[0].size(0)\n gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n \n source_idxs = [int( i / self.output_length_factor ) for i in indeces]\n target_idxs = [int( i * gold_to_hyp_length_factor ) for i in indeces]\n\n input_tsr = torch.cat( input, 2 )[source_idxs,:,:]\n local_input = [input_tsr]\n local_input.append( self.embed_dropout( self.fw_label_embeddings(fw_label_streams[0][target_idxs,:]) ) )\n\n # TMP FOR DEBUG\n if self.debug_flag:\n print('')\n print(' ************************************************')\n print(' * BidirectionalDecoder.train_forward -')\n print('')\n print(' *** indeces ({}): {}'.format(len(indeces), list(indeces)))\n print(' *** source_idxs ({}): {}'.format(len(source_idxs), source_idxs))\n print(' *** target_idxs ({}): {}'.format(len(target_idxs), target_idxs))\n print('*')\n print(' * Size of input: {}'.format( torch.cat(input, 2).size() ))\n print(' * Size of local_input: {}'.format( torch.cat(local_input, 2).size() ))\n print(' * Size of bw_label_streams: {}'.format(fw_label_streams[0].size()))\n print(' *')\n print(' * BidirectionalDecoder.train_forward, forward sublabels and labels:')\n for tgt_idx in target_idxs:\n # print(' {}'.format([self.ix_to_sublabel[sl.item()] for sl in prev_sublabels[tgt_idx,:,0]]))\n print(' -----')\n print('@{}, {}'.format(tgt_idx, self.label_dict.index2token(fw_label_streams[0][tgt_idx,0])))\n print('')\n print(' * BidirectionalDecoder.train_forward, 
len of local_input: {}'.format(len(local_input)))\n for debug_idx in range(len(local_input)):\n print(' * {}'.format(local_input[debug_idx].size()))\n print(' ---')\n #print(' * BidirectionalDecoder.train_forward, size of prev_sublabels: {}'.format(prev_sublabels.size()))\n print(' * BidirectionalDecoder.train_forward, size of fw_label_streams[0]: {}'.format(fw_label_streams[0].size()))\n #print(' ***********************************************************')\n #print('')\n # END TMP FOR DEBUG\n\n fw_sublabels_rep = []\n if self.n_subparts > 0:\n fw_sublabels_rep = self.SLM( input_tsr, prev_sublabels[target_idxs,:,:], 1 )\n\n # TMP FOR DEBUG\n if self.debug_flag:\n #print(' * BidirectionalDecoder.train_forward, size of fw_sublabels_rep: {}'.format(fw_sublabels_rep[0].size()))\n print(' ***********************************************************')\n sys.stdout.flush()\n\n #local_input = local_input + fw_sublabels_rep\n fw_total_input = self.FWInputNorm( torch.cat( local_input, 2 ) )\n \n rnn_input = self.RNNInputNorm( self.HiddenSizeMap( fw_total_input ) )\n fw_hidden_state, self.dir_hidden = self.fw_RNN( rnn_input )\n \n fw_mlp_input = rnn_input + self.hidden_dropout( fw_hidden_state )\n deep_reps = self.output_mlp( self.MLPInputNorm( fw_mlp_input ) )\n \n fw_final_input = torch.cat( [fw_mlp_input + self.hidden_dropout( deep_reps + bw_states[0][indeces] )] + fw_sublabels_rep, -1 )\n fw_scores = F.log_softmax( self.FWOutputNorm( self.hidden2tag( fw_final_input ) ), dim=2 )\n \n return (fw_hidden_state, fw_scores)\n \n # NOTE: we assume \"bw_states\" contains backward hidden states and backward predictions, this and only this information, and in this order.\n # OBSOLETE: remove it !\n def fast_forward(self, input, fw_streams, bw_states):\n \n vflag = (self.TEST == 1)\n if self.TeachingSignal and (not vflag):\n #print(' * BidirectionalDecoder.train_forward...')\n #sys.stdout.flush()\n return self.train_forward(input, fw_streams, bw_states)\n else:\n #print(' * BidirectionalDecoder.test_forward...')\n #sys.stdout.flush()\n return self.test_forward(input, fw_streams, bw_states)\n\n\n # NOTE: we assume \"bw_states\" contains backward hidden states and backward predictions, this and only this information, and in this order.\n def test_forward(self, input, fw_streams, bw_states):\n \n # NOTE: we assume the first element of input is the lexical-level representation computed by the encoder, that is its hidden state.\n vflag = (self.TEST == 1)\n dims = input[0].size()\n sequence_length = dims[0]\n batch_size = self.batch_size\n fw_label_streams = fw_streams[0]\n \n target_length = bw_states[0].size(0)\n indeces = decoding_indeces_(self.direction, target_length, 1.0) # We use the length of the output sequence predicted by a previous simple-decoder\n source_length = sequence_length\n sequence_length = len(indeces)\n gold_sequence_length = fw_label_streams[0].size(0)\n gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n \n embedding_mask = dropout_mask_dims( [1, batch_size, self.label_embed_dim], self.emb_dropout_p, self.dtype)\n hidden_layer_mask = dropout_mask_dims( [batch_size, self.hidden_dim], self.hid_dropout_p, self.dtype)\n if vflag:\n embedding_mask = torch.ones( [1, batch_size, self.label_embed_dim] ).type(self.dtype)\n hidden_layer_mask = torch.ones( [batch_size, self.hidden_dim] ).type(self.dtype)\n\n fw_hidden_state = torch.zeros(sequence_length, batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n fw_scores = torch.zeros(sequence_length, batch_size, 
self.tagset_size).type(self.dtype) #VARIABLE\n start_idx = 0\n if self.direction == 1 or self.direction == 3:\n start_idx = -1\n prev_labels = fw_label_streams[0][start_idx,:]\n prev_input = torch.cat( input, 2 )\n prev_sublabels = fw_streams[1] #VARIABLE\n\n # NEW: TEST IT!!!\n bin_size = 1\n if self.output_length_factor < 1.0:\n bin_size = int(1 / self.output_length_factor) + 1\n\n self.fw_RNN.set_hidden_state( bw_states[0][0,:,:].view(1, batch_size, -1))\n for i in indeces:\n source_idx = int( i / self.output_length_factor )\n bin_bound = min(source_length,source_idx+bin_size) # NEW: TEST IT!!!\n target_idx = int( i * gold_to_hyp_length_factor )\n \n if self.TeachingSignal and (not vflag):\n prev_labels = fw_label_streams[0][target_idx,:]\n if self.n_subparts > 0:\n prev_sublabels = fw_streams[1][target_idx,:,:] #VARIABLE #GRAPHCHECKPOINT\n\n curr_lex_input = torch.sum(prev_input[source_idx:bin_bound,:,:],0) ## This is ~different in 'train_forward'\n #curr_lex_input = prev_input[source_idx,:,:]\n fw_sublabels_rep = self.SLM( curr_lex_input, prev_sublabels, 0 )\n fw_total_input_lst = [curr_lex_input.view(1, batch_size, -1)] # SOURCE INDEXING # NEW: TEST IT!!!\n fw_total_input_lst.append( self.embed_dropout( self.fw_label_embeddings( prev_labels ).view(1, batch_size, -1) ) )\n \n if self.attention_heads > 0:\n c, alphas = self.LexAttention( fw_hidden_state[i,:,:].clone().view(batch_size, 1, -1), prev_input.transpose(0, 1).contiguous() )\n #fw_total_input_lst.append( c )\n g_lambda = F.sigmoid( self.h_lin( fw_total_input_lst[0] ) + self.a_lin(c) )\n fw_total_input_lst[0] = g_lambda * fw_total_input_lst[0] + (1.0 - g_lambda) * c\n \n fw_total_input = self.FWInputNorm( torch.cat( fw_total_input_lst, 2 ) )\n rnn_input = self.RNNInputNorm( self.hidden_dropout( self.HiddenSizeMap( fw_total_input ) ) )\n _, dec_hidden_state = self.fw_RNN( rnn_input )\n #fw_hidden_state[i,:,:] = dec_hidden_state[0,:,:]\n\n #mlp_input = fw_total_input[0] + hidden_layer_mask*( dec_hidden_state[0,:,:] )\n mlp_input = self.MLPInputNorm( rnn_input[0] + self.hidden_dropout( dec_hidden_state[0,:,:] ) )\n deep_reps = self.output_mlp( mlp_input )\n \n dec_final_state = mlp_input + self.hidden_dropout(deep_reps)\n fw_hidden_state[i,:,:] = dec_final_state\n atts = []\n if self.attention_heads > 0:\n sem_c, sem_alphas = self.SemAttention(dec_final_state.clone().view(batch_size, 1, -1), bw_states[0].transpose(0, 1).contiguous())\n atts = [sem_c.view(batch_size, -1)]\n \n #fw_final_input = torch.cat( [mlp_input + self.hidden_dropout(deep_reps) + bw_states[0][i,:,:]] + fw_sublabels_rep + atts, -1 )\n fw_final_input = torch.cat( [dec_final_state + bw_states[0][i,:,:]] + fw_sublabels_rep + atts, -1 )\n \n #fw_scores[i,:,:] = F.log_softmax( self.hidden2tag( fw_final_input + torch.sum( hidden_layer_mask*( torch.stack(fw_sem_atts) ) )), dim=1 )\n fw_scores[i,:,:] = F.log_softmax( self.FWOutputNorm( self.hidden2tag( fw_final_input ) ), dim=1 )\n\n (max_scores, max_indeces) = torch.max(fw_scores[i,:,:], 1)\n max_indeces = max_indeces.squeeze()\n \n if vflag:\n prev_labels = max_indeces\n prev_labels = prev_labels.view(self.batch_size)\n max_indeces = max_indeces.unsqueeze(0)\n\n if self.n_subparts > 0:\n prev_sublabels = torch.LongTensor(self.tag_to_subparts[max_indeces].transpose(0,1)).type(self.ltype) #VARIABLE #GRAPHCHECKPOINT\n\n return (fw_hidden_state, fw_scores)\n\n def forward(self, input, fw_streams, bw_states):\n\n # TMP FOR DEBUG\n #self.train_forward(input, fw_streams, bw_states)\n \n return self.test_forward(input, 
fw_streams, bw_states)\n\n def set_batch_size(self, val):\n self.batch_size = val\n if self.n_subparts > 0:\n self.sl_batch_size = val\n self.fw_RNN.set_batch_size( val )\n self.SLM.set_batch_size(val)\n\n def set_test_mode(self, val):\n self.TEST = val\n self.fw_RNN.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n\n self.TeachingSignal = val\n\n# ---------- Models for Speech decoding ----------\n\nclass Conv1dNormWrapper(nn.Module):\n '''\n class Conv1dNormWrapper\n \n Wrap a Conv1d class to be used in a nn.Sequential module, adding a layer normalization module.\n '''\n\n def __init__(self, input_size, output_size, kernel, stride_factor):\n\n super(Conv1dNormWrapper,self).__init__()\n\n self.conv = nn.Conv1d(input_size, output_size, kernel, stride=stride_factor)\n self.cNorm = nn.LayerNorm( output_size )\n\n def forward(self, input):\n\n return self.cNorm( self.conv( input ).permute(2,0,1) ).permute(1,2,0)\n\nclass LSTMWrapper(nn.Module):\n '''\n LSTMWrapper\n \n Wrap a LSTM layer to be used in a nn.Sequential module.\n '''\n\n def __init__(self, input_size, output_size, bidirFlag):\n\n super(LSTMWrapper,self).__init__()\n self.lstm = nn.LSTM(input_size, output_size, bidirectional=bidirFlag)\n\n def forward(self, input):\n\n output, _ = self.lstm( input )\n return output\n\nclass BasicEncoder(nn.Module):\n \n def __init__(self, params):\n \n super(BasicEncoder,self).__init__()\n #self.window_size = params.window_size\n\n # Parameter initialization\n # 1. Size of convolution layer\n self.input_size = params.num_features\n self.input_conv = self.input_size\n self.speech_conv_size = params.speech_conv_size\n \n # 2. Size of LSTM layer\n self.input_size_lstm = self.speech_conv_size\n self.hidden_size = params.speech_lstm_size\n \n # 3. Size of the output, that is of the linear layer\n self.output_size = params.output_size\n \n self.num_conv = params.speech_conv\n self.num_lstm_layers = params.num_lstm_layers\n self.conv_kernel = params.conv_kernel\n self.conv_kernel_width = params.conv_kernel_width\n self.conv_kernel_height = params.conv_kernel_height\n self.conv2d_dim = params.small_dim\n self.kernel_2d_hw_ratio = params.kernel_2d_hw_ratio\n self.stride_factor1 = params.conv_stride1\n self.stride_factor2 = params.conv_stride2\n\n # Layer initialization\n # 1. Convolutions\n conv_layers = []\n for i in range(self.num_conv):\n conv_stride = 1\n if i == self.num_conv-1:\n conv_stride = 2\n input_size = self.speech_conv_size\n if i == 0:\n input_size = self.input_conv\n conv_layers.append( ('Conv'+str(i+1), Conv1dNormWrapper(input_size, self.speech_conv_size, self.conv_kernel, conv_stride)) )\n conv_layers.append( ('Dropout'+str(i+1), nn.Dropout(p=params.drop_ratio)) )\n #conv_layers.append( ('ConvNorm'+str(i+1), nn.BatchNorm1d( self.speech_conv_size )) )\n self.convolutions = nn.Sequential( OrderedDict(conv_layers) )\n \n '''#self.conv1 = nn.Conv2d(self.input_conv,self.speech_conv_size, (self.conv_kernel_width, self.conv_kernel_height), stride=(self.stride_factor1, self.stride_factor1))\n self.conv1 = nn.Conv1d(self.input_conv,self.speech_conv_size, self.conv_kernel, stride=self.stride_factor1)\n #self.conv2 = nn.Conv1d(self.speech_conv_size,self.speech_conv_size,self.conv_kernel,stride=self.stride_factor2)'''\n #self.CONV_norm = nn.LayerNorm( self.speech_conv_size )\n \n # 2. 
Recurrent layers\n recurrent_layers = []\n for i in range(self.num_lstm_layers):\n input_size = 2*self.hidden_size\n if i == 0:\n input_size = self.input_size_lstm\n recurrent_layers.append( ('LSTM'+str(i+1), LSTMWrapper(input_size, self.hidden_size, True)) )\n recurrent_layers.append( ('ConvNorm'+str(i+1), nn.LayerNorm( 2*self.hidden_size )) )\n recurrent_layers.append( ('Dropout'+str(i+1), nn.Dropout(p=params.drop_ratio)) )\n self.rnns = nn.Sequential( OrderedDict(recurrent_layers) )\n\n #self.h_dropout = nn.Dropout(p=params.drop_ratio)\n #self.LSTM_norm = nn.LayerNorm(self.hidden_size*2)\n #self.rnns = nn.LSTM(self.input_size_lstm,self.hidden_size,num_layers = self.num_lstm_layers,bidirectional=True)\n \n #Linear Layer\n self.linear_layer = nn.Linear(2*self.hidden_size, self.output_size)\n \n #small_dim = int( math.sqrt(seq_len / hw_ratio) + 0.5 )\n #x_pad = torch.randn(num_features, batch_size, small_dim * hw_ratio * small_dim - seq_len)\n #x_padded = torch.cat( [x, x_pad], 2 )\n #x_conv = x_padded.view(num_features, batch_size, hw_ratio*small_dim, small_dim)\n\n '''\n print(' *** Initializing BasicEncoder:')\n print(' * Input size: {}'.format(params.num_features))\n print(' * Output size: {}'.format(params.output_size))\n print(' * Convolution size: {}'.format(params.speech_conv_size))\n print(' * Hidden size: {}'.format(params.speech_lstm_size))\n print(' -')\n print(' * Stride factor 1: {}'.format(params.conv_stride1))\n print(' * Stride factor 2: {}'.format(params.conv_stride2))\n print(' * Num. LSTM layers: {}'.format(params.num_lstm_layers))\n print(' ***')\n '''\n\n\n def forward(self, x):\n # Input has shape (sequence_length, batch_size, num. of channels), that is (L, N, C), convolution needs it to be (N, C, L)\n \n # 1. For Conv2d\n #(L, N, C) = x.size()\n #small_dim = int( math.sqrt(float(L) / float(self.kernel_2d_hw_ratio)) )\n #out = self.conv1( x.permute(1, 2, 0).view(N, C, small_dim * self.kernel_2d_hw_ratio, small_dim) )\n #out = self.h_dropout( out.view(N, self.speech_conv_size, -1).permute(2,0,1) )\n # ---------------------\n \n '''# 2. 
For Conv1d\n out = self.conv1( x.permute(1, 2, 0) )\n out = self.h_dropout( out.permute(2,0,1) )\n # ---------------------\n \n #out = self.conv2(x)\n\n output, _ = self.rnns( self.conv_output_norm( out ) )\n output = self.h_dropout(output)\n \n output = self.linear_layer( self.LSTM_norm(output) )\n #output = self.log_softmax(output)'''\n \n # New forward code with generic layer structures\n out = self.convolutions( x.permute(1, 2, 0) )\n #out = self.rnns( self.CONV_norm( out.permute(2,0,1) ) )\n #output = self.linear_layer( self.LSTM_norm( out ) )\n out = self.rnns( out.permute(2, 0, 1) )\n output = self.linear_layer( out )\n \n return (output, output, out)\n\n\nclass BasicSpeechEncoder(nn.Module):\n\n def __init__(self, params, nn_params):\n\n super(BasicSpeechEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n\n self.encoder = BasicEncoder(params)\n self.log_softmax = nn.LogSoftmax(dim = 2)\n\n def get_fw_parameters(self):\n return self.parameters()\n\n def get_bw_parameters(self):\n return self.get_fw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n\n (representations, reps, hidden_states) = self.encoder( x )\n scores = self.log_softmax( representations )\n\n return (scores, scores, hidden_states) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n #return (scores, representations)\n\n def set_test_mode(self, val):\n \n return\n\n def set_teaching_signal_flag(self, val):\n\n return\n\n def set_speaker_val(self, val):\n\n self.speaker_val = val\n\n def pad_input(self, input, val):\n\n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n #SpkID = torch.ones_like(input)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n #SpkID[:,i,:] = SpkID[:,i,:] * self.speaker_val[i] * 0.002\n #return torch.cat( [padder, input + SpkID, padder], 0 )\n return torch.cat( [padder, input, padder], 0 )\n\nclass BasicSpeechSeqEncoder(nn.Module):\n \n def __init__(self, params, nn_params):\n \n super(BasicSpeechSeqEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n \n self.encoder = BasicEncoder(params)\n self.seq_encoder = SimpleDecoder(nn_params, 2*params.speech_lstm_size, 0)\n\n def get_fw_parameters(self):\n \n return self.parameters()\n\n def get_bw_parameters(self):\n\n return self.get_fw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n \n (sequence_length, batch_size, num_features) = x.size()\n self.seq_encoder.set_batch_size( batch_size )\n \n (representations, reps, hidden_states) = self.encoder(x)\n\n (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0]))\n fw_streams = (prev_labels, prev_sublabels)\n \n self.seq_encoder.init_hidden()\n (fw_hidden_state, fw_scores) = self.seq_encoder([hidden_states], fw_streams) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n\n return (fw_scores, fw_scores, fw_hidden_state)\n \n def set_test_mode(self, val):\n \n self.seq_encoder.set_test_mode( val )\n \n def set_teaching_signal_flag(self, val):\n \n self.seq_encoder.set_teaching_signal_flag( val )\n \n def load_encoder(self, bsencoder):\n \n self.encoder.load_state_dict( bsencoder.encoder.state_dict() )\n \n def set_speaker_val(self, val):\n \n self.speaker_val = val\n \n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.cuda.FloatTensor(1, batch_size, num_features)\n for i in 
range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\nclass BasicSpeechBiseqEncoder(nn.Module):\n \n def __init__(self, params, nn_params):\n \n super(BasicSpeechBiseqEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n \n self.encoder = BasicEncoder(params)\n #self.seq_encoder = SimpleDecoder(nn_params, params.output_size, 0)\n #self.seq_encoder = SimpleDecoder(nn_params, params.output_size, 2) # NEW: TEST IT!!!\n self.bw_seq_encoder = SimpleDecoder(nn_params, 2*params.speech_lstm_size, 1) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n #self.log_softmax = nn.LogSoftmax(dim = 2)\n self.fw_seq_encoder = BidirectionalDecoder(nn_params, 2*params.speech_lstm_size, 0)\n\n def get_fw_parameters(self):\n\n return list(filter(lambda p: p.requires_grad, self.encoder.parameters())) + list(filter(lambda p: p.requires_grad, self.fw_seq_encoder.parameters()))\n\n def get_bw_parameters(self):\n\n return list(filter(lambda p: p.requires_grad, self.encoder.parameters())) + list(filter(lambda p: p.requires_grad, self.bw_seq_encoder.parameters()))\n \n def forward(self, x, next_labels, prev_labels):\n \n (sequence_length, batch_size, num_features) = x.size()\n self.fw_seq_encoder.set_batch_size( batch_size )\n self.bw_seq_encoder.set_batch_size( batch_size )\n \n (representations, reps, hidden_states) = self.encoder(x)\n\n (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0]))\n fw_streams = (prev_labels, prev_sublabels)\n bw_streams = (next_labels, next_sublabels)\n \n self.bw_seq_encoder.init_hidden()\n self.fw_seq_encoder.init_hidden()\n #(fw_hidden_state, fw_scores) = self.seq_encoder([representations], fw_streams)\n (bw_hidden_state, bw_scores) = self.bw_seq_encoder([hidden_states], bw_streams) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n (fw_hidden_state, fw_scores) = self.fw_seq_encoder([hidden_states], fw_streams, [bw_hidden_state, bw_scores])\n global_scores = 0.5 * (fw_scores + bw_scores)\n\n return (fw_scores, bw_scores, fw_hidden_state)\n\n def set_test_mode(self, val):\n\n self.bw_seq_encoder.set_test_mode( val )\n self.fw_seq_encoder.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n\n self.bw_seq_encoder.set_teaching_signal_flag( val )\n self.fw_seq_encoder.set_teaching_signal_flag( val )\n\n def load_encoder(self, bsencoder):\n\n self.encoder.load_state_dict( bsencoder.encoder.state_dict() )\n\n def set_speaker_val(self, val):\n\n self.speaker_val = val\n\n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\n #self.speaker_val = val\n #(sequence_length, batch_size, num_features) = input.size()\n #padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n #SpkID = torch.ones_like(input)\n #for i in range( batch_size ):\n # padder[:,i,:] = self.speaker_val[i]\n # SpkID[:,i,:] = SpkID[:,i,:] * self.speaker_val[i] * 0.002\n #return torch.cat( [padder, input + SpkID, padder], 0 )\n\nclass MLSpeechEncoder(nn.Module):\n\n def __init__(self, ch_params, tk_params, nn_params):\n\n super(MLSpeechEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n\n self.char_encoder = BasicSpeechEncoder(ch_params, nn_params)\n self.token_encoder = 
BasicSpeechEncoder(tk_params, nn_params)\n\n def get_fw_parameters(self):\n\n return self.parameters()\n\n def get_bw_parameters(self):\n\n return self.get_fw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n\n (ch_scores, ch_sc, ch_reps) = self.char_encoder(x, next_labels, prev_labels)\n (tk_scores, tk_sc, tk_reps) = self.token_encoder(ch_reps, next_labels, prev_labels)\n\n return (tk_scores, tk_scores, tk_reps)\n\n def load_char_encoder(self, char_encoder):\n\n self.char_encoder.encoder.load_state_dict( char_encoder.encoder.state_dict() )\n #for param in self.char_encoder.encoder.parameters():\n # param.requires_grad = False\n \n def freeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = False\n\n def unfreeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = True\n\n def load_token_encoder(self, token_encoder):\n\n self.token_encoder.encoder.rnns.load_state_dict( token_encoder.encoder.rnns.state_dict() )\n\n def set_test_mode(self, val):\n \n return\n\n def set_teaching_signal_flag(self, val):\n \n return\n\n def set_speaker_val(self, val):\n\n self.speaker_val = val\n\n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\nclass MLSpeechSeqEncoder(nn.Module):\n \n def __init__(self, ch_params, tk_params, nn_params):\n \n super(MLSpeechSeqEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n \n self.char_encoder = BasicSpeechEncoder(ch_params, nn_params)\n self.token_encoder = BasicSpeechSeqEncoder(tk_params, nn_params)\n\n def get_fw_parameters(self):\n\n return self.char_encoder.get_fw_parameters() + self.token_encoder.get_fw_parameters()\n\n def get_bw_parameters(self):\n\n return self.char_encoder.get_bw_parameters() + self.token_encoder.get_bw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n \n (ch_scores, ch_sc, ch_reps) = self.char_encoder(x, next_labels, prev_labels)\n (fw_tk_scores, bw_tk_scores, tk_reps) = self.token_encoder(ch_reps, next_labels, prev_labels)\n \n return (fw_tk_scores, bw_tk_scores, tk_reps)\n \n def load_char_encoder(self, char_encoder):\n \n self.char_encoder.encoder.load_state_dict( char_encoder.encoder.state_dict() )\n #for param in self.char_encoder.encoder.parameters():\n # param.requires_grad = False\n \n def freeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = False\n\n def unfreeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = True\n\n def load_token_encoder(self, token_encoder):\n \n self.token_encoder.encoder.rnns.load_state_dict( token_encoder.encoder.rnns.state_dict() )\n self.token_encoder.bw_seq_encoder.load_state_dict( token_encoder.bw_seq_encoder.state_dict() )\n self.token_encoder.fw_seq_encoder.load_state_dict( token_encoder.fw_seq_encoder.state_dict() )\n\n def load_ml_encoder(self, ml_encoder):\n \n self.char_encoder.load_state_dict( ml_encoder.char_encoder.state_dict() )\n #print(' -- MLSpeechSeqEncoder: freezing char-encoder parameters...')\n #for param in self.char_encoder.parameters():\n # param.requires_grad = False\n self.token_encoder.encoder.load_state_dict( ml_encoder.token_encoder.encoder.state_dict() )\n #print(' -- 
MLSpeechSeqEncoder: freezing token-encoder (encoder only) parameters...')\n #sys.stdout.flush()\n #for param in self.token_encoder.encoder.parameters():\n # param.requires_grad = False\n \n def load_ml_seq_decoder(self, ml_encoder):\n \n self.char_encoder.load_state_dict( ml_encoder.char_encoder.state_dict() )\n self.token_encoder.load_state_dict( ml_encoder.token_encoder.state_dict() )\n\n def set_test_mode(self, val):\n \n self.token_encoder.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n \n self.token_encoder.set_teaching_signal_flag( val )\n\n def set_speaker_val(self, val):\n\n self.speaker_val = val\n\n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\n# ---------- Models for End-to-end SLU ----------\n\nclass SLUSimpleDecoder(nn.Module):\n\n def __init__(self, ch_params, tk_params, nn_params):\n \n super(SLUSimpleDecoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n\n tmp = nn_params.tag_vocab_size\n nn_params.tag_vocab_size = nn_params.sd_tag_vocab_size\n decoder_output_size = 0\n if nn_params.train_char_decoder or nn_params.load_char_decoder:\n print(' -- SLUSimpleDecoder: using character speech decoder')\n sys.stdout.flush()\n self.speech_decoder = BasicSpeechSeqEncoder(ch_params, nn_params)\n decoder_output_size = nn_params.hidden_dim\n elif nn_params.train_token_decoder or nn_params.load_token_decoder:\n print(' -- SLUSimpleDecoder: using token speech decoder')\n sys.stdout.flush()\n self.speech_decoder = BasicSpeechSeqEncoder(tk_params, nn_params)\n decoder_output_size = nn_params.hidden_dim\n elif nn_params.train_ml_decoder or nn_params.load_ml_decoder:\n print(' -- SLUSimpleDecoder: using 2-stage token speech decoder')\n sys.stdout.flush()\n self.speech_decoder = MLSpeechSeqEncoder(ch_params, tk_params, nn_params)\n decoder_output_size = nn_params.hidden_dim\n\n nn_params.tag_vocab_size = tmp\n nn_params.label_embed_dim = 2 * nn_params.label_embed_dim\n nn_params.hidden_dim = 2 * nn_params.hidden_dim\n self.slu_decoder = SimpleDecoder(nn_params, decoder_output_size, 0)\n\n def get_fw_parameters(self):\n\n return self.speech_decoder.get_fw_parameters() + list(filter(lambda p: p.requires_grad, self.slu_decoder.parameters()))\n\n def get_bw_parameters(self):\n\n return self.speech_decoder.get_bw_parameters() + list(filter(lambda p: p.requires_grad, self.slu_decoder.parameters()))\n\n def forward(self, input, bw_label_streams, fw_label_streams):\n \n (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0])) #VARIABLE x2\n fw_streams = (fw_label_streams, prev_sublabels)\n bw_streams = (bw_label_streams, next_sublabels)\n \n (sequence_length, batch_size, num_features) = input.size()\n #padder = torch.cuda.FloatTensor(1, batch_size, num_features)\n #for i in range( batch_size ):\n # padder[:,i,:] = self.speaker_val[i]\n #padded_input = torch.cat( [padder, input, padder], 0 )\n\n self.slu_decoder.set_batch_size( batch_size )\n (fw_tk_scores, bw_tk_scores, tk_reps) = self.speech_decoder(input, bw_label_streams, fw_label_streams)\n self.slu_decoder.init_hidden()\n (sem_hidden_states, sem_scores) = self.slu_decoder([tk_reps], fw_streams)\n\n return (sem_scores, sem_scores, sem_hidden_states)\n\n def load_speech_encoder(self, speech_encoder):\n\n 
self.speech_decoder.load_state_dict( speech_encoder.state_dict() )\n if isinstance(speech_encoder, MLSpeechSeqEncoder):\n print(' -- SLUSimpleDecoder: freezing speech-encoder parameters...')\n sys.stdout.flush()\n for param in self.speech_decoder.char_encoder.parameters():\n param.requires_grad = False\n\n def set_speaker_val(self, val):\n \n self.speaker_val = val\n \n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\n def set_test_mode(self, val):\n \n self.speech_decoder.set_test_mode( val )\n self.slu_decoder.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n \n self.speech_decoder.set_teaching_signal_flag( val )\n self.slu_decoder.set_teaching_signal_flag( val )\n\nclass SLUBiDecoder(nn.Module):\n \n def __init__(self, ch_params, tk_params, nn_params):\n \n super(SLUBiDecoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n \n tmp = nn_params.tag_vocab_size\n nn_params.tag_vocab_size = nn_params.sd_tag_vocab_size\n decoder_output_size = 0\n if nn_params.train_char_decoder or nn_params.load_char_decoder:\n print(' -- SLUBiDecoder: using character speech decoder')\n sys.stdout.flush()\n self.speech_decoder = BasicSpeechSeqEncoder(ch_params, nn_params)\n decoder_output_size = nn_params.hidden_dim\n elif nn_params.train_token_decoder or nn_params.load_token_decoder:\n print(' -- SLUBiDecoder: using token speech decoder')\n sys.stdout.flush()\n self.speech_decoder = BasicSpeechSeqEncoder(tk_params, nn_params)\n decoder_output_size = nn_params.hidden_dim\n elif nn_params.train_ml_decoder or nn_params.load_ml_decoder:\n print(' -- SLUBiDecoder: using 2-stage token speech decoder')\n sys.stdout.flush()\n self.speech_decoder = MLSpeechSeqEncoder(ch_params, tk_params, nn_params)\n decoder_output_size = nn_params.hidden_dim\n\n nn_params.tag_vocab_size = tmp\n nn_params.label_embed_dim = 2 * nn_params.label_embed_dim\n nn_params.hidden_dim = 2 * nn_params.hidden_dim\n self.bw_slu_decoder = SimpleDecoder(nn_params, decoder_output_size, 1)\n self.fw_slu_decoder = BidirectionalDecoder(nn_params, decoder_output_size, 0)\n\n def forward(self, input, bw_label_streams, fw_label_streams):\n \n (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0])) #VARIABLE x2\n fw_streams = (fw_label_streams, prev_sublabels)\n bw_streams = (bw_label_streams, next_sublabels) \n \n (sequence_length, batch_size, num_features) = input.size()\n self.bw_slu_decoder.set_batch_size( batch_size )\n self.fw_slu_decoder.set_batch_size( batch_size )\n (fw_tk_scores, bw_tk_scores, tk_reps) = self.speech_decoder(input, bw_label_streams, fw_label_streams)\n self.bw_slu_decoder.init_hidden()\n self.fw_slu_decoder.init_hidden()\n (sem_bw_hidden_states, sem_bw_scores) = self.bw_slu_decoder([tk_reps], bw_streams)\n (sem_fw_hidden_states, sem_fw_scores) = self.fw_slu_decoder([tk_reps], fw_streams, [sem_bw_hidden_states, sem_bw_scores])\n global_scores = 0.5 * (sem_fw_scores + sem_bw_scores)\n\n return (global_scores, sem_bw_scores, sem_fw_hidden_states)\n\n def load_speech_encoder(self, speech_encoder):\n \n self.speech_decoder.load_state_dict( speech_encoder.state_dict() )\n if isinstance(speech_encoder, MLSpeechSeqEncoder):\n print(' -- SLUBiDecoder: freezing speech-encoder parameters...')\n sys.stdout.flush()\n for param in self.speech_decoder.char_encoder.parameters():\n 
param.requires_grad = False\n\n def set_speaker_val(self, val):\n \n self.speaker_val = val\n \n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\n def set_test_mode(self, val):\n \n self.speech_decoder.set_test_mode( val )\n self.bw_slu_decoder.set_test_mode( val )\n self.fw_slu_decoder.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n \n self.speech_decoder.set_teaching_signal_flag( val )\n self.bw_slu_decoder.set_teaching_signal_flag( val )\n self.fw_slu_decoder.set_teaching_signal_flag( val )\n\n" ]
[ [ "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.LongTensor", "torch.max", "torch.ones", "torch.cat", "torch.nn.LSTM", "torch.zeros", "torch.sum", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.cuda.FloatTensor", "torch.nn.Linear", "torch.FloatTensor", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hisiter97/Transformer_OCR_API
[ "78322ec2b9648d0b027326dced7c4aec967bcab3" ]
[ "vietocr/model/trainer.py" ]
[ "from vietocr.optim.optim import ScheduledOptim\nfrom vietocr.optim.labelsmoothingloss import LabelSmoothingLoss\nfrom torch.optim import Adam, SGD, AdamW\nfrom torch import nn\nfrom vietocr.tool.translate import build_model\nfrom vietocr.tool.translate import translate, batch_translate_beam_search\nfrom vietocr.tool.utils import download_weights\nfrom vietocr.tool.logger import Logger\nfrom vietocr.loader.aug import ImgAugTransform\n\nimport yaml\nimport torch\nfrom vietocr.loader.DataLoader import DataGen\nfrom vietocr.loader.dataloader import OCRDataset, ClusterRandomSampler, collate_fn\nfrom torch.utils.data import DataLoader\nfrom einops import rearrange\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, CyclicLR, OneCycleLR\n\nimport torchvision \n\nfrom vietocr.tool.utils import compute_accuracy\nfrom PIL import Image\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport time\n\nclass Trainer():\n def __init__(self, config, pretrained=True):\n\n self.config = config\n self.model, self.vocab = build_model(config)\n \n self.device = config['device']\n self.num_iters = config['trainer']['iters']\n self.beamsearch = config['predictor']['beamsearch']\n\n self.data_root = config['dataset']['data_root']\n self.train_annotation = config['dataset']['train_annotation']\n self.valid_annotation = config['dataset']['valid_annotation']\n self.dataset_name = config['dataset']['name']\n\n self.batch_size = config['trainer']['batch_size']\n self.print_every = config['trainer']['print_every']\n self.valid_every = config['trainer']['valid_every']\n\n self.checkpoint = config['trainer']['checkpoint']\n self.export_weights = config['trainer']['export']\n self.metrics = config['trainer']['metrics']\n logger = config['trainer']['log']\n \n if logger:\n self.logger = Logger(logger) \n\n if pretrained:\n weight_file = download_weights(**config['pretrain'], quiet=config['quiet'])\n self.load_weights(weight_file)\n\n self.iter = 0\n \n self.optimizer = AdamW(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09)\n self.scheduler = OneCycleLR(self.optimizer, total_steps=self.num_iters, **config['optimizer'])\n# self.optimizer = ScheduledOptim(\n# Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),\n# #config['transformer']['d_model'], \n# 512,\n# **config['optimizer'])\n\n self.criterion = LabelSmoothingLoss(len(self.vocab), padding_idx=self.vocab.pad, smoothing=0.1)\n \n transforms = ImgAugTransform()\n\n self.train_gen = self.data_gen('train_{}'.format(self.dataset_name), \n self.data_root, self.train_annotation, transform=transforms)\n if self.valid_annotation:\n self.valid_gen = self.data_gen('valid_{}'.format(self.dataset_name), \n self.data_root, self.valid_annotation)\n\n self.train_losses = []\n \n def train(self):\n total_loss = 0\n \n total_loader_time = 0\n total_gpu_time = 0\n best_acc = 0\n\n data_iter = iter(self.train_gen)\n for i in range(self.num_iters):\n self.iter += 1\n\n start = time.time()\n\n try:\n batch = next(data_iter)\n except StopIteration:\n data_iter = iter(self.train_gen)\n batch = next(data_iter)\n\n total_loader_time += time.time() - start\n\n start = time.time()\n loss = self.step(batch)\n total_gpu_time += time.time() - start\n\n total_loss += loss\n self.train_losses.append((self.iter, loss))\n\n if self.iter % self.print_every == 0:\n info = 'iter: {:06d} - train loss: {:.3f} - lr: {:.2e} - load time: {:.2f} - gpu time: {:.2f}'.format(self.iter, \n total_loss/self.print_every, self.optimizer.param_groups[0]['lr'], \n total_loader_time, 
total_gpu_time)\n\n total_loss = 0\n total_loader_time = 0\n total_gpu_time = 0\n print(info) \n self.logger.log(info)\n\n if self.valid_annotation and self.iter % self.valid_every == 0:\n val_loss = self.validate()\n acc_full_seq, acc_per_char = self.precision(self.metrics)\n\n info = 'iter: {:06d} - valid loss: {:.3f} - acc full seq: {:.4f} - acc per char: {:.4f}'.format(self.iter, val_loss, acc_full_seq, acc_per_char)\n print(info)\n self.logger.log(info)\n\n if acc_full_seq > best_acc:\n self.save_weights(self.export_weights)\n best_acc = acc_full_seq\n\n \n def validate(self):\n self.model.eval()\n\n total_loss = []\n \n with torch.no_grad():\n for step, batch in enumerate(self.valid_gen):\n batch = self.batch_to_device(batch)\n img, tgt_input, tgt_output, tgt_padding_mask = batch['img'], batch['tgt_input'], batch['tgt_output'], batch['tgt_padding_mask']\n\n outputs = self.model(img, tgt_input, tgt_padding_mask)\n# loss = self.criterion(rearrange(outputs, 'b t v -> (b t) v'), rearrange(tgt_output, 'b o -> (b o)'))\n \n outputs = outputs.flatten(0,1)\n tgt_output = tgt_output.flatten()\n loss = self.criterion(outputs, tgt_output)\n\n total_loss.append(loss.item())\n \n del outputs\n del loss\n\n total_loss = np.mean(total_loss)\n self.model.train()\n \n return total_loss\n \n def predict(self, sample=None):\n pred_sents = []\n actual_sents = []\n img_files = []\n\n for batch in self.valid_gen:\n batch = self.batch_to_device(batch)\n\n if self.beamsearch:\n translated_sentence = batch_translate_beam_search(batch['img'], self.model)\n else:\n translated_sentence = translate(batch['img'], self.model)\n\n pred_sent = self.vocab.batch_decode(translated_sentence.tolist())\n actual_sent = self.vocab.batch_decode(batch['tgt_output'].tolist())\n\n img_files.extend(batch['filenames'])\n\n pred_sents.extend(pred_sent)\n actual_sents.extend(actual_sent)\n \n if sample != None and len(pred_sents) > sample:\n break\n\n return pred_sents, actual_sents, img_files\n\n def precision(self, sample=None):\n\n pred_sents, actual_sents, _ = self.predict(sample=sample)\n\n acc_full_seq = compute_accuracy(actual_sents, pred_sents, mode='full_sequence')\n acc_per_char = compute_accuracy(actual_sents, pred_sents, mode='per_char')\n \n return acc_full_seq, acc_per_char\n \n def visualize_prediction(self, sample=16, errorcase=False, fontname='serif', fontsize=16):\n \n pred_sents, actual_sents, img_files = self.predict(sample)\n\n if errorcase:\n wrongs = []\n for i in range(len(img_files)):\n if pred_sents[i]!= actual_sents[i]:\n wrongs.append(i)\n\n pred_sents = [pred_sents[i] for i in wrongs]\n actual_sents = [actual_sents[i] for i in wrongs]\n img_files = [img_files[i] for i in wrongs]\n\n\n img_files = img_files[:sample]\n\n fontdict = {\n 'family':fontname,\n 'size':fontsize\n } \n\n for vis_idx in range(0, len(img_files)):\n img_path = img_files[vis_idx]\n pred_sent = pred_sents[vis_idx]\n actual_sent = actual_sents[vis_idx]\n\n img = Image.open(open(img_path, 'rb'))\n plt.figure()\n plt.imshow(img)\n plt.title('pred: {} - actual: {}'.format(pred_sent, actual_sent), loc='left', fontdict=fontdict)\n plt.axis('off')\n\n plt.show()\n \n def visualize_dataset(self, sample=16, fontname='serif'):\n n = 0\n for batch in self.train_gen:\n for i in range(self.batch_size):\n img = batch['img'][i].numpy().transpose(1,2,0)\n sent = self.vocab.decode(batch['tgt_input'].T[i].tolist())\n \n plt.figure()\n plt.title('sent: {}'.format(sent), loc='center', fontname=fontname)\n plt.imshow(img)\n plt.axis('off')\n \n n += 1\n 
if n >= sample:\n plt.show()\n return\n\n\n def load_checkpoint(self, filename):\n checkpoint = torch.load(filename)\n \n optim = ScheduledOptim(\n\t Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),\n \tself.config['transformer']['d_model'], **self.config['optimizer'])\n\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.load_state_dict(checkpoint['state_dict'])\n self.iter = checkpoint['iter']\n\n self.train_losses = checkpoint['train_losses']\n\n def save_checkpoint(self, filename):\n state = {'iter':self.iter, 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(), 'train_losses': self.train_losses}\n \n path, _ = os.path.split(filename)\n os.makedirs(path, exist_ok=True)\n\n torch.save(state, filename)\n\n def load_weights(self, filename):\n state_dict = torch.load(filename, map_location=torch.device(self.device))\n\n for name, param in self.model.named_parameters():\n if name not in state_dict:\n print('{} not found'.format(name))\n elif state_dict[name].shape != param.shape:\n print('{} missmatching shape, required {} but found {}'.format(name, param.shape, state_dict[name].shape))\n del state_dict[name]\n\n self.model.load_state_dict(state_dict, strict=False)\n\n def save_weights(self, filename):\n path, _ = os.path.split(filename)\n os.makedirs(path, exist_ok=True)\n \n torch.save(self.model.state_dict(), filename)\n\n def batch_to_device(self, batch):\n img = batch['img'].to(self.device, non_blocking=True)\n tgt_input = batch['tgt_input'].to(self.device, non_blocking=True)\n tgt_output = batch['tgt_output'].to(self.device, non_blocking=True)\n tgt_padding_mask = batch['tgt_padding_mask'].to(self.device, non_blocking=True)\n\n batch = {\n 'img': img, 'tgt_input':tgt_input, \n 'tgt_output':tgt_output, 'tgt_padding_mask':tgt_padding_mask, \n 'filenames': batch['filenames']\n }\n\n return batch\n\n def data_gen(self, lmdb_path, data_root, annotation, transform=None):\n dataset = OCRDataset(lmdb_path=lmdb_path, \n root_dir=data_root, annotation_path=annotation, \n vocab=self.vocab, transform=transform, \n image_height=self.config['dataset']['image_height'], \n image_min_width=self.config['dataset']['image_min_width'], \n image_max_width=self.config['dataset']['image_max_width'])\n\n sampler = ClusterRandomSampler(dataset, self.batch_size, True)\n gen = DataLoader(\n dataset,\n batch_size=self.batch_size, \n sampler=sampler,\n collate_fn = collate_fn,\n shuffle=False,\n drop_last=False,\n **self.config['dataloader'])\n \n return gen\n\n def data_gen_v1(self, lmdb_path, data_root, annotation):\n data_gen = DataGen(data_root, annotation, self.vocab, 'cpu', \n image_height = self.config['dataset']['image_height'], \n image_min_width = self.config['dataset']['image_min_width'],\n image_max_width = self.config['dataset']['image_max_width'])\n\n return data_gen\n\n def step(self, batch):\n self.model.train()\n\n batch = self.batch_to_device(batch)\n img, tgt_input, tgt_output, tgt_padding_mask = batch['img'], batch['tgt_input'], batch['tgt_output'], batch['tgt_padding_mask'] \n \n outputs = self.model(img, tgt_input, tgt_key_padding_mask=tgt_padding_mask)\n# loss = self.criterion(rearrange(outputs, 'b t v -> (b t) v'), rearrange(tgt_output, 'b o -> (b o)'))\n outputs = outputs.view(-1, outputs.size(2))#flatten(0, 1)\n tgt_output = tgt_output.view(-1)#flatten()\n \n loss = self.criterion(outputs, tgt_output)\n\n self.optimizer.zero_grad()\n\n loss.backward()\n \n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1) \n\n 
self.optimizer.step()\n self.scheduler.step()\n\n loss_item = loss.item()\n\n return loss_item\n" ]
[ [ "torch.optim.lr_scheduler.OneCycleLR", "matplotlib.pyplot.imshow", "torch.load", "matplotlib.pyplot.figure", "torch.utils.data.DataLoader", "numpy.mean", "torch.no_grad", "matplotlib.pyplot.axis", "torch.device", "matplotlib.pyplot.show", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ad-daniel/opendr
[ "cc71138ae22ec39b186960ff98c74bc2cdca3623", "cc71138ae22ec39b186960ff98c74bc2cdca3623", "cc71138ae22ec39b186960ff98c74bc2cdca3623" ]
[ "tests/sources/tools/perception/object_detection_2d/gem/test_gem.py", "projects/perception/lightweight_open_pose/jetbot/utils/pose_controller.py", "projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/demo.py" ]
[ "# Copyright 2020-2022 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\nimport shutil\nimport os\nimport torch\nimport warnings\nfrom opendr.engine.datasets import ExternalDataset\nfrom opendr.perception.object_detection_2d import GemLearner\n\nfrom PIL import Image\n\nDEVICE = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\nprint(\"Using device:\", DEVICE)\nprint(\"Using device:\", DEVICE, file=sys.stderr)\n\n\ndef rmfile(path):\n try:\n os.remove(path)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\ndef rmdir(_dir):\n try:\n shutil.rmtree(_dir)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\nclass TestGemLearner(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n print(\"\\n\\n*********************************\\nTEST Object Detection GEM Learner\\n\"\n \"*********************************\")\n cls.temp_dir = os.path.join(\"tests\", \"sources\", \"tools\",\n \"perception\", \"object_detection_2d\",\n \"gem\", \"gem_temp\")\n\n cls.model_backbone = \"resnet50\"\n\n cls.learner = GemLearner(iters=1,\n temp_path=cls.temp_dir,\n backbone=cls.model_backbone,\n num_classes=7,\n device=DEVICE,\n )\n\n cls.learner.download(mode='pretrained_gem')\n\n print(\"Model downloaded\", file=sys.stderr)\n\n cls.learner.download(mode='test_data_sample_dataset')\n\n cls.learner.download(mode='test_data_sample_images')\n\n print(\"Data downloaded\", file=sys.stderr)\n cls.dataset_location = os.path.join(cls.temp_dir,\n 'sample_dataset',\n )\n cls.m1_dataset = ExternalDataset(\n cls.dataset_location,\n \"coco\",\n )\n cls.m2_dataset = ExternalDataset(\n cls.dataset_location,\n \"coco\",\n )\n\n @classmethod\n def tearDownClass(cls):\n # Clean up downloaded files\n rmdir(os.path.join(cls.temp_dir, 'pretrained_models'))\n rmdir(os.path.join(cls.temp_dir, 'checkpoints'))\n rmdir(os.path.join(cls.temp_dir, 'facebookresearch_detr_master'))\n rmdir(os.path.join(cls.temp_dir, 'sample_dataset'))\n rmdir(os.path.join(cls.temp_dir, 'sample_images'))\n rmdir(os.path.join(cls.temp_dir, 'outputs'))\n rmdir(cls.temp_dir)\n\n def test_fit(self):\n # Test fit will issue resource warnings due to some files left open in pycoco tools,\n # as well as a deprecation warning due to a cast of a float to integer (hopefully they will be fixed in a future\n # version)\n warnings.simplefilter(\"ignore\", ResourceWarning)\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n self.learner.model = None\n self.learner.ort_session = None\n\n self.learner.download(mode='pretrained_gem')\n\n m = list(self.learner.model.parameters())[0].clone()\n\n self.learner.fit(\n m1_train_edataset=self.m1_dataset,\n m2_train_edataset=self.m2_dataset,\n annotations_folder='annotations',\n m1_train_annotations_file='RGB_26May2021_14h19m_coco.json',\n m2_train_annotations_file='Thermal_26May2021_14h19m_coco.json',\n m1_train_images_folder='train/m1',\n m2_train_images_folder='train/m2',\n 
out_dir=os.path.join(self.temp_dir, \"outputs\"),\n trial_dir=os.path.join(self.temp_dir, \"trial\"),\n logging_path='',\n verbose=False,\n m1_val_edataset=self.m1_dataset,\n m2_val_edataset=self.m2_dataset,\n m1_val_annotations_file='RGB_26May2021_14h19m_coco.json',\n m2_val_annotations_file='Thermal_26May2021_14h19m_coco.json',\n m1_val_images_folder='val/m1',\n m2_val_images_folder='val/m2',\n )\n\n self.assertFalse(torch.equal(m, list(self.learner.model.parameters())[0]),\n msg=\"Model parameters did not change after running fit.\")\n\n # Cleanup\n warnings.simplefilter(\"default\", ResourceWarning)\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def test_eval(self):\n # Test eval will issue resource warnings due to some files left open in pycoco tools,\n # as well as a deprecation warning due to a cast of a float to integer (hopefully they will be fixed in a future\n # version)\n warnings.simplefilter(\"ignore\", ResourceWarning)\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n self.learner.model = None\n self.learner.ort_session = None\n\n self.learner.download(mode='pretrained_gem')\n\n result = self.learner.eval(\n m1_edataset=self.m1_dataset,\n m2_edataset=self.m2_dataset,\n m1_images_folder='val/m1',\n m2_images_folder='val/m2',\n annotations_folder='annotations',\n m1_annotations_file='RGB_26May2021_14h19m_coco.json',\n m2_annotations_file='Thermal_26May2021_14h19m_coco.json',\n verbose=False,\n )\n\n self.assertGreater(len(result), 0)\n\n # Cleanup\n warnings.simplefilter(\"default\", ResourceWarning)\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def test_infer(self):\n self.learner.model = None\n self.learner.ort_session = None\n\n self.learner.download(mode='pretrained_gem')\n\n m1_image = Image.open(os.path.join(self.temp_dir, \"sample_images/rgb/2021_04_22_21_35_47_852516.jpg\"))\n m2_image = Image.open(os.path.join(self.temp_dir, 'sample_images/aligned_infra/2021_04_22_21_35_47_852516.jpg'))\n\n result, _, _ = self.learner.infer(m1_image, m2_image)\n\n self.assertGreater(len(result), 0)\n\n def test_save(self):\n self.learner.model = None\n self.learner.ort_session = None\n\n model_dir = os.path.join(self.temp_dir, \"test_model\")\n\n self.learner.download(mode='pretrained_detr')\n\n self.learner.save(model_dir)\n\n starting_param_1 = list(self.learner.model.parameters())[0].clone()\n\n learner2 = GemLearner(\n iters=1,\n temp_path=self.temp_dir,\n device=DEVICE,\n num_classes=7,\n )\n learner2.load(model_dir)\n\n new_param = list(learner2.model.parameters())[0].clone()\n self.assertTrue(torch.equal(starting_param_1, new_param))\n\n rmdir(model_dir)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020-2022 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom utils.pose_utils import calculate_horizontal_offset, calculate_upper_body_height, calculate_body_area\nfrom utils.pid import PID\nimport numpy as np\nimport cv2\n\n\nclass PoseController:\n def __init__(self, robot, 
pose_estimator, visualization_handler_fn=None, fall_handler_fn=None, active=True,\n infer_delay=0, disable_collision=False):\n \"\"\"\n Initializes the Executer class responsible for detecting humans and issuing the appropriate control commands.\n Note that the current implementation assumes that only one human appears in the scene. If more than one appears,\n then the robot will only process information regarding the first one detected.\n @param robot: The robot controller to be used for issuing the control commands\n @param pose_estimator: The pose estimator to be used for fall detection\n @param visualization_handler_fn: A function with signature visualization_handler_fn(img, pose, statistics) that\n can be used for visualization the detections and other information regarding the robot/target. This is an optional\n function. Please note that the call to this function is blocking. Non-blocking functionality should be implemented\n by the visualization_handler_fn() function.\n @param fall_handler_fn: function to call when a fall is detected\n @param active: enables active perception\n @param infer_delay: delay after each inference operation (used to fully simulate the real hardware)\n @param disable_collision: disables collision avoidance (to enable faster execution)\n \"\"\"\n\n self.active = active\n self.robot = robot\n self.pose_estimator = pose_estimator\n self.visualization_handler = visualization_handler_fn\n self.fall_handler_fn = fall_handler_fn\n self.infer_delay = infer_delay\n\n if disable_collision:\n self.enable_depth_perception = False\n else:\n # Enables some very basic collision detection\n self.enable_depth_perception = True\n self.collision_depth_threshold = 0.06\n import gluoncv\n import mxnet as mx\n self.ctx = mx.gpu(0)\n self.collision_model = gluoncv.model_zoo.get_model('monodepth2_resnet18_kitti_stereo_640x192',\n pretrained_base=False, ctx=self.ctx, pretrained=True)\n\n # Webots constants\n if self.robot.robot_interface == 'webots':\n\n # Amount of movement for one wheel when performing a rotation\n self.rotation_interval = 0.5\n\n # Used to control the limits within the human should be in order to considered centered\n self.translation_offset_limit = 0.04\n\n # PID controller to center the subject\n self.rotation_pid = PID(1, 0, 0.1, output_limits=(-3, 3), setpoint=0, cutoff=0.01)\n\n # Distances (calculated as the areas covered by the subject in the current frame) to keep from a subject\n self.max_distance_size = 0.07\n self.min_distance_size = 0.12\n self.distance_pid = PID(100, 0, 0, output_limits=(-2, 2), setpoint=self.max_distance_size)\n\n # Number of maximum missed joints allowed for assuming this is a clean pose\n self.min_joints_threshold = 8\n\n # Number of frames that can be passed with no detection, before initiating rotate_to_detect()\n self.patience = 5\n\n # Camera threshold for assessing a fall\n self.fall_threshold = 0.4\n\n # (Max) distance to be covered when a fall is detected\n self.distance_fall = 10\n\n # Distance to move forward for active perception\n self.active_step_forward = 2\n\n # Number of successive detection before considering a detection stable\n self.active_successive_limit = 5\n # Active control PIDs\n self.rotation_pid_active = PID(2, 0, 0.1, output_limits=(-3, 3), setpoint=0, cutoff=0.01)\n self.distance_pid_active = PID(30, 0, 0, output_limits=(-5, 5), setpoint=0.1)\n elif self.robot.robot_interface == 'jetbot':\n # Amount of movement for one wheel when performing a rotation\n self.rotation_interval = 0.5\n\n # Used to 
control the limits within the human should be in order to considered centered\n self.translation_offset_limit = 0.04\n\n # PID controller to center the subject\n self.rotation_pid = PID(1, 0, 0.1, output_limits=(-3, 3), setpoint=0, cutoff=0.01)\n\n # Distances (calculated as the areas covered by the subject in the current frame) to keep from a subject\n self.max_distance_size = 0.07\n self.min_distance_size = 0.12\n self.distance_pid = PID(100, 0, 0, output_limits=(-2, 2), setpoint=self.max_distance_size)\n\n # Number of maximum missed joints allowed for assuming this is a clean pose\n self.min_joints_threshold = 8\n\n # Number of frames that can be passed with no detection, before initiating rotate_to_detect()\n self.patience = 5\n\n # Camera threshold for assessing a fall\n self.fall_threshold = 0.4\n\n # (Max) distance to be covered when a fall is detected\n self.distance_fall = 10\n\n # Distance to move forward for active perception\n self.active_step_forward = 2\n\n # Number of successive detection before considering a detection stable\n self.active_successive_limit = 5\n # Active control PIDs\n self.rotation_pid_active = PID(2, 0, 0.1, output_limits=(-3, 3), setpoint=0, cutoff=0.01)\n self.distance_pid_active = PID(30, 0, 0, output_limits=(-5, 5), setpoint=0.1)\n else:\n assert False\n\n # Cache\n self.last_img = None\n self.last_pose = None\n\n # Using a running average to smooth the size\n self.running_average_size = 0\n self.size_smoothing = 0.3\n\n # Use a running average to check if fall has been detected\n self.running_average_fall = 0\n self.fall_smoothing = 0.9\n self.confident_fall_threshold = 0.9\n\n # Number of finetuning movements to perform during rotate and detect phase\n self.max_rotates_finetuning = 3\n\n # Limit for the determining whether there is something interesting in the heat map, as well as its relative size\n self.active_detection_limit = 0.25\n self.active_size_limit = 0.4\n\n self.image_width = 800\n self.image_height = 600\n\n def rotate_to_detect(self):\n \"\"\"\n Rotates the robot until finding a human target and then returns\n \"\"\"\n\n # Get a frame and check for detections\n self.last_img = self.robot.get_camera_data()\n self.last_pose = None\n poses = self.pose_estimator.infer(self._get_infer_image())\n self.wait()\n\n if self.active:\n\n # offset_x is used to determine the direction of movements\n offset_x = None\n heatmap = None\n\n # Counter of stable detections (used to stop the active perception)\n counter = 0\n while True:\n control_left, control_right = 0, 0\n\n # if offset has been detected, move to the direction that will bring us closer to the subject\n if offset_x is not None:\n # Calculate the rotation of the robot to center the target\n offset_command = self.rotation_pid_active(offset_x)\n if offset_command > 0:\n control_left += np.abs(offset_command)\n else:\n control_right += np.abs(offset_command)\n else:\n offset_command = None\n\n self.last_img = self.robot.get_camera_data()\n\n heatmap, poses = self.pose_estimator.infer_active(self.last_img)\n self.wait()\n\n # Get the probability that a pixel is not a joint\n heatmap = (1 - heatmap[:, :, -1])\n max_conf = np.max(heatmap)\n heatmap = cv2.resize(heatmap, (200, 160))\n person_area = np.sum([heatmap > self.active_size_limit]) / (200 * 160)\n # Convert heatmap to image\n heatmap = np.uint8((heatmap) * 255)\n\n # If something interesting has been detected\n if max_conf > self.active_detection_limit:\n # Locate the maximum in order to decide where to move in the next step\n i, j = 
np.unravel_index(np.argmax(heatmap), np.array(heatmap).shape)\n offset_x = (float(j) / heatmap.shape[1]) - 0.5\n # Also, we can now move closer to the point of interest\n\n distance_command = self.distance_pid_active(person_area)\n control_left += distance_command\n control_right += distance_command\n\n else:\n offset_x = None\n\n def visualization_fn(img):\n return self.visualization_handler(img, self.last_pose,\n {'state': 'rotate_to_detect_active',\n 'heatmap': heatmap,\n 'control_left': control_left,\n 'control_right': control_right,\n 'size': person_area, 'offset': offset_x,\n })\n\n # Perform the actions\n if offset_command is None:\n self.robot.rotate(self.rotation_interval, visualization_fn)\n else:\n self.robot.rotate(offset_command, visualization_fn)\n\n if max_conf > self.active_detection_limit:\n self.safe_translate(distance_command, visualization_fn)\n\n # If we have successfully detected a pose and we have a stable detection we can end this process\n if len(poses) > 0 and np.sum(poses[0].data == -1) < self.min_joints_threshold:\n counter += 1\n else:\n counter = 0\n\n if counter > self.active_successive_limit:\n break\n else:\n # Slowly rotate until detecting a human\n while len(poses) == 0:\n def visualization_fn(img):\n return self.visualization_handler(img, self.last_pose,\n {'state': 'rotate_to_detect_active',\n })\n\n self.robot.rotate(self.rotation_interval, visualization_fn)\n self.last_img = self.robot.get_camera_data()\n poses = self.pose_estimator.infer(cv2.resize(self.last_img, (600, 800)))\n self.wait()\n if len(poses) > 0:\n self.last_pose = poses[0]\n offset_center = calculate_horizontal_offset(poses[0], self.image_width)\n\n def visualization_fn(img):\n return self.visualization_handler(img, self.last_pose,\n {'state': 'rotate_to_detect_active',\n 'offset': offset_center,\n })\n\n if offset_center > 0:\n self.robot.rotate(-self.rotation_interval, visualization_fn)\n else:\n self.robot.rotate(self.rotation_interval, visualization_fn)\n break\n else:\n self.last_pose = None\n\n def _get_infer_image(self):\n return cv2.resize(self.last_img, (self.image_width, self.image_height))\n\n def monitor_target(self):\n \"\"\"\n Centers a target and tries to keep an appropriate distance.\n Periodically checks for falls and enables the fall mitigation routine.\n @return:\n @rtype:\n \"\"\"\n\n # Counter for the number of frames with no detection\n no_detection_frames = 0\n control_left, control_right = 0, 0\n fall = False\n offset_center, size = 0, 0\n\n while no_detection_frames < self.patience:\n\n self.last_img = self.robot.get_camera_data()\n poses = self.pose_estimator.infer(self._get_infer_image())\n self.wait()\n\n if len(poses) > 0:\n # Reset counter and keep the last pose\n\n self.last_pose = poses[0]\n\n # Check the quality of the pose\n if np.sum(self.last_pose.data == -1) >= self.min_joints_threshold:\n no_detection_frames += 1\n self.visualization_handler(self.last_img, None, {'state': 'monitor_target',\n 'control_left': control_left,\n 'control_right': control_right, 'fall': fall,\n 'size': size, 'offset': offset_center,\n 'fall_confidence': self.running_average_fall,\n 'skipped': True})\n self.robot.step()\n else:\n no_detection_frames = 0\n # Appropriate control the robot to keep the target centered and within appropriate distance\n # Keep some statistics for visualization\n control_left, control_right = 0, 0\n\n height = calculate_upper_body_height(self.last_pose, self.image_height)\n size_scaler = 1\n\n # If human is on ground, initially account for the 
distance discrepancy\n if height > self.fall_threshold:\n self.running_average_fall = self.running_average_fall * self.fall_smoothing + (\n 1 - self.fall_smoothing)\n size_scaler = 2\n else:\n self.running_average_fall = self.running_average_fall * self.fall_smoothing\n\n # Calculate the rotation of the robot to center the target\n offset_center = calculate_horizontal_offset(self.last_pose, self.image_width)\n offset_command = self.rotation_pid(offset_center)\n if offset_command > 0:\n control_left += np.abs(offset_command)\n else:\n control_right += np.abs(offset_command)\n\n # Calculate the distance in order to have the robot in a comfortable distance\n size = calculate_body_area(self.last_pose, self.image_width, self.image_height) * size_scaler\n if self.running_average_size == 0:\n self.running_average_size = size\n self.running_average_size = self.size_smoothing * self.running_average_size + (\n 1 - self.size_smoothing) * size\n\n if self.running_average_size < self.max_distance_size or self.running_average_size > self.min_distance_size:\n distance_command = self.distance_pid(self.running_average_size)\n control_left += distance_command\n control_right += distance_command\n else:\n distance_command = 0\n\n # Check if we had detected a fall with enough confidence\n if self.running_average_fall > self.confident_fall_threshold:\n fall = True\n else:\n fall = False\n\n def visualization_handler(img):\n return self.visualization_handler(img, self.last_pose,\n {'state': 'monitor_target',\n 'control_left': control_left,\n 'control_right': control_right,\n 'fall': fall,\n 'size': size,\n 'offset': offset_center,\n 'fall_confidence': self.running_average_fall,\n 'skipped': False, 'control': True})\n\n self.last_img = self.robot.get_camera_data()\n self.visualization_handler(self.last_img, self.last_pose, {'state': 'monitor_target',\n 'control_left': control_left,\n 'control_right': control_right,\n 'fall': fall,\n 'size': size, 'offset': offset_center,\n 'fall_confidence': self.running_average_fall,\n 'skipped': False,\n 'fall': fall})\n\n # Center the subject\n if np.abs(offset_command) > 0.01:\n self.robot.rotate(offset_command, visualization_handler)\n\n # Control the distance\n if distance_command != 0:\n self.safe_translate(distance_command, visualization_handler)\n self.last_img = self.robot.get_camera_data()\n self.robot.step()\n\n # Handle a fall\n if fall:\n self.handle_fall()\n # Reset threshold to allow fast recovery\n self.running_average_fall = 0\n\n else:\n self.last_pose = None\n no_detection_frames += 1\n self.robot.step()\n self.visualization_handler(self.last_img, self.last_pose, {'state': 'monitor_target',\n 'control_left': control_left,\n 'control_right': control_right, 'fall': fall,\n 'size': size, 'offset': offset_center,\n 'fall_confidence': self.running_average_fall,\n 'skipped': False})\n\n def handle_fall(self):\n \"\"\"\n This function is responsible for handling a detected fall. 
It collects two frames and then calls\n a user-defined function (fall_handler_fn) in order to further process the data\n \"\"\"\n images = []\n images.append(self.last_img)\n\n # Go towards the fall\n for i in range(self.distance_fall):\n self.safe_translate(2, lambda x: None)\n self.last_img = self.robot.get_camera_data()\n poses = self.pose_estimator.infer(self._get_infer_image())\n self.wait()\n if len(poses) > 0:\n self.last_pose = poses[0]\n else:\n break\n size = calculate_body_area(self.last_pose, self.image_width, self.image_height) * 2\n if size > self.min_distance_size:\n break\n images.append(self.last_img)\n\n self.fall_handler_fn(images)\n\n def safe_translate(self, distance_command, visualization_handler):\n \"\"\"\n Allows for translating the robot after checking for obstacles\n @param distance_command: translation comamnd\n @param visualization_handler: command to use for the translation\n @return: True, if the command was executed, False, otherwise\n \"\"\"\n\n if self.enable_depth_perception:\n from mxnet.gluon.data.vision import transforms\n import mxnet as mx\n import PIL.Image as pil\n\n img = cv2.cvtColor(self.last_img, cv2.COLOR_BGR2RGB)\n img = pil.fromarray(np.uint8(img))\n img = img.resize((640, 192), pil.LANCZOS)\n img = transforms.ToTensor()(mx.nd.array(img)).expand_dims(0).as_in_context(context=self.ctx)\n\n outputs = self.collision_model.predict(img)\n outputs = outputs[(\"disp\", 0)]\n outputs = outputs.squeeze().as_in_context(mx.cpu()).asnumpy()\n\n if self.enable_depth_perception and np.mean(outputs) > self.collision_depth_threshold:\n print(\"Collision\")\n return False\n else:\n self.robot.translate(distance_command, visualization_handler)\n return True\n\n def wait(self):\n time.sleep(self.infer_delay)\n", "# Copyright 2020-2022 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport cv2\nimport argparse\nimport numpy as np\nimport torch\nimport pandas\n\nfrom opendr.perception.facial_expression_recognition import ProgressiveSpatioTemporalBLNLearner\nfrom opendr.perception.facial_expression_recognition.landmark_based_facial_expression_recognition.\\\n algorithm.datasets.landmark_extractor import landmark_extractor\nfrom opendr.perception.facial_expression_recognition.landmark_based_facial_expression_recognition.\\\n algorithm.datasets.gen_facial_muscles_data import gen_muscle_data\n\n\ndef preds2label(labels_csv_path, confidence):\n k = 3\n class_scores, class_inds = torch.topk(confidence, k=k)\n expression_classes = pandas.read_csv(labels_csv_path, verbose=True, index_col=0).to_dict()[\"name\"]\n labels = {expression_classes[int(class_inds[j])]: float(class_scores[j].item())for j in range(k)}\n return labels\n\n\ndef getFrame(vidcap, sec, framespth, count):\n vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)\n hasFrames, image = vidcap.read()\n if hasFrames:\n cv2.imwrite(os.path.join(framespth, \"frame\" + str(count) + \".jpg\"), image)\n return hasFrames\n\n\ndef tile(a, dim, n_tile):\n a = torch.from_numpy(a)\n 
init_dim = a.size(dim)\n repeat_idx = [1] * a.dim()\n repeat_idx[dim] = n_tile\n a = a.repeat(*repeat_idx)\n order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))\n tiled = torch.index_select(a, dim, order_index)\n return tiled.numpy()\n\n\ndef data_normalization(data):\n data = torch.from_numpy(data)\n N, V, C, T, M = data.size()\n data = data.permute(0, 2, 3, 1, 4).contiguous().view(N, C, T, V, M)\n # remove the first 17 points\n data = data[:, :, :, 17:, :]\n N, C, T, V, M = data.size()\n # normalization\n for n in range(N):\n for t in range(T):\n for v in range(V):\n data[n, :, t, v, :] = data[n, :, t, v, :] - data[n, :, t, 16, :]\n return data.numpy()\n\n\ndef data_gen(landmark_path, num_frames, num_landmarks, num_dim, num_faces, model_name):\n if os.path.exists(landmark_path):\n root, _, files = next(os.walk(landmark_path)) # os.walk returns an iterator; take the top-level listing\n T = len(files)\n sample_numpy = np.zeros((1, num_landmarks, num_dim, num_frames, num_faces))\n if T > num_frames or model_name in ['pstbln_casia', 'pstbln_ck+']: # num_frames = 5\n for j in range(num_frames-1):\n if os.path.isfile(landmark_path + str(T - j - 1) + '.npy'):\n sample_numpy[0, :, :, -1 - j, 0] = np.load(landmark_path + str(T - j - 1) + '.npy')\n for j in range(T):\n if os.path.isfile(landmark_path + str(j) + '.npy'):\n sample_numpy[0, :, :, 0, 0] = np.load(landmark_path + str(j) + '.npy')\n break\n elif T < num_frames or model_name in ['pstbln_afew']: # num_frames = 300\n sample_numpy = np.zeros((1, num_landmarks, num_dim, T, num_faces))\n for j in range(T):\n if os.path.isfile(landmark_path + str(j) + '.npy'):\n sample_numpy[0, :, :, j, 0] = np.load(landmark_path + str(j) + '.npy')\n dif = num_frames - T\n num_tiles = int(dif / T)\n while dif > 0:\n if num_tiles == 0:\n for k in range(dif):\n sample_numpy[0, :, :, T + k, :] = sample_numpy[0, :, :, -1, :]\n elif num_tiles > 0:\n sample_numpy = tile(sample_numpy[:, :, :, :T, 0], 3, num_tiles)\n T = sample_numpy.shape[3]\n dif = num_frames - T\n num_tiles = int(dif / T)\n else:\n for j in range(num_frames):\n if os.path.isfile(landmark_path + str(j) + '.npy'):\n sample_numpy[:, :, :, j, 0] = np.load(landmark_path + str(j) + '.npy')\n return sample_numpy\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='video frame extractor')\n # command-line options; each option needs a unique short flag, help= text,\n # and no required=True on options that already carry defaults\n parser.add_argument(\"-i\", \"--video_folder\", type=str, default='./input.mp4',\n help=\"path to input video\")\n parser.add_argument(\"-l\", \"--labels_csv_path\", type=str, default='./labels.csv',\n help=\"path to reference labels file\")\n parser.add_argument(\"-p\", \"--shape_predictor\", type=str,\n default='./shape_predictor_68_face_landmarks.dat',\n help=\"path to facial landmark predictor\")\n parser.add_argument('--checkpoint_path', type=str, default='./pstbln',\n help='path to trained classifier model')\n parser.add_argument('--model_name', type=str, default='pstbln_casia',\n help='name of the pretrained model')\n parser.add_argument('--num_frames', type=int, default=5, help='the number of frames in each sequence')\n parser.add_argument(\"-o\", \"--output_data_path\", type=str, default='./data',\n help=\"path to save the generated muscles data\")\n\n args = parser.parse_args() # keep the Namespace; the code below uses attribute access (args.video_folder, args.shape_predictor, ...)\n\n # 1: extract video frames:\n video_path = args.video_folder\n frames_path = os.path.join(os.path.dirname(video_path) or '.', 'frames_folder') # next to the video file, not under it\n if not os.path.exists(frames_path):\n os.makedirs(frames_path)\n vidcap = cv2.VideoCapture(video_path)\n sec = 0\n frameRate = 0.5 # it
captures frames every 0.5 second\n count = 0\n success = getFrame(vidcap, sec, frames_path, count)\n while success:\n count = count + 1\n sec = sec + frameRate\n sec = round(sec, 2)\n success = getFrame(vidcap, sec, frames_path, count)\n\n # 2: extract landmarks from each frame:\n landmarks_path = os.path.join(frames_path, 'landmarks_folder')\n if not os.path.exists(landmarks_path):\n os.makedirs(landmarks_path)\n for root, _, files in os.walk(frames_path):\n for file in files:\n if '.jpg' in file:\n imgpth = os.path.join(root, file)\n outpth = landmarks_path\n frameidx = file.split(\".\")\n landmark_extractor(imgpth, landmarks_path + frameidx[0] + '.npy', args.shape_predictor)\n\n # 3: sequence numpy data generation from extracted landmarks and normalization:\n num_landmarks = 68\n num_dim = 2 # feature dimension for each facial landmark\n num_faces = 1 # number of faces in each frame\n num_frames = args.num_frames\n model_name = args.model_name\n muscle_path = args.output_data_path\n numpy_data = data_gen(landmarks_path, num_frames, num_landmarks, num_dim, num_faces, model_name)\n norm_data = data_normalization(numpy_data)\n muscle_data = gen_muscle_data(norm_data, muscle_path)\n\n if args.model_name == 'pstbln_ck+':\n num_point = 303\n num_class = 7\n elif args.model_name == 'pstbln_casia':\n num_point = 309\n num_class = 6\n elif args.model_name == 'pstbln_afew':\n num_point = 312\n num_class = 7\n\n # inference\n expression_classifier = ProgressiveSpatioTemporalBLNLearner(device=\"cpu\", dataset_name='AFEW', num_class=num_class,\n num_point=num_point, num_person=1, in_channels=2,\n blocksize=5, topology=[15, 10, 15, 5, 5, 10])\n model_saved_path = args.checkpoint_path\n expression_classifier.load(model_saved_path, model_name)\n prediction = expression_classifier.infer(muscle_data)\n category_labels = preds2label(args.labels_csv_path, prediction.confidence)\n print(category_labels)\n" ]
[ [ "torch.equal", "torch.cuda.is_available" ], [ "numpy.abs", "numpy.uint8", "numpy.max", "numpy.argmax", "numpy.mean", "numpy.array", "numpy.sum" ], [ "pandas.read_csv", "numpy.arange", "torch.from_numpy", "torch.topk", "torch.index_select", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ryosukehata/pytorch-lightning
[ "a5bd2edefbafa6e03acffd4ba1a8816bbc1682a3" ]
[ "pl_examples/basic_examples/lightning_module_template.py" ]
[ "\"\"\"\nExample template for defining a system\n\"\"\"\nimport os\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torchvision.datasets import MNIST\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.root_module.root_module import LightningModule\n\n\nclass LightningTemplateModel(LightningModule):\n \"\"\"\n Sample model to show how to define a template\n \"\"\"\n\n def __init__(self, hparams):\n \"\"\"\n Pass in parsed HyperOptArgumentParser to the model\n :param hparams:\n \"\"\"\n # init superclass\n super(LightningTemplateModel, self).__init__()\n self.hparams = hparams\n\n self.batch_size = hparams.batch_size\n\n # if you specify an example input, the summary will show input/output for each layer\n self.example_input_array = torch.rand(5, 28 * 28)\n\n # build model\n self.__build_model()\n\n # ---------------------\n # MODEL SETUP\n # ---------------------\n def __build_model(self):\n \"\"\"\n Layout model\n :return:\n \"\"\"\n self.c_d1 = nn.Linear(in_features=self.hparams.in_features,\n out_features=self.hparams.hidden_dim)\n self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)\n self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)\n\n self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim,\n out_features=self.hparams.out_features)\n\n # ---------------------\n # TRAINING\n # ---------------------\n def forward(self, x):\n \"\"\"\n No special modification required for lightning, define as you normally would\n :param x:\n :return:\n \"\"\"\n\n x = self.c_d1(x)\n x = torch.tanh(x)\n x = self.c_d1_bn(x)\n x = self.c_d1_drop(x)\n\n x = self.c_d2(x)\n logits = F.log_softmax(x, dim=1)\n\n return logits\n\n def loss(self, labels, logits):\n nll = F.nll_loss(logits, labels)\n return nll\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the training loop\n :param batch:\n :return:\n \"\"\"\n # forward pass\n x, y = batch\n x = x.view(x.size(0), -1)\n\n y_hat = self.forward(x)\n\n # calculate loss\n loss_val = self.loss(y, y_hat)\n\n # in DP mode (default) make sure if result is scalar, there's another dim in the beginning\n if self.trainer.use_dp or self.trainer.use_ddp2:\n loss_val = loss_val.unsqueeze(0)\n\n tqdm_dict = {'train_loss': loss_val}\n output = OrderedDict({\n 'loss': loss_val,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n\n # can also return just a scalar instead of a dict (return loss_val)\n return output\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the validation loop\n :param batch:\n :return:\n \"\"\"\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self.forward(x)\n\n loss_val = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n val_acc = torch.tensor(val_acc)\n\n if self.on_gpu:\n val_acc = val_acc.cuda(loss_val.device.index)\n\n # in DP mode (default) make sure if result is scalar, there's another dim in the beginning\n if self.trainer.use_dp or self.trainer.use_ddp2:\n loss_val = loss_val.unsqueeze(0)\n val_acc = val_acc.unsqueeze(0)\n\n output = OrderedDict({\n 'val_loss': loss_val,\n 'val_acc': val_acc,\n })\n\n # can also return just a scalar instead of a dict (return loss_val)\n return output\n\n def 
validation_end(self, outputs):\n \"\"\"\n Called at the end of validation to aggregate outputs\n :param outputs: list of individual outputs of each validation step\n :return:\n \"\"\"\n # if returned a scalar from validation_step, outputs is a list of tensor scalars\n # we return just the average in this case (if we want)\n # return torch.stack(outputs).mean()\n\n val_loss_mean = 0\n val_acc_mean = 0\n for output in outputs:\n val_loss = output['val_loss']\n\n # reduce manually when using dp\n if self.trainer.use_dp:\n val_loss = torch.mean(val_loss)\n val_loss_mean += val_loss\n\n # reduce manually when using dp\n val_acc = output['val_acc']\n if self.trainer.use_dp or self.trainer.use_ddp2:\n val_acc = torch.mean(val_acc)\n\n val_acc_mean += val_acc\n\n val_loss_mean /= len(outputs)\n val_acc_mean /= len(outputs)\n tqdm_dict = {'val_loss': val_loss_mean, 'val_acc': val_acc_mean}\n result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': val_loss_mean}\n return result\n\n # ---------------------\n # TRAINING SETUP\n # ---------------------\n def configure_optimizers(self):\n \"\"\"\n return whatever optimizers we want here\n :return: list of optimizers\n \"\"\"\n optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)\n return [optimizer], [scheduler]\n\n def __dataloader(self, train):\n # init data generators\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (1.0,))])\n dataset = MNIST(root=self.hparams.data_root, train=train,\n transform=transform, download=True)\n\n # when using multi-node (ddp) we need to add the datasampler\n train_sampler = None\n batch_size = self.hparams.batch_size\n\n if self.use_ddp:\n train_sampler = DistributedSampler(dataset)\n\n should_shuffle = train_sampler is None\n loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=should_shuffle,\n sampler=train_sampler,\n num_workers=0\n )\n\n return loader\n\n @pl.data_loader\n def train_dataloader(self):\n print('training data loader called')\n return self.__dataloader(train=True)\n\n @pl.data_loader\n def val_dataloader(self):\n print('val data loader called')\n return self.__dataloader(train=False)\n\n @pl.data_loader\n def test_dataloader(self):\n print('test data loader called')\n return self.__dataloader(train=False)\n\n @staticmethod\n def add_model_specific_args(parent_parser, root_dir): # pragma: no cover\n \"\"\"\n Parameters you define here will be available to your model through self.hparams\n :param parent_parser:\n :param root_dir:\n :return:\n \"\"\"\n parser = ArgumentParser(parents=[parent_parser])\n\n # param overwrites\n # parser.set_defaults(gradient_clip_val=5.0)\n\n # network params\n parser.add_argument('--in_features', default=28 * 28, type=int)\n parser.add_argument('--out_features', default=10, type=int)\n # use 500 for CPU, 50000 for GPU to see speed difference\n parser.add_argument('--hidden_dim', default=50000, type=int)\n parser.add_argument('--drop_prob', default=0.2, type=float)\n parser.add_argument('--learning_rate', default=0.001, type=float)\n\n # data\n parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)\n\n # training params (opt)\n parser.add_argument('--optimizer_name', default='adam', type=str)\n parser.add_argument('--batch_size', default=64, type=int)\n return parser\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.mean", "torch.nn.functional.log_softmax", "torch.nn.functional.nll_loss", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.utils.data.distributed.DistributedSampler", "torch.utils.data.DataLoader", "torch.sum", "torch.tensor", "torch.tanh", "torch.nn.Linear", "torch.rand", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajshajib/fabspec
[ "0fec1595a4525215bbabd1f2480d1d31a86d955e" ]
[ "fabspec/spectra.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nThis module defines the class Spectra() that contains a spectra and\nrelevant information.\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom copy import deepcopy\n\n\nclass Spectra(object):\n \"\"\"\n Contains a spectra and relevant information.\n \"\"\"\n\n def __init__(self, wavelengths, spectra, *args, **kwargs):\n \"\"\"\n :param spectra: Array of flux.\n :param wavelengths: Array of wavelengths, must be same length as\n spectra.\n :param args:\n :param kwargs: `resolution`: R, `resolution_fwhm`: FWHM of\n spectral resolution.\n \"\"\"\n try:\n assert len(spectra) == len(wavelengths)\n except:\n raise ('Error: spectra and wavelength must have same size!')\n\n self._spectra = np.array(spectra) # to store the original spectra\n self.spectra = deepcopy(self._spectra)\n self._wavelengths = np.array(wavelengths) # to store the original\n self.wavelengths = deepcopy(self._wavelengths)\n\n if 'resolution_fwhm' in kwargs:\n self._resolution_fwhm = kwargs['resolution_fwhm']\n\n # resolution parameter R\n if 'resolution' in kwargs:\n self._resolution = kwargs['resolution']\n\n if 'flux_unit' in kwargs:\n self._flux_unit = kwargs['flux_unit']\n else:\n self._flux_unit = 'arbitrary'\n\n if 'wavelength_unit' in kwargs:\n self._wavelength_unit = kwargs['wavelength_unit']\n\n @property\n def resolution_fwhm(self):\n if hasattr(self, '_resolution_fwhm'):\n return self._resolution_fwhm\n else:\n return None\n\n @resolution_fwhm.setter\n def resolution_fwhm(self, fwhm):\n \"\"\"\n Update the FWHM of the spectra.\n :param fwhm: FWHM to set for the spectra, in the same unit as\n `self.wavelengths`.\n \"\"\"\n self._resolution_fwhm = fwhm\n\n @property\n def resolution(self):\n if hasattr(self, '_resolution'):\n return self._resolution\n else:\n return None\n\n @property\n def flux_unit(self):\n if hasattr(self, '_flux_unit'):\n return self._flux_unit\n else:\n return None\n\n @property\n def wavelength_unit(self):\n if hasattr(self, '_wavelength_unit'):\n return self._wavelength_unit\n else:\n return None\n\n def get_delta_lambda(self):\n \"\"\"\n Compute the spatial pixel size of the spectra.\n :return:\n \"\"\"\n return np.mean(np.diff(self.wavelengths))\n\n def linearize_wavelength_scale(self, dlambda):\n \"\"\"\n Linearize the wavelength scale if its currently in log scale.\n :param dlambda: Wavelength resolution for linear intervals.\n :return:\n \"\"\"\n sample = interp1d(self.wavelengths, self.spectra, kind='linear',\n bounds_error=False, fill_value=0.)\n # NOT shortening the wavelength range by 1 index so that\n # `scipy.interpolate.interp1d` does not throw error. 
Fill value with 0\n # outside interpolation range.\n self.wavelengths = np.arange(self.wavelengths[0],\n self.wavelengths[-1]+dlambda/2., dlambda)\n\n self.spectra = sample(self.wavelengths)\n\n def normalize_flux(self):\n \"\"\"\n Normalize the flux so that the median is 1.\n :return:\n \"\"\"\n self.spectra /= np.median(self.spectra)\n self._flux_unit = 'normalized'\n\n def reset_to_initial(self):\n \"\"\"\n Reset the spectra to initial flux and wavelengths at the time\n of creating the `Spectra` object.\n :return:\n \"\"\"\n self.wavelengths = deepcopy(self._wavelengths)\n self.spectra = deepcopy(self._spectra)\n\n def get_wavelength_range(self):\n \"\"\"\n Get the wavelength range of the spectra.\n :return:\n \"\"\"\n return self.wavelengths[[0, -1]] #\\\n # + np.array([-0.5, 0.5])*self.get_delta_lambda()\n\n def clip(self, start_wavelength=None, end_wavelength=None):\n \"\"\"\n Clip the spectra within the specified wavelengths.\n :param start_wavelength: Start wavelength for clipping. If\n `None`, set to minimum of current wavelength range.\n :param end_wavelength: End wavelength for clipping. If `None`,\n set to maximum of current wavelength range.\n :return:\n \"\"\"\n if start_wavelength is None:\n start_wavelength = self.wavelengths[0]\n\n if end_wavelength is None:\n end_wavelength = self.wavelengths[-1]\n\n self.spectra = self.spectra[(self.wavelengths >= start_wavelength) &\n (self.wavelengths <= end_wavelength)]\n self.wavelengths = self.wavelengths[(self.wavelengths >=\n start_wavelength) &\n (self.wavelengths <=\n end_wavelength)]" ]
[ [ "numpy.arange", "numpy.median", "scipy.interpolate.interp1d", "numpy.diff", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
edward-io/pytorch
[ "04caef8e1d4f951cc380d6cebb9967b71695de13" ]
[ "torch/profiler/profiler.py" ]
[ "import gzip\nimport json\nimport os\nimport tempfile\nfrom enum import Enum\nfrom typing import Any, Callable, Iterable, Optional\nfrom warnings import warn\n\nimport torch\nimport torch.autograd.profiler as prof\nfrom torch.autograd import kineto_available, ProfilerActivity\n\n\nclass ProfilerAction(Enum):\n \"\"\"\n Profiler actions that can be taken at the specified intervals\n \"\"\"\n NONE = 0\n WARMUP = 1\n RECORD = 2\n RECORD_AND_SAVE = 3\n\n\ndef schedule(*, wait: int, warmup: int, active: int, repeat: int = 0, skip_first: int = 0) -> Callable:\n \"\"\"\n Returns a callable that can be used as profiler ``schedule`` argument. The profiler will skip\n the first ``skip_first`` steps, then wait for ``wait`` steps, then do the warmup for the next ``warmup`` steps,\n then do the active recording for the next ``active`` steps and then repeat the cycle starting with ``wait`` steps.\n The optional number of cycles is specified with the ``repeat`` parameter, the zero value means that\n the cycles will continue until the profiling is finished.\n \"\"\"\n def schedule_fn(step: int) -> ProfilerAction:\n assert step >= 0\n if step < skip_first:\n return ProfilerAction.NONE\n else:\n step -= skip_first\n num_steps = wait + warmup + active\n if repeat > 0 and step / num_steps >= repeat:\n return ProfilerAction.NONE\n mod_step = step % num_steps\n if mod_step < wait:\n return ProfilerAction.NONE\n elif mod_step < wait + warmup:\n return ProfilerAction.WARMUP\n else:\n return ProfilerAction.RECORD if mod_step < num_steps - 1 \\\n else ProfilerAction.RECORD_AND_SAVE\n assert wait >= 0 and warmup >= 0 and active > 0 and \\\n repeat >= 0 and skip_first >= 0, \"Invalid profiler schedule arguments\"\n if warmup == 0:\n warn(\"Profiler won't be using warmup, this can skew profiler results\")\n return schedule_fn\n\n\ndef _default_schedule_fn(_: int) -> ProfilerAction:\n \"\"\"\n Default profiler behavior - immediately starts recording the events,\n keeps doing it on every profiler step.\n \"\"\"\n return ProfilerAction.RECORD\n\ndef tensorboard_trace_handler(dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False):\n \"\"\"\n Outputs tracing files to directory of ``dir_name``, then that directory can be\n directly delivered to tensorboard as logdir.\n ``worker_name`` should be unique for each worker in distributed scenario,\n it will be set to '[hostname]_[pid]' by default.\n \"\"\"\n import os\n import socket\n import time\n\n def handler_fn(prof) -> None:\n nonlocal worker_name\n if not os.path.isdir(dir_name):\n try:\n os.makedirs(dir_name, exist_ok=True)\n except Exception:\n raise RuntimeError(\"Can't create directory: \" + dir_name)\n if not worker_name:\n worker_name = \"{}_{}\".format(socket.gethostname(), str(os.getpid()))\n file_name = \"{}.{}.pt.trace.json\".format(worker_name, int(time.time() * 1000))\n if use_gzip:\n file_name = file_name + '.gz'\n prof.export_chrome_trace(os.path.join(dir_name, file_name))\n return handler_fn\n\ndef supported_activities():\n \"\"\"\n Returns a set of supported profiler tracing activities.\n\n Note: profiler uses CUPTI library to trace on-device CUDA kernels.\n In case when CUDA is enabled but CUPTI is not available, passing\n ``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA\n profiling code (same as in the legacy ``torch.autograd.profiler``).\n This, in turn, results in including CUDA time in the profiler table output,\n but not in the JSON trace.\n \"\"\"\n return torch.autograd._supported_activities()\n\n\nclass 
profile(object):\n \"\"\"Profiler context manager.\n\n Args:\n activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:\n ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.\n Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.\n schedule (callable): callable that takes step (int) as a single parameter and returns\n ``ProfilerAction`` value that specifies the profiler action to perform at each step.\n on_trace_ready (callable): callable that is called at each step when ``schedule``\n returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.\n record_shapes (bool): save information about operator's input shapes.\n profile_memory (bool): track tensor memory allocation/deallocation.\n with_stack (bool): record source information (file and line number) for the ops.\n with_flops (bool): use formula to estimate the FLOPS of specific operators\n (matrix multiplication and 2D convolution).\n use_cuda (bool):\n .. deprecated:: 1.8.1\n use ``activities`` instead.\n\n .. note::\n Use :func:`~torch.profiler.schedule` to generate the callable schedule.\n Non-default schedules are useful when profiling long training jobs\n and allow the user to obtain multiple traces at the different iterations\n of the training process.\n The default schedule simply records all the events continuously for the\n duration of the context manager.\n\n .. note::\n Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard:\n\n ``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)``\n\n After profiling, result files can be found in the specified directory. Use the command:\n\n ``tensorboard --logdir dir_name``\n\n to see the results in TensorBoard.\n For more information, see\n `PyTorch Profiler TensorBoard Plugin <https://github.com/pytorch/kineto/tree/master/tb_plugin>`__\n\n .. note::\n Enabling shape and stack tracing results in additional overhead.\n When record_shapes=True is specified, profiler will temporarily hold references to the tensors;\n that may further prevent certain optimizations that depend on the reference count and introduce\n extra tensor copies.\n\n Examples:\n\n .. code-block:: python\n\n with torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ]\n ) as p:\n code_to_profile()\n print(p.key_averages().table(\n sort_by=\"self_cuda_time_total\", row_limit=-1))\n\n Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions:\n\n .. 
code-block:: python\n\n # Non-default profiler schedule allows user to turn profiler on and off\n # on different iterations of the training loop;\n # trace_handler is called every time a new trace becomes available\n def trace_handler(prof):\n print(prof.key_averages().table(\n sort_by=\"self_cuda_time_total\", row_limit=-1))\n # prof.export_chrome_trace(\"/tmp/test_trace_\" + str(prof.step_num) + \".json\")\n\n with torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ],\n\n # In this example with wait=1, warmup=1, active=2,\n # profiler will skip the first step/iteration,\n # start warming up on the second, record\n # the third and the forth iterations,\n # after which the trace will become available\n # and on_trace_ready (when set) is called;\n # the cycle repeats starting with the next step\n\n schedule=torch.profiler.schedule(\n wait=1,\n warmup=1,\n active=2),\n on_trace_ready=trace_handler\n # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')\n # used when outputting for tensorboard\n ) as p:\n for iter in range(N):\n code_iteration_to_profile(iter)\n # send a signal to the profiler that the next iteration has started\n p.step()\n \"\"\"\n def __init__(\n self,\n *,\n activities: Optional[Iterable[ProfilerActivity]] = None,\n schedule: Optional[Callable[[int], ProfilerAction]] = None,\n on_trace_ready: Optional[Callable[..., Any]] = None,\n record_shapes: bool = False,\n profile_memory: bool = False,\n with_stack: bool = False,\n with_flops: bool = False,\n # deprecated:\n use_cuda: Optional[bool] = None):\n if activities:\n self.activities = set(activities)\n else:\n self.activities = supported_activities()\n\n if use_cuda is not None:\n warn(\"use_cuda is deprecated, use activities argument instead\")\n if use_cuda:\n self.activities.add(ProfilerActivity.CUDA)\n elif ProfilerActivity.CUDA in self.activities:\n self.activities.remove(ProfilerActivity.CUDA)\n\n assert len(self.activities) > 0, \"No valid profiler activities found\"\n\n if schedule:\n self.schedule = schedule\n # add step markers into the trace and table view\n self.record_steps = True\n else:\n self.schedule = _default_schedule_fn\n self.record_steps = False\n self.on_trace_ready = on_trace_ready\n self.record_shapes = record_shapes\n self.with_flops = with_flops\n self.profile_memory = profile_memory\n self.with_stack = with_stack\n self.step_num = 0\n self.current_action = self.schedule(self.step_num)\n self.profiler: Optional[prof.profile] = None\n self.step_rec_fn: Optional[prof.record_function] = None\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop()\n\n def start(self):\n self._enter_actions()\n if self.record_steps:\n self.step_rec_fn = prof.record_function(\"ProfilerStep#\" + str(self.step_num))\n self.step_rec_fn.__enter__()\n\n def stop(self):\n if self.record_steps and self.step_rec_fn:\n self.step_rec_fn.__exit__(None, None, None)\n self._exit_actions()\n\n def step(self):\n \"\"\"\n Signals the profiler that the next profiling step has started.\n \"\"\"\n if self.record_steps and self.step_rec_fn:\n self.step_rec_fn.__exit__(None, None, None)\n prev_action = self.current_action\n self.step_num += 1\n self.current_action = self.schedule(self.step_num)\n\n if self.current_action == ProfilerAction.NONE:\n if prev_action == ProfilerAction.NONE:\n pass\n elif prev_action == ProfilerAction.WARMUP:\n warn(\"Incorrect schedule: WARMUP followed by NONE\")\n 
self._start_trace()\n self._stop_trace()\n elif prev_action == ProfilerAction.RECORD:\n warn(\"Incorrect schedule: RECORD followed by NONE\")\n self._stop_trace()\n else:\n assert prev_action == ProfilerAction.RECORD_AND_SAVE\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n elif self.current_action == ProfilerAction.WARMUP:\n if prev_action == ProfilerAction.NONE:\n self._start_warmup()\n elif prev_action == ProfilerAction.WARMUP:\n pass\n elif prev_action == ProfilerAction.RECORD:\n warn(\"Incorrect schedule: RECORD followed by WARMUP\")\n self._stop_trace()\n else:\n assert prev_action == ProfilerAction.RECORD_AND_SAVE\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n self._start_warmup()\n elif self.current_action in \\\n [ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE]:\n if prev_action == ProfilerAction.NONE:\n self._start_warmup()\n self._start_trace()\n elif prev_action == ProfilerAction.WARMUP:\n self._start_trace()\n elif prev_action == ProfilerAction.RECORD:\n pass\n else:\n assert prev_action == ProfilerAction.RECORD_AND_SAVE\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n self._start_warmup()\n self._start_trace()\n\n if self.record_steps:\n self.step_rec_fn = prof.record_function(\"ProfilerStep#\" + str(self.step_num))\n self.step_rec_fn.__enter__()\n\n def export_chrome_trace(self, path: str):\n \"\"\"\n Exports the collected trace in Chrome JSON format.\n \"\"\"\n assert self.profiler\n if path.endswith('.gz'):\n fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)\n fp.close()\n retvalue = self.profiler.export_chrome_trace(fp.name)\n with open(fp.name) as fin:\n with gzip.open(path, 'wt') as fout:\n fout.writelines(fin)\n os.remove(fp.name)\n return retvalue\n else:\n return self.profiler.export_chrome_trace(path)\n\n def export_stacks(self, path: str, metric: str = \"self_cpu_time_total\"):\n \"\"\"Save stack traces in a file in a format suitable for visualization.\n\n Args:\n path (str): save stacks file to this location;\n metric (str): metric to use: \"self_cpu_time_total\" or \"self_cuda_time_total\"\n\n .. note::\n Example of using FlameGraph tool:\n\n - git clone https://github.com/brendangregg/FlameGraph\n - cd FlameGraph\n - ./flamegraph.pl --title \"CPU time\" --countname \"us.\" profiler.stacks > perf_viz.svg\n \"\"\"\n assert self.profiler\n return self.profiler.export_stacks(path, metric)\n\n def key_averages(self, group_by_input_shape: bool = False, group_by_stack_n: int = 0):\n \"\"\"Averages events, grouping them by operator name and (optionally) input shapes and\n stack.\n\n .. 
note::\n To use shape/stack functionality make sure to set record_shapes/with_stack\n when creating profiler context manager.\n \"\"\"\n assert self.profiler\n return self.profiler.key_averages(group_by_input_shape, group_by_stack_n)\n\n def events(self):\n \"\"\"\n Returns the list of unaggregated profiler events,\n to be used in the trace callback or after the profiling is finished\n \"\"\"\n assert self.profiler\n return self.profiler.function_events\n\n def add_metadata(self, key: str, value: str):\n \"\"\"\n Adds a user defined metadata with a string key and a string value\n into the trace file\n \"\"\"\n wrapped_value = \"\\\"\" + value.replace('\"', '\\\\\"') + \"\\\"\"\n torch.autograd._add_metadata_json(key, wrapped_value)\n\n def add_metadata_json(self, key: str, value: str):\n \"\"\"\n Adds a user defined metadata with a string key and a valid json value\n into the trace file\n \"\"\"\n torch.autograd._add_metadata_json(key, value)\n\n def _get_distributed_info(self):\n import torch.distributed as dist\n if not dist.is_available() or not dist.is_initialized():\n return None\n\n return {\n \"backend\": dist.get_backend(),\n \"rank\": dist.get_rank(),\n \"world_size\": dist.get_world_size()\n }\n\n def _enter_actions(self):\n if self.current_action == ProfilerAction.WARMUP:\n self._start_warmup()\n elif self.current_action in \\\n [ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE]:\n self._start_warmup()\n self._start_trace()\n\n def _exit_actions(self):\n if self.current_action == ProfilerAction.WARMUP:\n self._start_trace()\n self._stop_trace()\n elif self.current_action in \\\n [ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE]:\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n\n def _start_warmup(self):\n self.profiler = prof.profile(\n use_cuda=(ProfilerActivity.CUDA in self.activities),\n use_cpu=(ProfilerActivity.CPU in self.activities),\n record_shapes=self.record_shapes,\n with_flops=self.with_flops,\n profile_memory=self.profile_memory,\n with_stack=self.with_stack,\n use_kineto=True,\n )\n self.profiler._prepare_trace()\n\n def _start_trace(self):\n assert self.profiler is not None\n self.profiler._start_trace()\n\n if kineto_available():\n dist_info = self._get_distributed_info()\n if dist_info:\n self.add_metadata_json(\"distributedInfo\", json.dumps(dist_info))\n\n def _stop_trace(self):\n assert self.profiler is not None\n self.profiler.__exit__(None, None, None)\n" ]
[ [ "torch.autograd._supported_activities", "torch.distributed.get_backend", "torch.distributed.is_initialized", "torch.distributed.is_available", "torch.autograd.kineto_available", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.autograd._add_metadata_json", "torch.autograd.profiler.profile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JhonLiuljs/tensorflow_demo
[ "0757f81a2c8baae41fce586e5d86f7312f46fda6" ]
[ "1.Cnn_Captcha/gen_captcha.py" ]
[ "# coding:utf-8\nfrom captcha.image import ImageCaptcha # pip install captcha\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport random\nimport time\nimport sys\n\nfrom constants import number\nfrom constants import alphabet\nfrom constants import ALPHABET\n\n\n# 验证码一般都无视大小写;验证码长度4个字符\ndef random_captcha_text(char_set=number + alphabet + ALPHABET, captcha_size=4):\n \"\"\" 指定使用的验证码内容列表和长期 返回随机的验证码文本 \"\"\"\n captcha_text = []\n for i in range(captcha_size):\n c = random.choice(char_set)\n captcha_text.append(c)\n return captcha_text\n\n\n# 使用ImageCaptcha库生成验证码\ndef gen_captcha_text_and_image():\n \"\"\"生成字符对应的验证码 \"\"\"\n image = ImageCaptcha() # 导入验证码包 生成一张空白图\n\n captcha_text = random_captcha_text() # 随机一个验证码内容\n captcha_text = ''.join(captcha_text) # 类型转换为字符串\n\n captcha = image.generate(captcha_text)\n # image.write(captcha_text, 'image/' + captcha_text + '.jpg') # 写到文件\n\n # rm = 'rm '+captcha_text + '.jpg'\n # os.system(rm)\n\n captcha_image = Image.open(captcha) # 转换为图片格式\n captcha_image = np.array(captcha_image) # 转换为 np数组类型\n\n return captcha_text, captcha_image\n\n\n# 把彩色图像转为灰度图像(色彩对识别验证码没有什么用)\ndef convert2gray(img):\n if len(img.shape) > 2:\n gray = np.mean(img, -1)\n # 上面的转法较快,正规转法如下\n # r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]\n # gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n else:\n return img\n\n\nif __name__ == '__main__':\n # 测试\n for i in range(10000):\n text, image = gen_captcha_text_and_image()\n print('begin ', time.ctime(), type(image))\n f = plt.figure()\n ax = f.add_subplot(111)\n ax.text(0.1, 0.9, text, ha='center', va='center', transform=ax.transAxes)\n plt.imshow(image)\n # plt.show() # 显示,,取消注释并在30行取消写到文件的注释即可保存为文件\n print('end ', time.ctime())\n print(\"over!\")\n sys.exit()\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.array", "numpy.mean", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yoheikikuta/a-primer-on-adversarial-examples
[ "1f4bea303b01b140b3a022cc7448ad3daaae3447" ]
[ "data.py" ]
[ "import random\nfrom abc import ABC, abstractmethod\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as F\n\n\nclass Data(ABC):\n \"\"\"Data represents an abstract class providing interfaces.\n\n Attributes\n ----------\n base_dit str : base directory of data.\n self.batch_size int : batch size.\n self.num_workers int : number of workers used in multi-process data loding.\n \"\"\"\n base_dir = \"./data\"\n\n def __init__(self, batch_size, num_workers):\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n @abstractmethod\n def transform(self) -> torchvision.transforms.transforms.Compose:\n pass\n\n @abstractmethod\n def get_dataset(self) -> torchvision.datasets.vision.VisionDataset:\n pass\n\n def prepare_data(self):\n \"\"\"Get and return dataset with transformations.\n\n Returns\n -------\n trainloader torch.utils.data.DataLoader : train DataLoader.\n testloader torch.utils.data.DataLoader : test DataLoader.\n num_classes int : number of classes of dataset.\n \"\"\"\n trainset, testset = self.get_dataset()\n num_classes = len(trainset.classes)\n\n trainloader = torch.utils.data.DataLoader(trainset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers)\n testloader = torch.utils.data.DataLoader(testset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers)\n\n return trainloader, testloader, num_classes\n\n\nclass DataCIFAR10(Data):\n \"\"\"DataCIFAR10 represents cifar10 dataset.\n\n Attributes\n ----------\n name str : \"cifar10\".\n \"\"\"\n name = \"cifar10\"\n\n def __init__(self, batch_size=4, num_workers=2):\n \"\"\"\n Parameters\n ----------\n batch_size int : batch_size.\n num_workers int : number of workers used in multi-process data loding.\n \"\"\"\n super(DataCIFAR10, self).__init__(batch_size, num_workers)\n\n def transform(self):\n \"\"\"Only uses transforms.ToTensor().\"\"\"\n return transforms.Compose([transforms.ToTensor()])\n\n def get_dataset(self):\n \"\"\"Download and load cifar10 dataset.\n\n Returns\n -------\n trainset torchvision.datasets.CIFAR10 : train dataset.\n testset torchvision.datasets.CIFAR10 : test dataset.\n \"\"\"\n trainset = torchvision.datasets.CIFAR10(root=f\"{self.base_dir}/{self.name}\",\n train=True, download=True,\n transform=self.transform())\n testset = torchvision.datasets.CIFAR10(root=f\"{self.base_dir}/{self.name}\",\n train=False, download=True,\n transform=self.transform())\n\n return trainset, testset\n\n\nclass DataGTSRB(Data):\n \"\"\"DataGTSRB represents pre-processed GTSRB dataset.\n\n Attributes\n ----------\n name str : \"GTSRB_processed\".\n \"\"\"\n name = \"GTSRB_processed\"\n\n def __init__(self, batch_size=4, num_workers=2):\n super(DataGTSRB, self).__init__(batch_size, num_workers)\n\n def transform(self):\n \"\"\"Only uses transforms.ToTensor().\"\"\"\n return transforms.Compose([transforms.ToTensor()])\n\n def get_dataset(self):\n \"\"\"Load GTSRB dataset from directory that is prepared in advance.\n\n Returns\n -------\n trainset torchvision.datasets.ImageFolder : train dataset.\n testset torchvision.datasets.ImageFolder : test dataset.\n \"\"\"\n trainset = torchvision.datasets.ImageFolder(\n root=f\"{self.base_dir}/{self.name}/train\",\n transform=self.transform())\n\n testset = torchvision.datasets.ImageFolder(\n root=f\"{self.base_dir}/{self.name}/test\",\n transform=self.transform())\n\n return trainset, testset\n\n\nclass RandomResizePadding(object):\n \"\"\"DataGTSRB represents 
pre-processed GTSRB dataset.\n\n Attributes\n ----------\n self.size int : image will be rescaled to [c, size, size].\n \"\"\"\n def __init__(self, size):\n assert isinstance(size, int)\n self.size = size\n\n def __call__(self, img):\n \"\"\"Randomly resize and 0-pad the given PIL.\n\n Parameters\n ----------\n img PIL.Image : input image.\n\n Returns\n -------\n img PIL.Image : trasnsormed image.\n \"\"\"\n # Randomly resize the image.\n resize = random.randint(img.width, self.size)\n resized_img = F.resize(img, resize)\n # 0-pad the resized image. 0-pad to all left, right, top and bottom.\n pad_size = self.size - resize\n padded_img = F.pad(resized_img, pad_size, fill=0)\n # Crop the padded image to get (size, size) image.\n pos_top = random.randint(0, pad_size)\n pos_left = random.randint(0, pad_size)\n transformed_img = F.crop(padded_img, pos_top, pos_left, self.size, self.size)\n return transformed_img\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fenning-research-group/sentaurus_ddd
[ "e9e7a9b86c8b87cafff0b69c4a0f83c2fe292a45" ]
[ "DatAnalysis.py" ]
[ "# DatAnalysis.py\n# Module containing data analysis functions used for the DDD model\n\nimport os\nimport re # Regular Expression package\nimport pdb # debugger\nimport numpy as np\nimport matplotlib.pyplot as plt # plotting package\n#import readh5conc\nimport csv\nimport h5py\nfrom scipy import signal\nfrom scipy import interpolate\nimport matplotlib.animation as manimation\nfrom datetime import datetime\n\n\n# function to plot the IV curve\n# Arguments:\n#\t\tfilename: name of the .plt file containing the data. Example: \"n_t6600_light_des.plt\"\n#\t\tplotcond=1 to plot, 0 otherwise\ndef analyzedata(filename,plotcond):\n\n\t#if condition==\"dark\":\n\t#\tfid=open(\"nodnum1_dark_des.plt\",\"r\")\n\t#elif condition==\"light\":\n\t#\tfid=open(\"nodnum1_light_des.plt\",\"r\")\n\t#else:\n\t#\traise Exception('wrong argument entered. Enter \"dark\" or \"light\".')\t\n\t\n\tfid=open(filename,\"r\")\n\trawdata=\"\" #Create empty string\n\twhile 1:\n\t\tdataline=fid.readline()\n\t\tif dataline==\"\": # check if the string is empty, meaning end of file\n\t\t\tbreak\n\t\tlinestr=dataline.strip() # save dataline into a string and remove leading and trailing spaces\n\t\trawdata=rawdata+\" \"+linestr # concatenate each line with the line string\n\tfid.close()\n\t\n\t# find the indices of the { brackets\n\t# xstart=re.search(\"{\",data)\n\t# xstart.span()\n\tindstart=[m.span() for m in re.finditer(\"{\",rawdata)] # finditer finds all the iterations of '{'\n\t\n\t# find the indices of the } brackets\n\tindend=[m.span() for m in re.finditer(\"}\",rawdata)]\n\t\n\t# Make a dictionary 'data' containing the info section and the numerical data section as an array\n\trawinfo=rawdata[indstart[0][0]+1:indend[0][0]-1] # names of datasets and of functions (includes extra spaces)\n\trawvalues=rawdata[indstart[1][1]+1:indend[1][1]-1] # numerical values calculated by Sentaurus (includes extra spaces)\n\t\n\t# create the dictionary entries and remove spaces\n\tdata=dict() # define dictionary\n\tdata[\"info\"]=rawinfo.strip() \n\tdata[\"values\"]=rawvalues.strip()\n\t\n\t#pdb.set_trace()\n\t# Find indices of the [ and ] brackets in the info section\n\t# [ brackets\n\tsqindst=[m.span() for m in re.finditer(\"\\[\",rawinfo.strip())] # starting index of square bracket\n\t# ] brackets\n\tsqinden=[m.span() for m in re.finditer(\"\\]\",rawinfo.strip())] # ending index of square bracket\n\t\n\t# The dataset names are within the first brackets\n\trawdatasets=data[\"info\"][sqindst[0][0]+1:sqinden[0][0]-1]\n\tdata[\"datasets\"]=rawdatasets.strip()\n\t\n\t# Split the dataset field at the double quotes to find the number of output parameters (voltage, current, etc.)\n\tlist_datasets=re.split(\"\\\" \",data[\"datasets\"]) # split each time a double quote followed by a space in encountered\n\tnboutputs=len(list_datasets) #number of output parameters from sdevice\n\t\n\t# Split the value string list\n\t#list_values=re.split(\" \",data[\"values\"]) # separate with two spaces as this is the minimum spacing between consecutive values\n\tlist_values=data[\"values\"].split() # split without argument splits at white spaces\n\t\n\t# Convert the value string list into a numpy string array (vector)\n\tvalvect_str=np.array(list_values)\n\t\n\t# convert numpy string array into a numpy float array\n\tvalvect=valvect_str.astype(np.float)\n\tlen_valvect=len(valvect)\n\t\n\t# Reshape the data array\n\tnblines=int(len_valvect/nboutputs)# nb of lines for the 
matrix\n\t#pdb.set_trace()\n\tvalarray=np.reshape(valvect,(nblines,nboutputs))\n\t#pdb.set_trace()\n\t\n\t# Get current and voltage (column number depends on the defined outputs in the sdevice file)\n\tV=valarray[:,1] # voltage in col. index 1, ie 2nd column\n\tI=valarray[:,7] # voltage in col. index 7, ie 8th column\n\t\n\tif plotcond:\n\t\tplt.ion()\n\t\tplt.plot(V,I,'-+')\n\t\tplt.ylabel(\"Total current (mA/cm2)\")\n\t\tplt.xlabel(\"Voltage (V)\")\n\t\tplt.ylim(-50, 20)\n\t\tplt.show()\n\t\n\treturn [I, V]\n\t\nNaprofilename=\"single_layer_D1=4E-16cm2ps_D2=1E-15cm2ps_Cs1E+20cm3_T85_time96hr_h1.0e-04_m1.0e+00_pnp.h5\"\n#Naprofilename=\"two_layers_D1=4E-16cm2ps_D2=1E-14cm2ps_Cs1E+20cm3_T85_time96hr_h1.0e-10_m1.0e+00_pnp.h5\"\ndef batchanalysis(directory='./2019-11-1_backup',sdevicetemplate=\"sdevice_light_des\",h5File=Naprofilename,startstep=0,endstep=0):\n\t# analyzes data from several plt files\n\t# assumes that the .plt files are saved in directory\n\t#sdevicetemplate is the name of the Sentaurus device template file without the extension, for instance \"sdevice_light_des\"\n\t# example: runSentaurus.batchanalysis(6,\"sdevice_light_des\")\n\t\n\t# obtain depth of the shunt as a function of time\n\t#sigma=shuntcond(nbsteps)\n\t\n\t# Find time point corresponding to each time\n\t# Open h5 file\n\t#h5File=\"FOR_JV_85C.h5\"\n\thf= h5py.File(h5File, 'r')\n\ttme= hf.get('time')\n\tti=tme[:]\n\t\n\tLti=len(ti) # length of the time list\n\t\n\t# In case endstep is not defined by the user or is wrongly defined\n\tif endstep==0 or endstep<startstep:\n\t\tendstep=Lti\n\t\tprint(\"Last simulation step set to the total nb of steps\")\n\t\t\n\t#nbsteps=len(ti) # number of time steps\n\tnbsteps=endstep-startstep\n\n\t# Save h5py data\n\ttime=[0]*nbsteps\n\ttime[:]=ti[:]\n\t\n\thf.close()\n\t\t\n\t# Define dictionary containing IV data. Each field contains a list.\n\tresults=dict()\n\t# Create lists of NaN (so that 0's won't be plotted if some points are skipped)\n\tresults[\"I\"]=NaNlist(Lti)\n\tresults[\"sdevicename\"]=NaNlist(Lti)\n\tresults[\"time\"]=NaNlist(Lti)\n\tresults[\"V\"]=NaNlist(Lti)\n\tresults[\"shdepth\"]=NaNlist(Lti)\n\tresults[\"Efficiency\"]=NaNlist(Lti)\n\t\n\t#pdb.set_trace()\n\t# results[\"I\"]=[0]*nbsteps # intialize the list to save current data\n\t# results[\"V\"]=[0]*nbsteps\n\t# results[\"sdevicename\"]=[0]*nbsteps\n\t# results[\"time\"]=[0]*nbsteps\n\t# results[\"shdepth\"]=[0]*nbsteps\n\t# results[\"Efficiency\"]=[0]*nbsteps\n\t\n\tshdepth=[]# List containing depth of the shunt at each iteration\n\t\n\tplt.ion()\n\tplt.figure()\n\tfor i in range(startstep,endstep):\n\t\t# Analyze data and store generated IV curve in a dictionary\n\t\t# The name of the file to analyze is obtained by adding the node name to the sdevice file name without the string \"sdevice\".\n\t\t# eg: sdevice_light_des.cmd becomes n_t1_light_des.plt\n\t\t\n\t\t#shdepth=sigma[i,1] # append shunt depth\n\t\t#timestamp=\"_t\"+str(int(sigma[i,0]))\n\t\t\n\t\tbasename=re.split(\"sdevice_\",sdevicetemplate) # yields \"light_des\" from \"sdevice_light_des\", case of a template file \"sdevice_light_des.cmd\".\n\t\tplotfile=directory+\"//\"+\"n_t\"+str(int(time[i]))+\"_\"+basename[1]+\".plt\" # by default the .plt files containing the extracted data are saved with a name in the form \"n_tk_basename.plt\" where k is the time and basename is the string after \"sdevice\" in the sdevice file. Example: \"n_t4_light_des.plt\" based on the file \"sdevice_light_des_tk.cmd\".\n\t\t\n\t\t# Try openining the filename. 
Pass if it does not exist (in case some points were skipped in the simulations)\n\t\tplt_flag=1\n\t\ttry:\n\t\t\tfid=open(plotfile,\"r\")\n\t\t\tfid.close()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\twarning_str='Could not open file '+plotfile+', it may not exist.\\nSkipping the file in the analysis.\\n'\n\t\t\tprint(warning_str)\n\t\t\tplt_flag=0 # set the flag to 0 so the file won't be used in the analysis\n\n\t\tif plt_flag: # if the .plt file was found, extract data\n\t\t\t# Extract resulting IV curve\n\t\t\tprint(plotfile)\n\t\t\t#pdb.set_trace()\n\t\t\t\n\t\t\ttry: # try extracting data from the .plt file\n\t\t\t\t[I,V]=analyzedata(plotfile,0)\n\t\t\texcept: # if the file is corrupted, skip this .plt file.\n\t\t\t\t# (happens for instance if the simulations do not converge)skip this .plt file.\n\t\t\t\t# The corresponding dictionary value will remain a NaN.\n\t\t\t\tprint('*****File %s is corrupted, skipped in analysis\\n' % plotfile)\n\t\t\telse: # if no exception is found, save the extracted data in the dictionary\n\t\t\t\t# Extract efficiency as a function of time step\n\t\t\t\t# save results in dictionary\n\t\t\t\tprint('File %s is ok\\n' % plotfile)\n\t\t\t\tresults[\"I\"][i]=I\n\t\t\t\tresults[\"V\"][i]=V\n\t\t\t\tresults[\"sdevicename\"][i]=plotfile\n\t\t\t\t#results[\"time\"][i]=i\n\t\t\t\tresults[\"time\"][i]=time[i]\n\t\t\t\tresults[\"Efficiency\"][i]=findeff(V,I)\n\t\t\t\t#results[\"shdepth\"][i]=shdepth\n\t\t\t\t\n\t\t\t\t# Plot the IV curves\n\t\t\t\t#plt.figure()\n\t\t\t\tplt.subplot(1,2,1)\n\t\t\t\tplt.plot(V,I)\n\t\t\t\n\tplt.ylabel(\"Total current (mA/cm$^2$)\")\n\tplt.xlabel(\"Voltage (V)\")\n\tplt.ylim(-35, 0)\n\tplt.xlim(0, 0.75)\n\tplt.rcParams.update({'font.size':16})\n\t\n\t# pdb.set_trace()\n\t\n\t# Convert lists to numpy arrays to apply a mask\n\ttimearr=np.asarray(results[\"time\"])\n\teffarr=np.asarray(results[\"Efficiency\"])\n\t# Find where to mask NaN in the array\n\teffmask=np.isfinite(effarr)\n\t#pdb.set_trace()\n\teffarr_mask=effarr[effmask] # apply mask\n\ttimearr_mask=timearr[effmask] # apply mask\n\t\n\t#pdb.set_trace()\n\t\n\tplt.subplot(1,2,2)\n\tplt.plot(timearr_mask/3600,effarr_mask,marker='s',linestyle='',color='k',markersize=12,fillstyle='none') # plot simulated points\tplt.plot(time_new/3600,eff_smoothed,'--b',linewidth=1.3,dashes=(5,6)) #plot interpolated curve\n\t\n\t# interpolate only if there are more than 3 points\n\ttime_new=float('NaN')\n\teff_smoothed=float('NaN')\n\tif len(effarr_mask)>3:\n\t\t# Spline interpolation of the efficiency curve\n\t\ttck=interpolate.splrep(timearr_mask,effarr_mask,s=0,k=3) # spline interpolation coefficients (order k)\n\t\ttime_new=np.arange(timearr_mask[0],timearr_mask[-1],300) # create a finer time vector\n\t\teffarr_interp=interpolate.splev(time_new,tck,der=0) #evaluate the interpolate curve\n\t\n\t\t# smooth interpolated curve\n\t\twdw=11\n\t\tpolord=3\n\t\teff_smoothed=signal.savgol_filter(effarr_interp,wdw,polord) #window length wdw, poly order polord\n\t\n\t\tplt.plot(time_new/3600,eff_smoothed,'--b',linewidth=1.3,dashes=(5,6)) #plot interpolated curve\n\t\t\n\t\tplt.legend(['Simulated\\npoints','Guide to\\nthe eye'],loc='upper right',prop={'size':9})\n\t\n\tplt.rcParams.update({'font.size':16})\n\tplt.subplots_adjust(left=0.15,top=0.98,wspace=0.4) # adjust space between subplots\n\t\n\tplt.ylabel(\"Efficiency (%)\")\n\tplt.xlabel(\"Time (h)\")\n\t#plt.ylim(-35, 0)\n\t#plt.xlim(0, 0.65)\n\t\n\ttimepts=SimTimePts(results) # Time points that have been run by the 
simulations\n\t\n\t#plt.show()\n\treturn results,timearr_mask,effarr_mask,time_new,eff_smoothed\n\t\n\t\ndef plotcurves():\n\t[Idark,Vdark]=analyzedat(\"dark\")\n\t[Ilight,Vlight]=analyzedat(\"light\")\n\tplt.plot(Vdark,Idark,label='dark')\n\tplt.hold(True)\n\tplt.plot(Vlight,Ilight,label='light')\n\tplt.rcParams.update({'font.size': 20}) # increase fontsize\n\tplt.legend(loc='upper left')\n\tplt.show()\n\t\ndef findeff(V,I):\n# Function returning the efficiency of the cell assuming 1 sun illumination\n\t\n\tI=-I # to work with positive currents\n\tP=V*I\n\t# Find max powerpoint\n\tP_L=list(P)\n\tpm=max(P_L)\n\timax=P_L.index(pm)\n\t\n\t#[k for k,j in enumerate(P) if j==pm]\n\t#pdb.set_trace()\n\tVmax=V[imax]\n\tImax=I[imax]\n\t\n\tnu=Imax*Vmax*10/1e3*100 # I is in mA/cm2, corresponding to 10 A/m2\n\t\n\treturn nu\n\n# function to plot Na profile\ndef ploth5(h5file=\"FOR_newNaprofile.h5\", folderpath=\"/home/linux/ieng6/na299x/na299x/DB/Guillaume/Solar_cell_Al_BSF\"):\n\t\n\t#Open h5 file (file for full stack file)\n\thf\t\t= h5py.File(h5file, 'r')\n\ttime \t= hf.get('time')\n\tct\t\t= hf.get('si/concentration')\n\t# ct_10=hf.get('si/concentration/ct_10')\n\tx\t\t= hf.get('si/x')\n\t\n\tpdb.set_trace()\n\tplt.ion()\n\tfig,ax=plt.subplots()\n\tfor t in enumerate(time):\n\t\tct_field='ct_'+str(t[0]) # do not use all curve since they are high resolution\n\t\t#pdb.set_trace()\n\t\tplt.plot(x-x[0],ct[ct_field])\n\t\tplt.ylim(1e10,1e16)\n\t\tplt.xlim(0,0.8)\n\t\tplt.yscale('log')\n\t\t\n\tfor item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n\t\tax.get_xticklabels() + ax.get_yticklabels()):\n\t\titem.set_fontsize(19)\n\t\t\n\t\n\thf.close()\n\t\n\t# creates a list of length L filled with NaN\ndef NaNlist(L): \n\tlist=np.zeros(L,dtype=np.float) #create array of floats because an array of int cannot be filled with NaN\n\tlist.fill(np.nan) # fill with NaN\n\tlist=list.tolist() #convert to list\n\t\n\treturn list\n\t\n# Function returning the indices of the time points that have been simulated in the result list output from batchanalysis.py\ndef SimTimePts(results):\n\ttme=results[\"time\"]\n\ttme_array=np.asarray(tme)\n\ttme_mask=np.isfinite(tme_array)\n\ttimepts=np.where(tme_mask==True)\n\t\n\treturn timepts[0] # array\n\t\n# Function to make video of efficiency as a function of time\n# Takes in argument the images generated at each time\n## not currently working, see module videoPID.py\ndef makePIDvideo(time_new,eff_smoothed):\n\n\t#Writer=manimation.FFMpegWriter(fps=30)\n\t#['ffmpeg']\n\t#writer=Writer(fps=20, metadata=dict(title='PID simulated efficiency', artist='G.',bitrate=1800)) #record at 20 fps\n\t#pdb.set_trace()\n\t# create figure\n\tfigPID,ax=plt.subplots()\n\tline,=ax.plot(time_new,eff_smoothed)\n\tplt.xlim(0,12)\n\tplt.ylim(0,19)\n\t\n\tdef animate_PID(i,time_new,eff_smoothed,line):\n\t\tline.set_data(time_new[:i],eff_smoothed[:i])\n\t\treturn line,\n\t\n\tplt.xlabel('Time (hrs)')\n\tplt.ylabel('Efficiency (%)')\n\t\n\t#plt.show()\n\t#pdb.set_trace()\n\tani = manimation.FuncAnimation(figPID, animate_PID, len(time_new), fargs=[time_new,eff_smoothed,line], blit=False)\n\tani.save('Efficiencyplot.mp4', writer='ffmpeg')\n\t# import images and make video\n\n# Function used to create a log of the parameters and files used in the simulations\ndef createlog(batchdir,Temp,mseg,clathrate_file, h5file, startstep, endstep, skipNB, sdetemplate, sdevicetemplate):\n\t# Also add the name of the optical generation file used? 
(although it is in the sde file)\n\targuments=locals() # get all function arguments as a dictionary\n\tnow=datetime.now()\n\tfilename=batchdir+\"/AA_logfile_PID_\"+now.strftime(\"%Y%d%m_%H_%M_%S\")+\".txt\"\n\tfid=open(filename,\"w+\")\n\t\n\tfid.write(\"Simulation starting time:\t\t\t\t\"+now.strftime(\"%Y/%d/%m %H:%M:%S\")+\"\\n\")\n\tfid.write(\"\\n\")\n\tfid.write(\"Temperature:\t\t\t\t\"+str(Temp)+\" °C\\n\")\n\tfid.write(\"Shunt segregation coefficient:\t\t\t\t\"+str(mseg)+\"\\n\")\n\tfid.write(\"Clathrate conductivity file:\t\t\t\t\"+clathrate_file+\"\\n\")\n\tfid.write(\"Sodium profiles from h5py file:\t\t\t\t\"+h5file+\"\\n\")\n\tfid.write(\"Simulation starting step:\t\t\t\t\"+str(startstep)+\"\\n\")\n\tfid.write(\"Simulation ending step:\t\t\t\t\"+str(endstep)+\"\\n\")\n\tfid.write(\"Step used to skip sodium profiles in the h5py file:\t\t\t\t\"+str(skipNB)+\"\\n\")\n\tfid.write(\"Sentaurus editor template file:\t\t\t\t\"+sdetemplate+\"\\n\")\n\tfid.write(\"Sentaurus device template file:\t\t\t\t\"+sdevicetemplate+\"\\n\")\n\n# Function replacing expression \"expr_search\" by \"expr_replace\" in file \"templatename.ext\" (in the base directory)\n# and saving the updated template \"templatename_t<time>.ext\" in directory \"newfolderpath\".\ndef replace_line(templatename,ext, expr_search,expr_replace,newfolderpath,time,identifier):\n# templatename:\t\t\tname of the template file, eg \"sdetemplate\"\n# extension:\t\t\textension of the template file, eg \"cmd\"\n# expr_search:\t\t\tname of the expression to be replaced, e.g. \"nodnum1\"\n# expr_replace:\t\t\tname of the new line, e.g. newnodename as newnodename=\"n_t\"+str(int(time))\n# newfolderpath:\t\tpath of the directory where the simulation data is saved. e.g. \"testdir2\"\n# time:\t\t\t\t\ttime of the Na profile (int or float)\n# identifier:\t\t\ttype of line being searched. 
eg: \"mesh line\"\n \n# open sdevice file to find number of lines\n\twith open(templatename+\".\"+ext,'r') as fp:\n\t\tcount_temp = len(fp.readlines( ))\n\t\tfp.close\n\t\n\t\tlinelist=[0]*count_temp # preallocate list memory\n\t\tnewlinelist=[0]*count_temp # preallocate list memory for file with modified line\n\n\n\twith open(templatename+\".\"+ext,'r') as fp:\n\t\tk=0\n\t\tsuccessdevice=0\n\t\t# save each line into a list\n\t\twhile 1:\n\t\t\tdataline=fp.readline()\n\t\t\tif dataline==\"\": # check if the string is empty, meaning end of file\n\t\t\t\tbreak\n\t\t\tlinelist[k]=dataline # create of list containing one line at each index\n\t\t\tnewlinelist[k]=dataline # copy the file into a new list array\n\t\t\t# find line where mesh is defined and change the name of the mesh file\n\t\t\tif re.search(expr_search,linelist[k]): # if the mesh definition is found\n\t\t\t\tprint(identifier + \" found\")\n\t\t\t\tnewlinelist[k]=re.sub(expr_search,expr_replace,linelist[k]) # replace the nodenumber \"nodnum1\" in the template by n_t1, n_t2, etc.\n\t\t\t\tsuccessdevice=successdevice+1\n\t\t\t\t#pdb.set_trace()\n\t\t\tk=k+1\n\t\tfp.close()\n\t\t\n\t\tif successdevice==0:\n\t\t\tprint(\"*********\\n\"+identifier + \" not found\\n**********\")\n\t\t\n\t\t# save the files under a different name (_t0, _t1, _t2, etc)\n\t\t# file path\n\t\tfilepath=os.path.join(newfolderpath,templatename) # path to save file in the right simulation folder\n\n\t\tnew_datafile_name=filepath+\"_t\"+str(int(time))+\".\"+ext\n\t\tf=open(new_datafile_name,\"w+\")\n\t\tfor i in range(len(newlinelist)):\n\t\t\tf.write(newlinelist[i])\n\t\tf.close()\n\t\tprint(\"File \"+new_datafile_name+\" created.\\n\")\n\t\t\n\t\treturn new_datafile_name\n\t\t\n\t\t\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.asarray", "matplotlib.pyplot.hold", "matplotlib.pyplot.plot", "matplotlib.pyplot.rcParams.update", "numpy.where", "scipy.signal.savgol_filter", "numpy.reshape", "numpy.arange", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots_adjust", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "scipy.interpolate.splev", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.ion", "matplotlib.pyplot.ylabel", "scipy.interpolate.splrep", "numpy.isfinite", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
bdice/pymbar
[ "22c327bcdde20f7c6256b3eb0de2efc4939e77c8" ]
[ "pymbar/tests/test_covariance.py" ]
[ "import numpy as np\nimport pymbar\nfrom pymbar.utils_for_testing import eq, suppress_derivative_warnings_for_tests\n\ndef load_oscillators(n_states, n_samples):\n name = \"%dx%d oscillators\" % (n_states, n_samples)\n O_k = np.linspace(1, 5, n_states)\n k_k = np.linspace(1, 3, n_states)\n N_k = (np.ones(n_states) * n_samples).astype('int')\n test = pymbar.testsystems.harmonic_oscillators.HarmonicOscillatorsTestCase(O_k, k_k)\n x_n, u_kn, N_k_output, s_n = test.sample(N_k, mode='u_kn')\n return name, u_kn, N_k_output, s_n\n\n\ndef load_exponentials(n_states, n_samples):\n name = \"%dx%d exponentials\" % (n_states, n_samples)\n rates = np.linspace(1, 3, n_states)\n N_k = (np.ones(n_states) * n_samples).astype('int')\n test = pymbar.testsystems.exponential_distributions.ExponentialTestCase(rates)\n x_n, u_kn, N_k_output, s_n = test.sample(N_k, mode='u_kn')\n return name, u_kn, N_k_output, s_n\n\n\ndef _test(data_generator):\n name, U, N_k, s_n = data_generator()\n print(name)\n mbar = pymbar.MBAR(U, N_k)\n results1 = mbar.getFreeEnergyDifferences(uncertainty_method=\"svd\", return_dict=True)\n fij1_t, dfij1_t = mbar.getFreeEnergyDifferences(uncertainty_method=\"svd\", return_dict=False)\n results2 = mbar.getFreeEnergyDifferences(uncertainty_method=\"svd-ew\", return_dict=True)\n fij1 = results1['Delta_f']\n dfij1 = results1['dDelta_f']\n fij2 = results2['Delta_f']\n dfij2 = results2['dDelta_f']\n\n # Check to make sure the returns from with and w/o dict are the same\n eq(fij1, fij1_t)\n eq(dfij1, dfij1_t)\n\n eq(pymbar.mbar_solvers.mbar_gradient(U, N_k, mbar.f_k), np.zeros(N_k.shape), decimal=8)\n eq(np.exp(mbar.Log_W_nk).sum(0), np.ones(len(N_k)), decimal=10)\n eq(np.exp(mbar.Log_W_nk).dot(N_k), np.ones(U.shape[1]), decimal=10)\n eq(pymbar.mbar_solvers.self_consistent_update(U, N_k, mbar.f_k), mbar.f_k, decimal=10)\n\n # Test against old MBAR code.\n with suppress_derivative_warnings_for_tests():\n mbar0 = pymbar.old_mbar.MBAR(U, N_k)\n fij0, dfij0 = mbar0.getFreeEnergyDifferences(uncertainty_method=\"svd\")\n eq(mbar.f_k, mbar0.f_k, decimal=8)\n eq(np.exp(mbar.Log_W_nk), np.exp(mbar0.Log_W_nk), decimal=5)\n\n eq(fij0, fij1, decimal=8)\n eq(dfij0, dfij1, decimal=8)\n\n eq(fij0, fij2, decimal=8)\n eq(dfij0, dfij2, decimal=8)\n\n\ndef test_100x100_oscillators():\n data_generator = lambda: load_oscillators(100, 100)\n _test(data_generator)\n\n\ndef test_200x50_oscillators():\n data_generator = lambda: load_oscillators(200, 50)\n _test(data_generator)\n\n\ndef test_200x50_exponentials():\n data_generator = lambda: load_exponentials(200, 50)\n _test(data_generator)\n" ]
[ [ "numpy.exp", "numpy.zeros", "numpy.linspace", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xunhen/HRNet-Object-Detection
[ "44f641da00810c61e217c1080ef1b45d39df484f" ]
[ "mmdet/ops/nms/setup.py" ]
[ "import os.path as osp\nfrom setuptools import setup, Extension\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\next_args = dict(\n include_dirs=[np.get_include()],\n language='c++',\n extra_compile_args={\n 'cc': ['-Wno-unused-function', '-Wno-write-strings'],\n 'nvcc': ['-c', '--compiler-options', '-fPIC'],\n },\n)\n\nextensions = [\n Extension('soft_nms_cpu', ['src/soft_nms_cpu.pyx'], **ext_args),\n]\n\n\ndef customize_compiler_for_nvcc(self):\n \"\"\"inject deep into distutils to customize how the dispatch\n to cc/nvcc works.\n If you subclass UnixCCompiler, it's not trivial to get your subclass\n injected in, and still have the right customizations (i.e.\n distutils.sysconfig.customize_compiler) run on it. So instead of going\n the OO route, I have this. Note, it's kindof like a wierd functional\n subclassing going on.\"\"\"\n\n # tell the compiler it can processes .cu\n self.src_extensions.append('.cu')\n\n # save references to the default compiler_so and _comple methods\n #add by wjc\n if hasattr(self, 'compiler_so'): # add by hwx at 20180408\n default_compiler_so = self.compiler_so\n super = self._compile\n\n # now redefine the _compile method. This gets executed for each\n # object but distutils doesn't have the ability to change compilers\n # based on source extension: we add it.\n def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n if osp.splitext(src)[1] == '.cu':\n # use the cuda for .cu files\n self.set_executable('compiler_so', 'nvcc')\n # use only a subset of the extra_postargs, which are 1-1 translated\n # from the extra_compile_args in the Extension class\n postargs = extra_postargs['nvcc']\n else:\n postargs = extra_postargs['cc']\n\n super(obj, src, ext, cc_args, postargs, pp_opts)\n # reset the default compiler_so, which we might have changed for cuda\n self.compiler_so = default_compiler_so\n\n # inject our redefined _compile method into the class\n self._compile = _compile\n\n\nclass custom_build_ext(build_ext):\n\n def build_extensions(self):\n customize_compiler_for_nvcc(self.compiler)\n build_ext.build_extensions(self)\n\n\nsetup(\n name='soft_nms',\n cmdclass={'build_ext': custom_build_ext},\n ext_modules=cythonize(extensions),\n)\n\nsetup(\n name='nms_cuda',\n ext_modules=[\n CUDAExtension('nms_cuda', [\n 'src/nms_cuda.cpp',\n 'src/nms_kernel.cu',\n ]),\n CUDAExtension('nms_cpu', [\n 'src/nms_cpu.cpp',\n ]),\n ],\n cmdclass={'build_ext': BuildExtension})\n" ]
[ [ "torch.utils.cpp_extension.CUDAExtension", "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mrsempress/stereo
[ "c7465e92d9d03f73c13011125bdd02c33def6c19" ]
[ "epilines.py" ]
[ "\"\"\"\nIt is for Epipolar geometry\n\"\"\"\n\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\ndef Epipolar_geometry(leftpath, rightpath):\n \"\"\"\n :param leftpath: The path of left images\n :param rightpath: The path of right images\n :return:\n \"\"\"\n # objP = np.zeros((6 * 7, 3), np.float32)\n # objP[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)\n # patternSize = (7, 6)\n imgl = cv2.imread(leftpath, 0) # queryimage # left image\n imgr = cv2.imread(rightpath, 0) # trainimage # right image\n # id = leftpath[16:]\n id = leftpath[42:]\n # # The origin image is gray\n # grayl = cv2.cvtColor(imgl, cv2.COLOR_BGR2GRAY)\n # grayr = cv2.cvtColor(imgr,cv2.COLOR_BGR2GRAY)\n # criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n # retl, cornersl = cv2.findChessboardCorners(grayl, patternSize, None)\n # retr, cornersr = cv2.findChessboardCorners(grayr, patternSize, None)\n # if not retl or not retr:\n # return\n\n # cornersl2 = cv2.cornerSubPix(grayl, cornersl, (11, 11), (-1, -1), criteria)\n # cornersr2 = cv2.cornerSubPix(grayr, cornersr, (11, 11), (-1, -1), criteria)\n\n # imgl = cv2.drawChessboardCorners(grayl, patternSize, cornersl2, retl)\n # imgr = cv2.drawChessboardCorners(grayr, patternSize, cornersr2, retr)\n\n # FLANN: Fast Libary for Approximate Nearest Neighbors\n (pts1, pts2) = findMatches(imgl, imgr, id)\n F, pts1, pts2 = findFundamentalMatrix(pts1, pts2)\n findEpilines(imgl, imgr, pts1, pts2, F, id)\n\n # # Brute Force\n # sift = cv2.xfeatures2d.SIFT_create(100)\n # kp1, des1 = sift.detectAndCompute(imgl, None)\n # kp2, des2 = sift.detectAndCompute(imgr, None)\n # bf = cv2.BFMatcher()\n # # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = False)\n # matches = bf.knnMatch(des1, des2, k=2)\n # goodMatches = []\n # minRatio = 1/3\n # for m,n in matches:\n # if m.distance / n.distance < minRatio:\n # goodMatches.append([m])\n # sorted(goodMatches,key=lambda x:x[0].distance)\n # #绘制最优匹配点\n # img3 = None\n # img3 = cv2.drawMatchesKnn(imgl, kp1, imgr, kp2, matches, img3, flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)\n # img3 = cv2.resize(img3,(1000, 400))\n # cv2.imwrite('output/epilines/epilines_' + id, img3)\n\n\ndef findMatches(img1, img2, id):\n \"\"\"\n :param img1: The left image\n :param img2: The right image\n :param id: The name of image\n :return: The list of symmetric point\n \"\"\"\n # vgg = cv2.xfeatures2d.VGG_create()\n # brisk = cv2.BRISK_create()\n # gms = cv2.xfeatures2d.matchGMS()\n # sift = cv2.xfeatures2d.SIFT_create(100)\n sift = cv2.xfeatures2d.SIFT_create()\n\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(img1, None)\n kp2, des2 = sift.detectAndCompute(img2, None)\n\n # FLANN parameters\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n\n matchesMask = [[0, 0] for i in range(len(matches))]\n good = []\n pts1 = []\n pts2 = []\n # ratio test as per Lowe's paper\n for i, (m, n) in enumerate(matches):\n if m.distance < 0.8 * n.distance:\n good.append(m)\n pts2.append(kp2[m.trainIdx].pt)\n pts1.append(kp1[m.queryIdx].pt)\n matchesMask[i] = [1, 0]\n\n # draw matches\n drawParams = dict( # singlePointColor=(255,0,0), matchColor=(0,255,0),\n matchesMask=matchesMask,\n flags=0)\n resultImage = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **drawParams)\n\n # # Univariate transformation\n # matchesMask = 
Univariatetrans(good, kp1, kp2, img1, img2)\n # # draw matches\n # drawParams = dict(matchColor = (0,255,0), # draw matches in green color\n # singlePointColor = None, matchesMask = matchesMask, flags = 2)\n # resultImage = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **drawParams)\n\n # cv2.imwrite('output/epilines/epilines_' + id, resultImage)\n cv2.imwrite('output/calibration_binocular/epilines_' + id, resultImage)\n\n return pts1, pts2\n\n\ndef Univariatetrans(goodMatches, kp1, kp2, img1, img2):\n \"\"\"\n :param goodMatches: The matches points\n :param kp1: keypoints 1\n :param kp2: keypoints 2\n :param img1: image 1\n :param img2: image 2\n :return: matchesMask\n \"\"\"\n MIN_MATCH_COUNT = 10\n\n if len(goodMatches) > MIN_MATCH_COUNT:\n\n src_pts = np.float32([kp1[m.queryIdx].pt for m in goodMatches]).reshape(-1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in goodMatches]).reshape(-1, 2)\n\n # Get the projection matrix\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist() # 用来配置匹配图,只绘制单应性图片中关键点的匹配线\n\n h, w = img1.shape[:2]\n\n # four corner\n pts = np.float32([[55, 74], [695, 45], [727, 464], [102, 548]]).reshape(-1, 1, 2)\n dst = cv2.perspectiveTransform(pts, M)\n\n # Draw the framework\n img2 = cv2.polylines(img2, [np.int32(dst)], True, (0, 255, 0), 2, cv2.LINE_AA)\n\n else:\n print(\"Not enough matches are found - %d/%d\" % (len(goodMatches), MIN_MATCH_COUNT))\n matchesMask = None\n\n return matchesMask\n\n\ndef findFundamentalMatrix(pts1, pts2):\n \"\"\"\n :param pts1: Symmetric point list 1\n :param pts2: Symmetric point list 2\n :return: Fundamental matrix and inlier points\n \"\"\"\n pts1 = np.int32(pts1)\n pts2 = np.int32(pts2)\n F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)\n # F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.RANSAC, 5.0)\n\n # We select only inlier points\n pts1 = pts1[mask.ravel() == 1]\n pts2 = pts2[mask.ravel() == 1]\n\n return F, pts1, pts2\n\n\ndef findEpilines(img1, img2, pts1, pts2, F, id):\n \"\"\"\n :param img1: The left image\n :param img2: The right image\n :param pts1: Symmetric point 1\n :param pts2: Symmetric point 2\n :param F: Fundamental matrix\n :param id: The id of raw picture\n :return:\n \"\"\"\n # Find epilines corresponding to points in right image (second image) [img6] and\n # drawing its lines on left image [img5]\n lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)\n lines1 = lines1.reshape(-1, 3)\n img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)\n\n # Find epilines corresponding to points in left image (first image) [img4] and\n # drawing its lines on right image [img3]\n lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)\n lines2 = lines2.reshape(-1, 3)\n img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)\n # cv2.imwrite('output/epilines/epilines_left' + id, img5)\n # cv2.imwrite('output/epilines/epilines_right' + id, img3)\n cv2.imwrite('output/calibration_binocular/epilines_left' + id, img5)\n cv2.imwrite('output/calibration_binocular/epilines_right' + id, img3)\n # plt.subplot(121), plt.imshow(img5)\n # plt.subplot(122), plt.imshow(img3)\n\n plt.subplot(221), plt.imshow(img5)\n plt.subplot(222), plt.imshow(img6)\n plt.subplot(223), plt.imshow(img3)\n plt.subplot(224), plt.imshow(img4)\n plt.show()\n\n\ndef drawlines(img1, img2, lines, pts1, pts2):\n \"\"\"\n :param img1: The image on which we draw the epilines for the points in img2\n :param img2: The other image\n :param lines: corresponding epilines\n 
:param pts1: Inlier point 1\n :param pts2: Inlier point 2\n :return: The new left and right image\n \"\"\"\n r, c = img1.shape\n img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)\n img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)\n for r, pt1, pt2 in zip(lines, pts1, pts2):\n color = tuple(np.random.randint(0, 255, 3).tolist())\n x0, y0 = map(int, [0, -r[2] / r[1]])\n x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])\n img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)\n img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)\n img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)\n return img1, img2\n\n\ndef main():\n # for id in range(1, 15):\n # if id == 10:\n # continue\n # # leftpath = './data/left/left' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # # rightpath = './data/right/right' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # leftpath = 'output/calibration_binocular/rectifiedleft' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # rightpath = 'output/calibration_binocular/rectifiedright' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # print(leftpath)\n # print(rightpath)\n # Epipolar_geometry(leftpath, rightpath)\n leftpath = 'output/calibration_binocular/rectifiedleft04.jpg'\n rightpath = 'output/calibration_binocular/rectifiedright04.jpg'\n print(leftpath)\n print(rightpath)\n Epipolar_geometry(leftpath, rightpath)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.int32", "matplotlib.pyplot.subplot", "numpy.float32", "matplotlib.pyplot.show", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vednatnaik/sip_calculator
[ "84da3c5c314f4da27dea2bccc0c05620cd18ec6c" ]
[ "Yearly_Sip_calculator.py" ]
[ "import matplotlib\r\nmatplotlib.use('WebAgg')\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef sip_calculator (sip_amount, years, IntrestRate):\r\n\r\n current_amount = sip_amount\r\n current_amount = sip_amount + (current_amount * IntrestRate) / 100\r\n\r\n print(f\"first month return {current_amount}\")\r\n\r\n for n in range(0, years - 1):\r\n RR = sip_amount + current_amount\r\n Nextmonthreturn = RR + (RR * IntrestRate) / 100\r\n # print(RR)\r\n print(f\"your {n + 2} years return is {round(Nextmonthreturn, 2)} Rs/-\")\r\n current_amount = Nextmonthreturn\r\n\r\n print(\"\")\r\n Invested_amount = sip_amount * years\r\n total_value = Nextmonthreturn\r\n est_return = total_value - Invested_amount\r\n print(f\"Invested amount is = {round(Invested_amount, 2)}Rs\")\r\n print(\"\")\r\n print(f\"Estimated return = {round(est_return, 2)}Rs\")\r\n print(\"\")\r\n print(f\"Total Value = {round(total_value, 2)}Rs\")\r\n print(\"\")\r\n\r\n\r\n list_data_name = [\"Invested Amount\", \"Est. Returns\"]\r\n list_data = [round(Invested_amount, 2), round(est_return, 2)]\r\n my_circle = plt.Circle((0, 0), 0.7, color='white')\r\n fig = plt.figure()\r\n plt.pie(list_data, labels=list_data_name)\r\n p = plt.gcf()\r\n p.gca().add_artist(my_circle)\r\n plt.show()\r\n\r\n\r\nprint(\"enter the amout you would like to invest per month:- \")\r\nsip_amount =12*int(input())\r\n\r\nprint(\"No. of years:-\")\r\nyears = int(input())\r\n\r\nprint(\"expected rate of return:-\")\r\nIntrestRate = int(input())\r\n\r\n\r\nsip_calculator(sip_amount,years,IntrestRate)" ]
[ [ "matplotlib.use", "matplotlib.pyplot.gcf", "matplotlib.pyplot.Circle", "matplotlib.pyplot.pie", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vita-epfl/openpifpaf_posetrack
[ "282ba063450d523728637167420d9ade4d9c1e65", "282ba063450d523728637167420d9ade4d9c1e65" ]
[ "openpifpaf_posetrack/transforms/scale.py", "openpifpaf_posetrack/cocokpst.py" ]
[ "import logging\n\nimport numpy as np\nimport PIL\n\nimport openpifpaf\nfrom openpifpaf.transforms.scale import _scale\n\nLOG = logging.getLogger(__name__)\n\n\nclass ScaleMix(openpifpaf.transforms.Preprocess):\n def __init__(self, scale_threshold, *,\n upscale_factor=2.0,\n downscale_factor=0.5,\n resample=PIL.Image.BILINEAR):\n self.scale_threshold = scale_threshold\n self.upscale_factor = upscale_factor\n self.downscale_factor = downscale_factor\n self.resample = resample\n\n def __call__(self, images, all_anns, metas):\n scales = np.array([\n np.sqrt(ann['bbox'][2] * ann['bbox'][3])\n for anns in all_anns\n for ann in anns if (not getattr(ann, 'iscrowd', False)\n and np.any(ann['keypoints'][:, 2] > 0.0))\n ])\n LOG.debug('scale threshold = %f, scales = %s', self.scale_threshold, scales)\n if not scales.shape[0]:\n return images, all_anns, metas\n\n all_above_threshold = np.all(scales > self.scale_threshold)\n all_below_threshold = np.all(scales < self.scale_threshold)\n if not all_above_threshold and \\\n not all_below_threshold:\n return images, all_anns, metas\n\n new_images = []\n new_all_anns = []\n new_metas = []\n for image, anns, meta in zip(images, all_anns, metas):\n w, h = image.size\n\n if all_above_threshold:\n target_w, target_h = int(w / 2), int(h / 2)\n else:\n target_w, target_h = int(w * 2), int(h * 2)\n\n new_image, new_anns, new_meta = \\\n _scale(image, anns, meta, target_w, target_h, self.resample)\n new_images.append(new_image)\n new_all_anns.append(new_anns)\n new_metas.append(new_meta)\n\n return new_images, new_all_anns, new_metas\n", "import argparse\n\nimport torch\n\nimport openpifpaf\nfrom openpifpaf.plugins.coco.constants import (\n COCO_CATEGORIES,\n COCO_KEYPOINTS,\n COCO_PERSON_SKELETON,\n COCO_PERSON_SIGMAS,\n COCO_PERSON_SCORE_WEIGHTS,\n COCO_UPRIGHT_POSE,\n DENSER_COCO_PERSON_CONNECTIONS,\n HFLIP,\n)\nfrom . 
import collate, encoder, headmeta, transforms\nfrom .transforms import SingleImage as S\n\ntry:\n import pycocotools.coco\n # monkey patch for Python 3 compat\n pycocotools.coco.unicode = str\nexcept ImportError:\n pass\n\n\nclass CocoKpSt(openpifpaf.datasets.DataModule):\n def __init__(self):\n super().__init__()\n\n cif = headmeta.TBaseCif(\n 'cif', 'cocokpst',\n keypoints=COCO_KEYPOINTS,\n sigmas=COCO_PERSON_SIGMAS,\n pose=COCO_UPRIGHT_POSE,\n draw_skeleton=COCO_PERSON_SKELETON,\n score_weights=COCO_PERSON_SCORE_WEIGHTS,\n )\n caf = headmeta.TBaseCaf(\n 'caf', 'cocokpst',\n keypoints=COCO_KEYPOINTS,\n sigmas=COCO_PERSON_SIGMAS,\n pose=COCO_UPRIGHT_POSE,\n skeleton=COCO_PERSON_SKELETON,\n )\n dcaf = headmeta.TBaseCaf(\n 'caf25', 'cocokpst',\n keypoints=COCO_KEYPOINTS,\n sigmas=COCO_PERSON_SIGMAS,\n pose=COCO_UPRIGHT_POSE,\n skeleton=DENSER_COCO_PERSON_CONNECTIONS,\n sparse_skeleton=COCO_PERSON_SKELETON,\n only_in_field_of_view=True,\n )\n tcaf = headmeta.Tcaf(\n 'tcaf', 'cocokpst',\n keypoints_single_frame=COCO_KEYPOINTS,\n sigmas_single_frame=COCO_PERSON_SIGMAS,\n pose_single_frame=COCO_UPRIGHT_POSE,\n draw_skeleton_single_frame=COCO_PERSON_SKELETON,\n only_in_field_of_view=True,\n )\n\n cif.upsample_stride = openpifpaf.plugins.coco.CocoKp.upsample_stride\n caf.upsample_stride = openpifpaf.plugins.coco.CocoKp.upsample_stride\n dcaf.upsample_stride = openpifpaf.plugins.coco.CocoKp.upsample_stride\n tcaf.upsample_stride = openpifpaf.plugins.coco.CocoKp.upsample_stride\n self.head_metas = [cif, caf, dcaf, tcaf]\n\n @classmethod\n def cli(cls, parser: argparse.ArgumentParser):\n # group = parser.add_argument_group('data module CocoKpSt')\n pass\n\n @classmethod\n def configure(cls, args: argparse.Namespace):\n # extract global information\n pass\n\n def _preprocess(self):\n bmin = openpifpaf.plugins.coco.CocoKp.bmin\n encoders = (\n encoder.SingleImage(openpifpaf.encoder.Cif(self.head_metas[0], bmin=bmin)),\n encoder.SingleImage(openpifpaf.encoder.Caf(self.head_metas[1], bmin=bmin)),\n encoder.SingleImage(openpifpaf.encoder.Caf(self.head_metas[2], bmin=bmin)),\n encoder.Tcaf(self.head_metas[3], bmin=bmin),\n )\n\n if not openpifpaf.plugins.coco.CocoKp.augmentation:\n return openpifpaf.transforms.Compose([\n openpifpaf.transforms.NormalizeAnnotations(),\n openpifpaf.transforms.RescaleAbsolute(openpifpaf.plugins.coco.CocoKp.square_edge),\n openpifpaf.transforms.CenterPad(openpifpaf.plugins.coco.CocoKp.square_edge),\n transforms.ImageToTracking(),\n S(openpifpaf.transforms.EVAL_TRANSFORM),\n transforms.Encoders(encoders),\n ])\n\n if openpifpaf.plugins.coco.CocoKp.extended_scale:\n rescale_t = openpifpaf.transforms.RescaleRelative(\n scale_range=(0.25 * openpifpaf.plugins.coco.CocoKp.rescale_images,\n 2.0 * openpifpaf.plugins.coco.CocoKp.rescale_images),\n power_law=True, stretch_range=(0.75, 1.33))\n else:\n rescale_t = openpifpaf.transforms.RescaleRelative(\n scale_range=(0.4 * openpifpaf.plugins.coco.CocoKp.rescale_images,\n 2.0 * openpifpaf.plugins.coco.CocoKp.rescale_images),\n power_law=True, stretch_range=(0.75, 1.33))\n\n return openpifpaf.transforms.Compose([\n openpifpaf.transforms.NormalizeAnnotations(),\n transforms.ImageToTracking(),\n openpifpaf.transforms.RandomApply(transforms.RandomizeOneFrame(), 0.2),\n S(openpifpaf.transforms.RandomApply(\n openpifpaf.transforms.HFlip(COCO_KEYPOINTS, HFLIP), 0.5)),\n S(rescale_t),\n S(openpifpaf.transforms.RandomChoice(\n [openpifpaf.transforms.RotateBy90(),\n openpifpaf.transforms.RotateUniform(30.0)],\n 
[openpifpaf.plugins.coco.CocoKp.orientation_invariant, 0.4],\n )),\n transforms.Crop(openpifpaf.plugins.coco.CocoKp.square_edge, max_shift=30.0),\n transforms.Pad(openpifpaf.plugins.coco.CocoKp.square_edge, max_shift=30.0),\n S(openpifpaf.transforms.RandomApply(openpifpaf.transforms.Blur(),\n openpifpaf.plugins.coco.CocoKp.blur / 2.0)),\n S(openpifpaf.transforms.RandomApply(transforms.HorizontalBlur(),\n openpifpaf.plugins.coco.CocoKp.blur / 2.0)),\n S(openpifpaf.transforms.TRAIN_TRANSFORM),\n transforms.Encoders(encoders),\n ])\n\n def train_loader(self):\n train_data = openpifpaf.plugins.coco.CocoDataset(\n image_dir=openpifpaf.plugins.coco.CocoKp.train_image_dir,\n ann_file=openpifpaf.plugins.coco.CocoKp.train_annotations,\n preprocess=self._preprocess(),\n annotation_filter=True,\n min_kp_anns=openpifpaf.plugins.coco.CocoKp.min_kp_anns,\n category_ids=[1],\n )\n return torch.utils.data.DataLoader(\n train_data,\n batch_size=self.batch_size // 2,\n shuffle=(not openpifpaf.plugins.coco.CocoKp.debug\n and openpifpaf.plugins.coco.CocoKp.augmentation),\n pin_memory=openpifpaf.plugins.coco.CocoKp.pin_memory,\n num_workers=self.loader_workers,\n drop_last=True,\n collate_fn=collate.collate_tracking_images_targets_meta,\n )\n\n def val_loader(self):\n val_data = openpifpaf.plugins.coco.CocoDataset(\n image_dir=openpifpaf.plugins.coco.CocoKp.val_image_dir,\n ann_file=openpifpaf.plugins.coco.CocoKp.val_annotations,\n preprocess=self._preprocess(),\n annotation_filter=True,\n min_kp_anns=openpifpaf.plugins.coco.CocoKp.min_kp_anns,\n category_ids=[1],\n )\n return torch.utils.data.DataLoader(\n val_data,\n batch_size=self.batch_size // 2,\n shuffle=False,\n pin_memory=openpifpaf.plugins.coco.CocoKp.pin_memory,\n num_workers=self.loader_workers,\n drop_last=True,\n collate_fn=collate.collate_tracking_images_targets_meta,\n )\n\n def _eval_preprocess(self):\n return openpifpaf.transforms.Compose([\n *openpifpaf.plugins.coco.CocoKp.common_eval_preprocess(),\n openpifpaf.transforms.ToAnnotations([\n openpifpaf.transforms.ToKpAnnotations(\n COCO_CATEGORIES,\n keypoints_by_category={1: self.head_metas[0].keypoints},\n skeleton_by_category={1: self.head_metas[1].skeleton},\n ),\n openpifpaf.transforms.ToCrowdAnnotations(COCO_CATEGORIES),\n ]),\n openpifpaf.transforms.EVAL_TRANSFORM,\n ])\n\n def eval_loader(self):\n eval_data = openpifpaf.plugins.coco.CocoDataset(\n image_dir=openpifpaf.plugins.coco.CocoKp.eval_image_dir,\n ann_file=openpifpaf.plugins.coco.CocoKp.eval_annotations,\n preprocess=self._eval_preprocess(),\n annotation_filter=openpifpaf.plugins.coco.CocoKp.eval_annotation_filter,\n min_kp_anns=(openpifpaf.plugins.coco.CocoKp.min_kp_anns\n if openpifpaf.plugins.coco.CocoKp.eval_annotation_filter\n else 0),\n category_ids=[1] if openpifpaf.plugins.coco.CocoKp.eval_annotation_filter else [],\n )\n return torch.utils.data.DataLoader(\n eval_data,\n batch_size=self.batch_size,\n shuffle=False,\n pin_memory=openpifpaf.plugins.coco.CocoKp.pin_memory,\n num_workers=self.loader_workers,\n drop_last=False,\n collate_fn=openpifpaf.datasets.collate_images_anns_meta,\n )\n\n def metrics(self):\n return [openpifpaf.metric.Coco(\n pycocotools.coco.COCO(openpifpaf.plugins.coco.CocoKp.eval_annotations),\n max_per_image=20,\n category_ids=[1],\n iou_type='keypoints',\n )]\n" ]
[ [ "numpy.all", "numpy.sqrt", "numpy.any" ], [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FunByJohn/QaDiL
[ "9e22bb061c5a2c32473c7ab3aa9b9cce4e98c963" ]
[ "Notes/IMO21/img/Matplotlib/sephp.py" ]
[ "#import numpy as np\nimport matplotlib\n#matplotlib.rcParams['text.usetex'] = True\nimport matplotlib.pyplot as plt\n\nplt.plot([1.35, 1.42, 1.45, 1.52], [35, 50, 40, 45], 'ro')\n\nplt.plot([1.68, 1.70, 1.73, 1.73], [65, 70, 60, 80], 'bo')\n\nplt.axis([1.3, 1.8, 30, 90])\n\nplt.xlabel(\"height (m)\")\n\nplt.ylabel(\"weight (kg)\")\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
timudk/probability
[ "8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103", "8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103", "8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103", "8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103", "8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103", "8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103" ]
[ "tensorflow_probability/python/internal/special_math.py", "tensorflow_probability/python/mcmc/diagnostic.py", "tensorflow_probability/python/bijectors/chain.py", "tensorflow_probability/python/distributions/variational_gaussian_process.py", "tensorflow_probability/python/math/root_search.py", "tensorflow_probability/python/bijectors/blockwise.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Functions \"ndtr\" and \"ndtri\" are derived from calculations made in:\n# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n# In the following email exchange, the author gives his consent to redistribute\n# derived works under an Apache 2.0 license.\n#\n# From: Stephen Moshier <[email protected]>\n# Date: Sat, Jun 9, 2018 at 2:36 PM\n# Subject: Re: Licensing cephes under Apache (BSD-like) license.\n# To: rif <[email protected]>\n#\n#\n#\n# Hello Rif,\n#\n# Yes, Google may distribute Cephes files under the Apache 2 license.\n#\n# If clarification is needed, I do not favor BSD over other free licenses.\n# I would agree that Apache 2 seems to cover the concern you mentioned\n# about sublicensees.\n#\n# Best wishes for good luck with your projects!\n# Steve Moshier\n#\n#\n#\n# On Thu, 31 May 2018, rif wrote:\n#\n# > Hello Steve.\n# > My name is Rif. I work on machine learning software at Google.\n# >\n# > Your cephes software continues to be incredibly useful and widely used. I\n# > was wondering whether it would be permissible for us to use the Cephes code\n# > under the Apache 2.0 license, which is extremely similar in permissions to\n# > the BSD license (Wikipedia comparisons). This would be quite helpful to us\n# > in terms of avoiding multiple licenses on software.\n# >\n# > I'm sorry to bother you with this (I can imagine you're sick of hearing\n# > about this by now), but I want to be absolutely clear we're on the level and\n# > not misusing your important software. 
In former conversation with Eugene\n# > Brevdo ([email protected]), you wrote \"If your licensing is similar to BSD,\n# > the formal way that has been handled is simply to add a statement to the\n# > effect that you are incorporating the Cephes software by permission of the\n# > author.\" I wanted to confirm that (a) we could use the Apache license, (b)\n# > that we don't need to (and probably you don't want to) keep getting\n# > contacted about individual uses, because your intent is generally to allow\n# > this software to be reused under \"BSD-like\" license, and (c) you're OK\n# > letting incorporators decide whether a license is sufficiently BSD-like?\n# >\n# > Best,\n# >\n# > rif\n# >\n# >\n# >\n\n\"\"\"Special Math Ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import dtype_util\n\n__all__ = [\n \"erfinv\",\n \"ndtr\",\n \"ndtri\",\n \"log_ndtr\",\n \"log_cdf_laplace\",\n]\n\n\n# log_ndtr uses different functions over the ranges\n# (-infty, lower](lower, upper](upper, infty)\n# Lower bound values were chosen by examining where the support of ndtr\n# appears to be zero, relative to scipy's (which is always 64bit). They were\n# then made more conservative just to be safe. (Conservative means use the\n# expansion more than we probably need to.) See `NdtrTest` in\n# special_math_test.py.\nLOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64)\nLOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32)\n\n# Upper bound values were chosen by examining for which values of 'x'\n# Log[cdf(x)] is 0, after which point we need to use the approximation\n# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly\n# conservative, meaning we use the approximation earlier than needed.\nLOGNDTR_FLOAT64_UPPER = np.array(8, np.float64)\nLOGNDTR_FLOAT32_UPPER = np.array(5, np.float32)\n\n\ndef ndtr(x, name=\"ndtr\"):\n \"\"\"Normal distribution function.\n\n Returns the area under the Gaussian probability density function, integrated\n from minus infinity to x:\n\n ```\n 1 / x\n ndtr(x) = ---------- | exp(-0.5 t**2) dt\n sqrt(2 pi) /-inf\n\n = 0.5 (1 + erf(x / sqrt(2)))\n = 0.5 erfc(x / sqrt(2))\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"ndtr\").\n\n Returns:\n ndtr: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:\n raise TypeError(\n \"x.dtype=%s is not handled, see docstring for supported types.\"\n % x.dtype)\n return _ndtr(x)\n\n\ndef _ndtr(x):\n \"\"\"Implements ndtr core logic.\"\"\"\n half_sqrt_2 = tf.constant(\n 0.5 * np.sqrt(2.), dtype=x.dtype, name=\"half_sqrt_2\")\n w = x * half_sqrt_2\n z = tf.abs(w)\n y = tf1.where(\n tf.less(z, half_sqrt_2), 1. + tf.math.erf(w),\n tf1.where(tf.greater(w, 0.), 2. 
- tf.math.erfc(z), tf.math.erfc(z)))\n return 0.5 * y\n\n\ndef ndtri(p, name=\"ndtri\"):\n \"\"\"The inverse of the CDF of the Normal distribution function.\n\n Returns x such that the area under the pdf from minus infinity to x is equal\n to p.\n\n A piece-wise rational approximation is done for the function.\n This is a port of the implementation in netlib.\n\n Args:\n p: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"ndtri\").\n\n Returns:\n x: `Tensor` with `dtype=p.dtype`.\n\n Raises:\n TypeError: if `p` is not floating-type.\n \"\"\"\n\n with tf.name_scope(name):\n p = tf.convert_to_tensor(value=p, name=\"p\")\n if dtype_util.as_numpy_dtype(p.dtype) not in [np.float32, np.float64]:\n raise TypeError(\n \"p.dtype=%s is not handled, see docstring for supported types.\"\n % p.dtype)\n return _ndtri(p)\n\n\ndef _ndtri(p):\n \"\"\"Implements ndtri core logic.\"\"\"\n\n # Constants used in piece-wise rational approximations. Taken from the cephes\n # library:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n p0 = list(reversed([-5.99633501014107895267E1,\n 9.80010754185999661536E1,\n -5.66762857469070293439E1,\n 1.39312609387279679503E1,\n -1.23916583867381258016E0]))\n q0 = list(reversed([1.0,\n 1.95448858338141759834E0,\n 4.67627912898881538453E0,\n 8.63602421390890590575E1,\n -2.25462687854119370527E2,\n 2.00260212380060660359E2,\n -8.20372256168333339912E1,\n 1.59056225126211695515E1,\n -1.18331621121330003142E0]))\n p1 = list(reversed([4.05544892305962419923E0,\n 3.15251094599893866154E1,\n 5.71628192246421288162E1,\n 4.40805073893200834700E1,\n 1.46849561928858024014E1,\n 2.18663306850790267539E0,\n -1.40256079171354495875E-1,\n -3.50424626827848203418E-2,\n -8.57456785154685413611E-4]))\n q1 = list(reversed([1.0,\n 1.57799883256466749731E1,\n 4.53907635128879210584E1,\n 4.13172038254672030440E1,\n 1.50425385692907503408E1,\n 2.50464946208309415979E0,\n -1.42182922854787788574E-1,\n -3.80806407691578277194E-2,\n -9.33259480895457427372E-4]))\n p2 = list(reversed([3.23774891776946035970E0,\n 6.91522889068984211695E0,\n 3.93881025292474443415E0,\n 1.33303460815807542389E0,\n 2.01485389549179081538E-1,\n 1.23716634817820021358E-2,\n 3.01581553508235416007E-4,\n 2.65806974686737550832E-6,\n 6.23974539184983293730E-9]))\n q2 = list(reversed([1.0,\n 6.02427039364742014255E0,\n 3.67983563856160859403E0,\n 1.37702099489081330271E0,\n 2.16236993594496635890E-1,\n 1.34204006088543189037E-2,\n 3.28014464682127739104E-4,\n 2.89247864745380683936E-6,\n 6.79019408009981274425E-9]))\n\n def _create_polynomial(var, coeffs):\n \"\"\"Compute n_th order polynomial via Horner's method.\"\"\"\n coeffs = np.array(coeffs, dtype_util.as_numpy_dtype(var.dtype))\n if not coeffs.size:\n return tf.zeros_like(var)\n return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var\n\n maybe_complement_p = tf1.where(p > -np.expm1(-2.), 1. - p, p)\n # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs\n # later on. The result from the computation when p == 0 is not used so any\n # number that doesn't result in NaNs is fine.\n sanitized_mcp = tf1.where(\n maybe_complement_p <= 0.,\n tf.fill(tf.shape(input=p),\n dtype_util.as_numpy_dtype(p.dtype)(0.5)), maybe_complement_p)\n\n # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).\n w = sanitized_mcp - 0.5\n ww = w ** 2\n x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)\n / _create_polynomial(ww, q0))\n x_for_big_p *= -np.sqrt(2. 
* np.pi)\n\n # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),\n # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different\n # arrays based on whether p < exp(-32).\n z = tf.sqrt(-2. * tf.math.log(sanitized_mcp))\n first_term = z - tf.math.log(z) / z\n second_term_small_p = (\n _create_polynomial(1. / z, p2) /\n _create_polynomial(1. / z, q2) / z)\n second_term_otherwise = (\n _create_polynomial(1. / z, p1) /\n _create_polynomial(1. / z, q1) / z)\n x_for_small_p = first_term - second_term_small_p\n x_otherwise = first_term - second_term_otherwise\n\n x = tf1.where(sanitized_mcp > np.exp(-2.), x_for_big_p,\n tf1.where(z >= 8.0, x_for_small_p, x_otherwise))\n\n x = tf1.where(p > 1. - np.exp(-2.), x, -x)\n infinity_scalar = tf.constant(np.inf, dtype=p.dtype)\n infinity = tf.fill(tf.shape(input=p), infinity_scalar)\n x_nan_replaced = tf1.where(p <= 0.0, -infinity,\n tf1.where(p >= 1.0, infinity, x))\n return x_nan_replaced\n\n\ndef log_ndtr(x, series_order=3, name=\"log_ndtr\"):\n \"\"\"Log Normal distribution function.\n\n For details of the Normal distribution function see `ndtr`.\n\n This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or\n using an asymptotic series. Specifically:\n - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on\n `log(1-x) ~= -x, x << 1`.\n - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique\n and take a log.\n - For `x <= lower_segment`, we use the series approximation of erf to compute\n the log CDF directly.\n\n The `lower_segment` is set based on the precision of the input:\n\n ```\n lower_segment = { -20, x.dtype=float64\n { -10, x.dtype=float32\n upper_segment = { 8, x.dtype=float64\n { 5, x.dtype=float32\n ```\n\n When `x < lower_segment`, the `ndtr` asymptotic series approximation is:\n\n ```\n ndtr(x) = scale * (1 + sum) + R_N\n scale = exp(-0.5 x**2) / (-x sqrt(2 pi))\n sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}\n R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})\n ```\n\n where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a\n [double-factorial](https://en.wikipedia.org/wiki/Double_factorial).\n\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n series_order: Positive Python `integer`. Maximum depth to\n evaluate the asymptotic expansion. This is the `N` above.\n name: Python string. 
A name for the operation (default=\"log_ndtr\").\n\n Returns:\n log_ndtr: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n TypeError: if `series_order` is a not Python `integer.`\n ValueError: if `series_order` is not in `[0, 30]`.\n \"\"\"\n if not isinstance(series_order, int):\n raise TypeError(\"series_order must be a Python integer.\")\n if series_order < 0:\n raise ValueError(\"series_order must be non-negative.\")\n if series_order > 30:\n raise ValueError(\"series_order must be <= 30.\")\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n\n if dtype_util.base_equal(x.dtype, tf.float64):\n lower_segment = LOGNDTR_FLOAT64_LOWER\n upper_segment = LOGNDTR_FLOAT64_UPPER\n elif dtype_util.base_equal(x.dtype, tf.float32):\n lower_segment = LOGNDTR_FLOAT32_LOWER\n upper_segment = LOGNDTR_FLOAT32_UPPER\n else:\n raise TypeError(\"x.dtype=%s is not supported.\" % x.dtype)\n\n # The basic idea here was ported from:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n # We copy the main idea, with a few changes\n # * For x >> 1, and X ~ Normal(0, 1),\n # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],\n # which extends the range of validity of this function.\n # * We use one fixed series_order for all of 'x', rather than adaptive.\n # * Our docstring properly reflects that this is an asymptotic series, not a\n # Taylor series. We also provided a correct bound on the remainder.\n # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when\n # x=0. This happens even though the branch is unchosen because when x=0\n # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan\n # regardless of whether dy is finite. Note that the minimum is a NOP if\n # the branch is chosen.\n return tf1.where(\n tf.greater(x, upper_segment),\n -_ndtr(-x), # log(1-x) ~= -x, x << 1\n tf1.where(\n tf.greater(x, lower_segment),\n tf.math.log(_ndtr(tf.maximum(x, lower_segment))),\n _log_ndtr_lower(tf.minimum(x, lower_segment), series_order)))\n\n\ndef _log_ndtr_lower(x, series_order):\n \"\"\"Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.\"\"\"\n x_2 = tf.square(x)\n # Log of the term multiplying (1 + sum)\n log_scale = -0.5 * x_2 - tf.math.log(-x) - 0.5 * np.log(2. * np.pi)\n return log_scale + tf.math.log(_log_ndtr_asymptotic_series(x, series_order))\n\n\ndef _log_ndtr_asymptotic_series(x, series_order):\n \"\"\"Calculates the asymptotic series used in log_ndtr.\"\"\"\n npdt = dtype_util.as_numpy_dtype(x.dtype)\n if series_order <= 0:\n return npdt(1)\n x_2 = tf.square(x)\n even_sum = tf.zeros_like(x)\n odd_sum = tf.zeros_like(x)\n x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.\n for n in range(1, series_order + 1):\n y = npdt(_double_factorial(2 * n - 1)) / x_2n\n if n % 2:\n odd_sum += y\n else:\n even_sum += y\n x_2n *= x_2\n return 1. + even_sum - odd_sum\n\n\ndef erfinv(x, name=\"erfinv\"):\n \"\"\"The inverse function for erf, the error function.\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. 
A name for the operation (default=\"erfinv\").\n\n Returns:\n x: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:\n raise TypeError(\"x.dtype={} is not handled, see docstring for supported \"\n \"types.\".format(dtype_util.name(x.dtype)))\n return ndtri((x + 1.) / 2.) / np.sqrt(2.)\n\n\ndef _double_factorial(n):\n \"\"\"The double factorial function for small Python integer `n`.\"\"\"\n return np.prod(np.arange(n, 1, -2))\n\n\ndef log_cdf_laplace(x, name=\"log_cdf_laplace\"):\n \"\"\"Log Laplace distribution function.\n\n This function calculates `Log[L(x)]`, where `L(x)` is the cumulative\n distribution function of the Laplace distribution, i.e.\n\n ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```\n\n For numerical accuracy, `L(x)` is computed in different ways depending on `x`,\n\n ```\n x <= 0:\n Log[L(x)] = Log[0.5] + x, which is exact\n\n 0 < x:\n Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"log_ndtr\").\n\n Returns:\n `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n \"\"\"\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n\n # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.\n lower_solution = -np.log(2.) + x\n\n # safe_exp_neg_x = exp{-x} for x > 0, but is\n # bounded above by 1, which avoids\n # log[1 - 1] = -inf for x = log(1/2), AND\n # exp{-x} --> inf, for x << -1\n safe_exp_neg_x = tf.exp(-tf.abs(x))\n\n # log1p(z) = log(1 + z) approx z for |z| << 1. 
This approximation is used\n # internally by log1p, rather than being done explicitly here.\n upper_solution = tf.math.log1p(-0.5 * safe_exp_neg_x)\n\n return tf1.where(x < 0., lower_solution, upper_solution)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utilities for Markov Chain Monte Carlo (MCMC) sampling.\n\n@@effective_sample_size\n@@potential_scale_reduction\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import stats\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import prefer_static\n\n__all__ = [\n 'effective_sample_size',\n 'potential_scale_reduction',\n]\n\n\ndef effective_sample_size(states,\n filter_threshold=0.,\n filter_beyond_lag=None,\n filter_beyond_positive_pairs=False,\n name=None):\n \"\"\"Estimate a lower bound on effective sample size for each independent chain.\n\n Roughly speaking, \"effective sample size\" (ESS) is the size of an iid sample\n with the same variance as `state`.\n\n More precisely, given a stationary sequence of possibly correlated random\n variables `X_1, X_2,...,X_N`, each identically distributed, ESS is the number\n such that\n\n ```Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.```\n\n If the sequence is uncorrelated, `ESS = N`. If the sequence is positively\n auto-correlated, `ESS` will be less than `N`. If there are negative\n correlations, then `ESS` can exceed `N`.\n\n Args:\n states: `Tensor` or list of `Tensor` objects. Dimension zero should index\n identically distributed states.\n filter_threshold: `Tensor` or list of `Tensor` objects.\n Must broadcast with `state`. The auto-correlation sequence is truncated\n after the first appearance of a term less than `filter_threshold`.\n Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,\n setting to any number less than `-1` has the same effect. Ignored if\n `filter_beyond_positive_pairs` is `True`.\n filter_beyond_lag: `Tensor` or list of `Tensor` objects. Must be\n `int`-like and scalar valued. The auto-correlation sequence is truncated\n to this length. Setting to `None` means we do not filter based on number\n of lags.\n filter_beyond_positive_pairs: Python boolean. If `True`, only consider the\n initial auto-correlation sequence where the pairwise sums are positive.\n name: `String` name to prepend to created ops.\n\n Returns:\n ess: `Tensor` or list of `Tensor` objects. The effective sample size of\n each component of `states`. 
Shape will be `states.shape[1:]`.\n\n Raises:\n ValueError: If `states` and `filter_threshold` or `states` and\n `filter_beyond_lag` are both lists with different lengths.\n\n #### Examples\n\n We use ESS to estimate standard error.\n\n ```\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])\n\n # Get 1000 states from one chain.\n states = tfp.mcmc.sample_chain(\n num_burnin_steps=200,\n num_results=1000,\n current_state=tf.constant([0., 0.]),\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target.log_prob,\n step_size=0.05,\n num_leapfrog_steps=20))\n states.shape\n ==> (1000, 2)\n\n ess = effective_sample_size(states, filter_beyond_positive_pairs=True)\n ==> Shape (2,) Tensor\n\n mean, variance = tf.nn.moments(states, axes=[0])\n standard_error = tf.sqrt(variance / ess)\n ```\n\n Some math shows that, with `R_k` the auto-correlation sequence,\n `R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have\n\n ```ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]```\n\n This function estimates the above by first estimating the auto-correlation.\n Since `R_k` must be estimated using only `N - k` samples, it becomes\n progressively noisier for larger `k`. For this reason, the summation over\n `R_k` should be truncated at some number `filter_beyond_lag < N`. This\n function provides two methods to perform this truncation.\n\n * `filter_threshold` -- since many MCMC methods generate chains where `R_k >\n 0`, a reasonable criterion is to truncate at the first index where the\n estimated auto-correlation becomes negative. This method does not estimate\n the `ESS` of super-efficient chains (where `ESS > N`) correctly.\n\n * `filter_beyond_positive_pairs` -- reversible MCMC chains produce an\n auto-correlation sequence with the property that pairwise sums of the\n elements of that sequence are positive [1] (i.e. `R_{2k} + R_{2k + 1} > 0`\n for `k in {0, ..., N/2}`). Deviations are only possible due to noise. This\n method truncates the auto-correlation sequence where the pairwise sums\n become non-positive.\n\n The arguments `filter_beyond_lag`, `filter_threshold` and\n `filter_beyond_positive_pairs` are filters intended to remove noisy tail terms\n from `R_k`. You can combine `filter_beyond_lag` with `filter_threshold` or\n `filter_beyond_positive_pairs`. E.g. combining `filter_beyond_lag` and\n `filter_beyond_positive_pairs` means that terms are removed if they were to be\n filtered under the `filter_beyond_lag` OR `filter_beyond_positive_pairs`\n criteria.\n\n #### References\n\n [1]: Geyer, C. J. 
Practical Markov chain Monte Carlo (with discussion).\n Statistical Science, 7:473-511, 1992.\n \"\"\"\n states_was_list = _is_list_like(states)\n\n # Convert all args to lists.\n if not states_was_list:\n states = [states]\n\n filter_beyond_lag = _broadcast_maybelist_arg(states, filter_beyond_lag,\n 'filter_beyond_lag')\n filter_threshold = _broadcast_maybelist_arg(states, filter_threshold,\n 'filter_threshold')\n filter_beyond_positive_pairs = _broadcast_maybelist_arg(\n states, filter_beyond_positive_pairs, 'filter_beyond_positive_pairs')\n\n # Process items, one at a time.\n with tf.name_scope('effective_sample_size' if name is None else name):\n ess_list = [\n _effective_sample_size_single_state(s, fbl, ft, fbpp) # pylint: disable=g-complex-comprehension\n for (s, fbl, ft,\n fbpp) in zip(states, filter_beyond_lag, filter_threshold,\n filter_beyond_positive_pairs)\n ]\n\n if states_was_list:\n return ess_list\n return ess_list[0]\n\n\ndef _effective_sample_size_single_state(states, filter_beyond_lag,\n filter_threshold,\n filter_beyond_positive_pairs):\n \"\"\"ESS computation for one single Tensor argument.\"\"\"\n\n with tf.name_scope('effective_sample_size_single_state'):\n\n states = tf.convert_to_tensor(value=states, name='states')\n dt = states.dtype\n\n # filter_beyond_lag == None ==> auto_corr is the full sequence.\n auto_corr = stats.auto_correlation(\n states, axis=0, max_lags=filter_beyond_lag)\n\n # With R[k] := auto_corr[k, ...],\n # ESS = N / {1 + 2 * Sum_{k=1}^N (N - k) / N * R[k]}\n # = N / {-1 + 2 * Sum_{k=0}^N (N - k) / N * R[k]} (since R[0] = 1)\n # approx N / {-1 + 2 * Sum_{k=0}^M (N - k) / N * R[k]}\n # where M is the filter_beyond_lag truncation point chosen above.\n\n # Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total\n # ndims the same as auto_corr\n n = _axis_size(states, axis=0)\n k = tf.range(0., _axis_size(auto_corr, axis=0))\n nk_factor = (n - k) / n\n if auto_corr.shape.ndims is not None:\n new_shape = [-1] + [1] * (auto_corr.shape.ndims - 1)\n else:\n new_shape = tf.concat(\n ([-1],\n tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)),\n axis=0)\n nk_factor = tf.reshape(nk_factor, new_shape)\n weighted_auto_corr = nk_factor * auto_corr\n\n if filter_beyond_positive_pairs:\n def _sum_pairs(x):\n x_len = tf.shape(x)[0]\n # For odd sequences, we drop the final value.\n x = x[:x_len - x_len % 2]\n new_shape = tf.concat([[x_len // 2, 2], tf.shape(x)[1:]], axis=0)\n return tf.reduce_sum(tf.reshape(x, new_shape), 1)\n\n # Pairwise sums are all positive for auto-correlation spectra derived from\n # reversible MCMC chains.\n # E.g. imagine the pairwise sums are [0.2, 0.1, -0.1, -0.2]\n # Step 1: mask = [False, False, True, True]\n mask = _sum_pairs(auto_corr) < 0.\n # Step 2: mask = [0, 0, 1, 1]\n mask = tf.cast(mask, dt)\n # Step 3: mask = [0, 0, 1, 2]\n mask = tf.cumsum(mask, axis=0)\n # Step 4: mask = [1, 1, 0, 0]\n mask = tf.maximum(1. - mask, 0.)\n\n # N.B. this reduces the length of weighted_auto_corr by a factor of 2.\n # It still works fine in the formula below.\n weighted_auto_corr = _sum_pairs(weighted_auto_corr) * mask\n elif filter_threshold is not None:\n filter_threshold = tf.convert_to_tensor(\n value=filter_threshold, dtype=dt, name='filter_threshold')\n # Get a binary mask to zero out values of auto_corr below the threshold.\n # mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,\n # mask[i, ...] 
= 0, otherwise.\n # So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]\n # Building step by step,\n # Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.\n # Step 1: mask = [False, False, True, False]\n mask = auto_corr < filter_threshold\n # Step 2: mask = [0, 0, 1, 0]\n mask = tf.cast(mask, dtype=dt)\n # Step 3: mask = [0, 0, 1, 1]\n mask = tf.cumsum(mask, axis=0)\n # Step 4: mask = [1, 1, 0, 0]\n mask = tf.maximum(1. - mask, 0.)\n weighted_auto_corr *= mask\n\n return n / (-1 +\n 2 * tf.reduce_sum(input_tensor=weighted_auto_corr, axis=0))\n\n\ndef potential_scale_reduction(chains_states,\n independent_chain_ndims=1,\n split_chains=False,\n validate_args=False,\n name=None):\n \"\"\"Gelman and Rubin (1992)'s potential scale reduction for chain convergence.\n\n Given `N > 1` states from each of `C > 1` independent chains, the potential\n scale reduction factor, commonly referred to as R-hat, measures convergence of\n the chains (to the same target) by testing for equality of means.\n Specifically, R-hat measures the degree to which variance (of the means)\n between chains exceeds what one would expect if the chains were identically\n distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2].\n\n Some guidelines:\n\n * The initial state of the chains should be drawn from a distribution\n overdispersed with respect to the target.\n * If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.\n Before that, R-hat > 1 (except in pathological cases, e.g. if the chain\n paths were identical).\n * The above holds for any number of chains `C > 1`. Increasing `C` does\n improve effectiveness of the diagnostic.\n * Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of\n course this is problem-dependent. See [Brooks and Gelman (1998)][2].\n * R-hat only measures non-convergence of the mean. If higher moments, or\n other statistics are desired, a different diagnostic should be used. See\n [Brooks and Gelman (1998)][2].\n\n Args:\n chains_states: `Tensor` or Python `list` of `Tensor`s representing the\n states of a Markov Chain at each result step. The `ith` state is\n assumed to have shape `[Ni, Ci1, Ci2,...,CiD] + A`.\n Dimension `0` indexes the `Ni > 1` result steps of the Markov Chain.\n Dimensions `1` through `D` index the `Ci1 x ... x CiD` independent\n chains to be tested for convergence to the same target.\n The remaining dimensions, `A`, can have any shape (even empty).\n independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the\n number of dimensions, from `dim = 1` to `dim = D`, holding independent\n chain results to be tested for convergence.\n split_chains: Python `bool`. If `True`, divide samples from each chain into\n first and second halves, treating these as separate chains. This makes\n R-hat more robust to non-stationary chains, and is recommended in [3].\n validate_args: Whether to add runtime checks of argument validity. If False,\n and arguments are incorrect, correct behavior is not guaranteed.\n name: `String` name to prepend to created ops. Default:\n `potential_scale_reduction`.\n\n Returns:\n `Tensor` or Python `list` of `Tensor`s representing the R-hat statistic for\n the state(s). 
Same `dtype` as `state`, and shape equal to\n `state.shape[1 + independent_chain_ndims:]`.\n\n Raises:\n ValueError: If `independent_chain_ndims < 1`.\n\n #### Examples\n\n Diagnosing convergence by monitoring 10 chains that each attempt to\n sample from a 2-variate normal.\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])\n\n # Get 10 (2x) overdispersed initial states.\n initial_state = target.sample(10) * 2.\n ==> (10, 2)\n\n # Get 1000 samples from the 10 independent chains.\n chains_states, _ = tfp.mcmc.sample_chain(\n num_burnin_steps=200,\n num_results=1000,\n current_state=initial_state,\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target.log_prob,\n step_size=0.05,\n num_leapfrog_steps=20))\n chains_states.shape\n ==> (1000, 10, 2)\n\n rhat = tfp.mcmc.diagnostic.potential_scale_reduction(\n chains_states, independent_chain_ndims=1)\n\n # The second dimension needed a longer burn-in.\n rhat.eval()\n ==> [1.05, 1.3]\n ```\n\n To see why R-hat is reasonable, let `X` be a random variable drawn uniformly\n from the combined states (combined over all chains). Then, in the limit\n `N, C --> infinity`, with `E`, `Var` denoting expectation and variance,\n\n ```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```\n\n Using the law of total variance, the numerator is the variance of the combined\n states, and the denominator is the total variance minus the variance of the\n individual chain means. If the chains are all drawing from the same\n distribution, they will have the same mean, and thus the ratio should be one.\n\n #### References\n\n [1]: Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation\n Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992.\n\n [2]: Stephen P. Brooks and Andrew Gelman. General Methods for Monitoring\n Convergence of Iterative Simulations. _Journal of Computational and\n Graphical Statistics_, 7(4), 1998.\n\n [3]: Vehtari et al. Rank-normalization, folding, and localization: An\n improved Rhat for assessing convergence of MCMC.\n \"\"\"\n chains_states_was_list = _is_list_like(chains_states)\n if not chains_states_was_list:\n chains_states = [chains_states]\n\n # tf.get_static_value returns None iff a constant value (as a numpy\n # array) is not efficiently computable. Therefore, we try constant_value then\n # check for None.\n icn_const_ = tf.get_static_value(\n tf.convert_to_tensor(value=independent_chain_ndims))\n if icn_const_ is not None:\n independent_chain_ndims = icn_const_\n if icn_const_ < 1:\n raise ValueError(\n 'Argument `independent_chain_ndims` must be `>= 1`, found: {}'.format(\n independent_chain_ndims))\n\n with tf.name_scope('potential_scale_reduction' if name is None else name):\n rhat_list = [\n _potential_scale_reduction_single_state(s, independent_chain_ndims,\n split_chains, validate_args)\n for s in chains_states\n ]\n\n if chains_states_was_list:\n return rhat_list\n return rhat_list[0]\n\n\ndef _potential_scale_reduction_single_state(state, independent_chain_ndims,\n split_chains, validate_args):\n \"\"\"potential_scale_reduction for one single state `Tensor`.\"\"\"\n with tf.name_scope('potential_scale_reduction_single_state'):\n # We assume exactly one leading dimension indexes e.g. 
correlated samples\n # from each Markov chain.\n state = tf.convert_to_tensor(value=state, name='state')\n\n n_samples_ = tf.compat.dimension_value(state.shape[0])\n if n_samples_ is not None: # If available statically.\n if split_chains and n_samples_ < 4:\n raise ValueError(\n 'Must provide at least 4 samples when splitting chains. '\n 'Found {}'.format(n_samples_))\n if not split_chains and n_samples_ < 2:\n raise ValueError(\n 'Must provide at least 2 samples. Found {}'.format(n_samples_))\n elif validate_args:\n if split_chains:\n state = distribution_util.with_dependencies([\n tf1.assert_greater_equal(\n tf.shape(state)[0], 4,\n message='Must provide at least 4 samples when splitting chains.'\n )], state)\n else:\n state = distribution_util.with_dependencies([\n tf1.assert_greater_equal(\n tf.shape(state)[0], 2,\n message='Must provide at least 2 samples.')], state)\n\n # Define so it's not a magic number.\n # Warning! `if split_chains` logic assumes this is 1!\n sample_ndims = 1\n\n if split_chains:\n # Split the sample dimension in half, doubling the number of\n # independent chains.\n\n # For odd number of samples, keep all but the last sample.\n state_shape = prefer_static.shape(state)\n n_samples = state_shape[0]\n state = state[:n_samples - n_samples % 2]\n\n # Suppose state = [0, 1, 2, 3, 4, 5]\n # Step 1: reshape into [[0, 1, 2], [3, 4, 5]]\n # E.g. reshape states of shape [a, b] into [2, a//2, b].\n state = tf.reshape(\n state,\n prefer_static.concat([[2, n_samples // 2], state_shape[1:]], axis=0)\n )\n # Step 2: Put the size `2` dimension in the right place to be treated as a\n # chain, changing [[0, 1, 2], [3, 4, 5]] into [[0, 3], [1, 4], [2, 5]],\n # reshaping [2, a//2, b] into [a//2, 2, b].\n state = tf.transpose(\n a=state,\n perm=prefer_static.concat(\n [[1, 0], tf.range(2, tf.rank(state))], axis=0))\n\n # We're treating the new dim as indexing 2 chains, so increment.\n independent_chain_ndims += 1\n\n sample_axis = tf.range(0, sample_ndims)\n chain_axis = tf.range(sample_ndims,\n sample_ndims + independent_chain_ndims)\n sample_and_chain_axis = tf.range(\n 0, sample_ndims + independent_chain_ndims)\n\n n = _axis_size(state, sample_axis)\n m = _axis_size(state, chain_axis)\n\n # In the language of Brooks and Gelman (1998),\n # B / n is the between chain variance, the variance of the chain means.\n # W is the within sequence variance, the mean of the chain variances.\n b_div_n = _reduce_variance(\n tf.reduce_mean(input_tensor=state, axis=sample_axis, keepdims=True),\n sample_and_chain_axis,\n biased=False)\n w = tf.reduce_mean(\n input_tensor=_reduce_variance(\n state, sample_axis, keepdims=True, biased=True),\n axis=sample_and_chain_axis)\n\n # sigma^2_+ is an estimate of the true variance, which would be unbiased if\n # each chain was drawn from the target. c.f. \"law of total variance.\"\n sigma_2_plus = w + b_div_n\n\n return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) 
/ (m * n)\n\n\n# TODO(b/72873233) Move some variant of this to tfd.sample_stats.\ndef _reduce_variance(x, axis=None, biased=True, keepdims=False):\n with tf.name_scope('reduce_variance'):\n x = tf.convert_to_tensor(value=x, name='x')\n mean = tf.reduce_mean(input_tensor=x, axis=axis, keepdims=True)\n biased_var = tf.reduce_mean(\n input_tensor=tf.math.squared_difference(x, mean),\n axis=axis,\n keepdims=keepdims)\n if biased:\n return biased_var\n n = _axis_size(x, axis)\n return (n / (n - 1.)) * biased_var\n\n\ndef _axis_size(x, axis=None):\n \"\"\"Get number of elements of `x` in `axis`, as type `x.dtype`.\"\"\"\n if axis is None:\n return tf.cast(tf.size(input=x), x.dtype)\n return tf.cast(\n tf.reduce_prod(input_tensor=tf.gather(tf.shape(input=x), axis)), x.dtype)\n\n\ndef _is_list_like(x):\n \"\"\"Helper which returns `True` if input is `list`-like.\"\"\"\n return isinstance(x, (tuple, list))\n\n\ndef _broadcast_maybelist_arg(states, secondary_arg, name):\n \"\"\"Broadcast a listable secondary_arg to that of states.\"\"\"\n if _is_list_like(secondary_arg):\n if len(secondary_arg) != len(states):\n raise ValueError('Argument `{}` was a list of different length ({}) than '\n '`states` ({})'.format(name, len(secondary_arg),\n len(states)))\n else:\n secondary_arg = [secondary_arg] * len(states)\n\n return secondary_arg\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Chain bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\n__all__ = [\n \"Chain\",\n]\n\n\ndef _use_static_shape(input_tensor, ndims):\n return (tensorshape_util.is_fully_defined(input_tensor.shape) and\n isinstance(ndims, int))\n\n\ndef _compute_min_event_ndims(bijector_list, compute_forward=True):\n \"\"\"Computes the min_event_ndims associated with the given list of bijectors.\n\n Given a list `bijector_list` of bijectors, compute the min_event_ndims that is\n associated with the composition of bijectors in that list.\n\n min_event_ndims is the # of rightmost dimensions on which the bijector has\n done necessary computation (i.e. the non-broadcastable part of the\n computation).\n\n We can derive the min_event_ndims for a chain of bijectors as follows:\n\n In the case where there are no rank changing bijectors, this will simply be\n `max(b.forward_min_event_ndims for b in bijector_list)`. 
This is because the\n bijector with the most forward_min_event_ndims requires the most dimensions,\n and hence the chain also requires operating on those dimensions.\n\n However in the case of rank changing, more care is needed in determining the\n exact amount of dimensions. Padding dimensions causes subsequent bijectors to\n operate on the padded dimensions, and removing dimensions causes bijectors to\n operate on dimensions further to the left.\n\n Args:\n bijector_list: List of bijectors to be composed by chain.\n compute_forward: Boolean. If True, computes the min_event_ndims associated\n with a forward call to Chain, and otherwise computes the min_event_ndims\n associated with an inverse call to Chain. The latter is the same as the\n min_event_ndims associated with a forward call to Invert(Chain(....)).\n\n Returns:\n min_event_ndims\n \"\"\"\n min_event_ndims = 0\n # This is a mouthful, but what this encapsulates is that if not for rank\n # changing bijectors, we'd only need to compute the largest of the min\n # required ndims. Hence \"max_min\". Due to rank changing bijectors, we need to\n # account for synthetic rank growth / synthetic rank decrease from a rank\n # changing bijector.\n rank_changed_adjusted_max_min_event_ndims = 0\n\n if compute_forward:\n bijector_list = reversed(bijector_list)\n\n for b in bijector_list:\n if compute_forward:\n current_min_event_ndims = b.forward_min_event_ndims\n current_inverse_min_event_ndims = b.inverse_min_event_ndims\n else:\n current_min_event_ndims = b.inverse_min_event_ndims\n current_inverse_min_event_ndims = b.forward_min_event_ndims\n\n # New dimensions were touched.\n if rank_changed_adjusted_max_min_event_ndims < current_min_event_ndims:\n min_event_ndims += (\n current_min_event_ndims - rank_changed_adjusted_max_min_event_ndims)\n rank_changed_adjusted_max_min_event_ndims = max(\n current_min_event_ndims, rank_changed_adjusted_max_min_event_ndims)\n\n # If the number of dimensions has increased via forward, then\n # inverse_min_event_ndims > forward_min_event_ndims, and hence the\n # dimensions we computed on, have moved left (so we have operated\n # on additional dimensions).\n # Conversely, if the number of dimensions has decreased via forward,\n # then we have inverse_min_event_ndims < forward_min_event_ndims,\n # and so we will have operated on fewer right most dimensions.\n\n number_of_changed_dimensions = (\n current_min_event_ndims - current_inverse_min_event_ndims)\n rank_changed_adjusted_max_min_event_ndims -= number_of_changed_dimensions\n return min_event_ndims\n\n\nclass Chain(bijector.Bijector):\n \"\"\"Bijector which applies a sequence of bijectors.\n\n Example Use:\n\n ```python\n chain = Chain([Exp(), Softplus()], name=\"one_plus_exp\")\n ```\n\n Results in:\n\n * Forward:\n\n ```python\n exp = Exp()\n softplus = Softplus()\n Chain([exp, softplus]).forward(x)\n = exp.forward(softplus.forward(x))\n = tf.exp(tf.log(1. + tf.exp(x)))\n = 1. + tf.exp(x)\n ```\n\n * Inverse:\n\n ```python\n exp = Exp()\n softplus = Softplus()\n Chain([exp, softplus]).inverse(y)\n = softplus.inverse(exp.inverse(y))\n = tf.log(tf.exp(tf.log(y)) - 1.)\n = tf.log(y - 1.)\n ```\n\n \"\"\"\n\n def __init__(self, bijectors=None, validate_args=False, name=None):\n \"\"\"Instantiates `Chain` bijector.\n\n Args:\n bijectors: Python `list` of bijector instances. 
An empty list makes this\n bijector equivalent to the `Identity` bijector.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str`, name given to ops managed by this object. Default:\n derived from the bijector names, e.g.\n `Chain([Exp(), Softplus()]).name == \"chain_of_exp_of_softplus\"`.\n\n Raises:\n ValueError: if bijectors have different dtypes.\n \"\"\"\n if name is None:\n name = (\"identity\" if not bijectors else\n \"_of_\".join([\"chain\"] + [b.name for b in bijectors]))\n name = name.replace(\"/\", \"\")\n with tf.name_scope(name) as name:\n if bijectors is None:\n bijectors = ()\n self._bijectors = bijectors\n\n for a_bijector in bijectors:\n if not a_bijector._is_injective: # pylint: disable=protected-access\n raise NotImplementedError(\n \"Invert is not implemented for non-injective bijector \"\n \"({})\".format(a_bijector.name))\n\n dtype = list(set([b.dtype for b in bijectors if b.dtype is not None]))\n if len(dtype) > 1:\n raise ValueError(\"incompatible dtypes: %s\" % dtype)\n elif len(dtype) == 1:\n dtype = dtype[0]\n else:\n dtype = None\n\n inverse_min_event_ndims = _compute_min_event_ndims(\n bijectors, compute_forward=False)\n forward_min_event_ndims = _compute_min_event_ndims(\n bijectors, compute_forward=True)\n\n super(Chain, self).__init__(\n graph_parents=list(itertools.chain.from_iterable(\n b.graph_parents for b in bijectors)),\n forward_min_event_ndims=forward_min_event_ndims,\n inverse_min_event_ndims=inverse_min_event_ndims,\n is_constant_jacobian=all(b.is_constant_jacobian for b in bijectors),\n validate_args=validate_args,\n dtype=dtype,\n name=name)\n\n @property\n def bijectors(self):\n return self._bijectors\n\n def _shape_helper(self, func_name, input_shape, reverse):\n new_shape = input_shape\n for b in reversed(self.bijectors) if reverse else self.bijectors:\n func = getattr(b, func_name, None)\n if func is None:\n raise ValueError(\"unable to call %s on bijector %s (%s)\" %\n (func_name, b.name, func))\n new_shape = func(new_shape)\n return new_shape\n\n def _forward_event_shape(self, input_shape):\n return self._shape_helper(\"forward_event_shape\", input_shape,\n reverse=True)\n\n def _forward_event_shape_tensor(self, input_shape):\n return self._shape_helper(\n \"forward_event_shape_tensor\", input_shape, reverse=True)\n\n def _inverse_event_shape(self, output_shape):\n return self._shape_helper(\"inverse_event_shape\", output_shape,\n reverse=False)\n\n def _inverse_event_shape_tensor(self, output_shape):\n return self._shape_helper(\"inverse_event_shape_tensor\", output_shape,\n reverse=False)\n\n def _inverse(self, y, **kwargs):\n for b in self.bijectors:\n y = b.inverse(y, **kwargs.get(b.name, {}))\n return y\n\n def _inverse_log_det_jacobian(self, y, **kwargs):\n y = tf.convert_to_tensor(value=y, name=\"y\")\n ildj = tf.cast(0., dtype=dtype_util.base_dtype(y.dtype))\n\n if not self.bijectors:\n return ildj\n\n event_ndims = self._maybe_get_static_event_ndims(\n self.inverse_min_event_ndims)\n\n if _use_static_shape(y, event_ndims):\n event_shape = y.shape[tensorshape_util.rank(y.shape) - event_ndims:]\n else:\n event_shape = tf.shape(input=y)[tf.rank(y) - event_ndims:]\n\n # TODO(b/129973548): Document and simplify.\n for b in self.bijectors:\n ildj = ildj + b.inverse_log_det_jacobian(\n y, event_ndims=event_ndims, **kwargs.get(b.name, {}))\n\n if _use_static_shape(y, event_ndims):\n event_shape = b.inverse_event_shape(event_shape)\n event_ndims = self._maybe_get_static_event_ndims(\n 
tensorshape_util.rank(event_shape))\n else:\n event_shape = b.inverse_event_shape_tensor(event_shape)\n event_shape_ = distribution_util.maybe_get_static_value(event_shape)\n event_ndims = tf.size(input=event_shape)\n event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)\n\n if event_ndims_ is not None and event_shape_ is not None:\n event_ndims = event_ndims_\n event_shape = event_shape_\n\n y = b.inverse(y, **kwargs.get(b.name, {}))\n return ildj\n\n def _forward(self, x, **kwargs):\n for b in reversed(self.bijectors):\n x = b.forward(x, **kwargs.get(b.name, {}))\n return x\n\n def _forward_log_det_jacobian(self, x, **kwargs):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n\n fldj = tf.cast(0., dtype=dtype_util.base_dtype(x.dtype))\n\n if not self.bijectors:\n return fldj\n\n event_ndims = self._maybe_get_static_event_ndims(\n self.forward_min_event_ndims)\n\n if _use_static_shape(x, event_ndims):\n event_shape = x.shape[tensorshape_util.rank(x.shape) - event_ndims:]\n else:\n event_shape = tf.shape(input=x)[tf.rank(x) - event_ndims:]\n\n # TODO(b/129973548): Document and simplify.\n for b in reversed(self.bijectors):\n fldj = fldj + b.forward_log_det_jacobian(\n x, event_ndims=event_ndims, **kwargs.get(b.name, {}))\n if _use_static_shape(x, event_ndims):\n event_shape = b.forward_event_shape(event_shape)\n event_ndims = self._maybe_get_static_event_ndims(\n tensorshape_util.rank(event_shape))\n else:\n event_shape = b.forward_event_shape_tensor(event_shape)\n event_shape_ = distribution_util.maybe_get_static_value(event_shape)\n event_ndims = tf.size(input=event_shape)\n event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)\n\n if event_ndims_ is not None and event_shape_ is not None:\n event_ndims = event_ndims_\n event_shape = event_shape_\n\n x = b.forward(x, **kwargs.get(b.name, {}))\n\n return fldj\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The VariationalGaussianProcess distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import gaussian_process\nfrom tensorflow_probability.python.distributions import independent\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.distributions import mvn_linear_operator\nfrom tensorflow_probability.python.distributions import normal\nfrom tensorflow_probability.python.internal import dtype_util\n\n__all__ = [\n 'VariationalGaussianProcess',\n]\n\n\ndef _add_diagonal_shift(matrix, shift):\n return tf.linalg.set_diag(\n matrix, tf.linalg.diag_part(matrix) + shift, name='add_diagonal_shift')\n\n\ndef _solve_cholesky_factored_system(\n cholesky_factor, rhs, name=None):\n with tf.name_scope(\n name or '_solve_cholesky_factored_system') as scope:\n 
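# The nested triangular solves below compute (L @ L^T)^{-1} @ rhs from the\n # Cholesky factor L: the inner solve applies L^{-1} and the outer solve,\n # with adjoint=True, applies L^{-T}.\n 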
cholesky_factor = tf.convert_to_tensor(value=cholesky_factor,\n name='cholesky_factor')\n rhs = tf.convert_to_tensor(value=rhs, name='rhs')\n lin_op = tf.linalg.LinearOperatorLowerTriangular(\n cholesky_factor, name=scope)\n return lin_op.solve(lin_op.solve(rhs), adjoint=True)\n\n\ndef _solve_cholesky_factored_system_vec(cholesky_factor, rhs, name=None):\n with tf.name_scope(\n name or '_solve_cholesky_factored_system') as scope:\n cholesky_factor = tf.convert_to_tensor(\n value=cholesky_factor, name='cholesky_factor')\n rhs = tf.convert_to_tensor(value=rhs, name='rhs')\n lin_op = tf.linalg.LinearOperatorLowerTriangular(\n cholesky_factor, name=scope)\n return lin_op.solvevec(lin_op.solvevec(rhs), adjoint=True)\n\n\nclass VariationalGaussianProcess(\n mvn_linear_operator.MultivariateNormalLinearOperator):\n \"\"\"Posterior predictive of a variational Gaussian process.\n\n This distribution implements the variational Gaussian process (VGP), as\n described in [Titsias, 2009][1] and [Hensman, 2013][2]. The VGP is an\n inducing point-based approximation of an exact GP posterior\n (see Mathematical Details, below). Ultimately, this Distribution class\n represents a marginal distribution over function values at a\n collection of `index_points`. It is parameterized by\n\n - a kernel function,\n - a mean function,\n - the (scalar) observation noise variance of the normal likelihood,\n - a set of index points,\n - a set of inducing index points, and\n - the parameters of the (full-rank, Gaussian) variational posterior\n distribution over function values at the inducing points, conditional on\n some observations.\n\n A VGP is \"trained\" by selecting any kernel parameters, the locations of the\n inducing index points, and the variational parameters. [Titsias, 2009][1] and\n [Hensman, 2013][2] describe a variational lower bound on the marginal log\n likelihood of observed data, which this class offers through the\n `variational_loss` method (this is the negative lower bound, for convenience\n when plugging into a TF Optimizer's `minimize` function).\n Training may be done in minibatches.\n\n [Titsias, 2009][1] describes a closed form for the optimal variational\n parameters, in the case of sufficiently small observational data (i.e.,\n small enough to fit in memory but big enough to warrant approximating the GP\n posterior). A method to compute these optimal parameters in terms of the full\n observational data set is provided as a staticmethod,\n `optimal_variational_posterior`. It returns a\n `MultivariateNormalLinearOperator` instance with optimal location and\n scale parameters.\n\n #### Mathematical Details\n\n ##### Notation\n\n We will in general be concerned about three collections of index points, and\n it'll be good to give them names:\n\n * `x[1], ..., x[N]`: observation index points -- locations of our observed\n data.\n * `z[1], ..., z[M]`: inducing index points -- locations of the\n \"summarizing\" inducing points\n * `t[1], ..., t[P]`: predictive index points -- locations where we are\n making posterior predictions based on observations and the variational\n parameters.\n\n To lighten notation, we'll use `X, Z, T` to denote the above collections.\n Similarly, we'll denote by `f(X)` the collection of function values at each of\n the `x[i]`, and by `Y`, the collection of (noisy) observed data at each\n `x[i]`.\n We'll denote kernel matrices generated from pairs of index points as `K_tt`,\n `K_xt`, `K_tz`, etc., e.g.,\n\n ```none\n | k(t[1], z[1]) k(t[1], z[2]) ... 
k(t[1], z[M]) |\n K_tz = | k(t[2], z[1]) k(t[2], z[2]) ... k(t[2], z[M]) |\n | ... ... ... |\n | k(t[P], z[1]) k(t[P], z[2]) ... k(t[P], z[M]) |\n ```\n\n ##### Preliminaries\n\n A Gaussian process is an indexed collection of random variables, any finite\n collection of which are jointly Gaussian. Typically, the index set is some\n finite-dimensional, real vector space, and indeed we make this assumption in\n what follows. The GP may then be thought of as a distribution over functions\n on the index set. Samples from the GP are functions *on the whole index set*;\n these can't be represented in finite compute memory, so one typically works\n with the marginals at a finite collection of index points. The properties of\n the GP are entirely determined by its mean function `m` and covariance\n function `k`. The generative process, assuming a mean-zero normal likelihood\n with stddev `sigma`, is\n\n ```none\n f ~ GP(m, k)\n\n Y | f(X) ~ Normal(f(X), sigma), i = 1, ... , N\n ```\n\n In finite terms (i.e., marginalizing out all but a finite number of f(X)'s),\n we can write\n\n ```none\n f(X) ~ MVN(loc=m(X), cov=K_xx)\n\n Y | f(X) ~ Normal(f(X), sigma), i = 1, ... , N\n ```\n\n Posterior inference is possible in analytical closed form but becomes\n intractable as data sizes get large. See [Rasmussen, 2006][3] for details.\n\n ##### The VGP\n\n The VGP is an inducing point-based approximation of an exact GP posterior,\n where two approximating assumptions have been made:\n\n 1. function values at non-inducing points are mutually independent\n conditioned on function values at the inducing points,\n 2. the (expensive) posterior over function values at inducing points\n conditional on observations is replaced with an arbitrary (learnable)\n full-rank Gaussian distribution,\n\n ```none\n q(f(Z)) = MVN(loc=m, scale=S),\n ```\n\n where `m` and `S` are parameters to be chosen by optimizing an evidence\n lower bound (ELBO).\n\n The posterior predictive distribution becomes\n\n ```none\n q(f(T)) = integral df(Z) p(f(T) | f(Z)) q(f(Z))\n = MVN(loc = A @ m, scale = B^(1/2))\n ```\n\n where\n\n ```none\n A = K_tz @ K_zz^-1\n B = K_tt - A @ (K_zz - S S^T) A^T\n ```\n\n ***The approximate posterior predictive distribution `q(f(T))` is what the\n `VariationalGaussianProcess` class represents.***\n\n Model selection in this framework entails choosing the kernel parameters,\n inducing point locations, and variational parameters. We do this by optimizing\n a variational lower bound on the marginal log likelihood of observed data. The\n lower bound takes the following form (see [Titsias, 2009][1] and\n [Hensman, 2013][2] for details on the derivation):\n\n ```none\n L(Z, m, S, Y) = (\n MVN(loc=(K_xz @ K_zz^-1) @ m, scale_diag=sigma).log_prob(Y) -\n (Tr(K_xx - K_xz @ K_zz^-1 @ K_zx) +\n Tr(S @ S^T @ K_zz^-1 @ K_zx @ K_xz @ K_zz^-1)) / (2 * sigma^2) -\n KL(q(f(Z)) || p(f(Z))))\n ```\n\n where in the final KL term, `p(f(Z))` is the GP prior on inducing point\n function values. This variational lower bound can be computed on minibatches\n of the full data set `(X, Y)`. A method to compute the *negative* variational\n lower bound is implemented as `VariationalGaussianProcess.variational_loss`.\n\n ##### Optimal variational parameters\n\n As described in [Titsias, 2009][1], a closed form optimum for the variational\n location and scale parameters, `m` and `S`, can be computed when the\n observational data are not prohibitively voluminous. 
The\n `optimal_variational_posterior` function computes the optimal variational\n posterior distribution over inducing point function values in terms of the GP\n parameters (mean and kernel functions), inducing point locations, observation\n index points, and observations. Note that the inducing index point locations\n must still be optimized even when the variational parameters are known\n functions of the inducing index points. The optimal parameters are computed\n as follows:\n\n ```none\n C = (K_zz + sigma^-2 K_zx @ K_xz)^-1\n\n optimal Gaussian covariance: K_zz @ C @ K_zz\n optimal Gaussian location: sigma^-2 K_zz @ C @ K_zx @ Y\n ```\n\n #### Usage Examples\n\n Here's an example of defining and training a VariationalGaussianProcess on\n some toy generated data.\n\n ```python\n # We'll use double precision throughout for better numerics.\n dtype = np.float64\n\n # Generate noisy data from a known function.\n f = lambda x: np.exp(-x[..., 0]**2 / 20.) * np.sin(1. * x[..., 0])\n true_observation_noise_variance_ = dtype(1e-1) ** 2\n\n num_training_points_ = 100\n x_train_ = np.stack(\n [np.random.uniform(-6., 0., [num_training_points_ // 2, 1]),\n np.random.uniform(1., 10., [num_training_points_ // 2, 1])],\n axis=0).astype(dtype)\n y_train_ = (f(x_train_) +\n np.random.normal(\n 0., np.sqrt(true_observation_noise_variance_),\n [num_training_points_]).astype(dtype))\n\n # Create kernel with trainable parameters, and trainable observation noise\n # variance variable. Each of these is constrained to be positive.\n amplitude = (tf.nn.softplus(tf.Variable(-1., dtype=dtype, name='amplitude')))\n length_scale = (1e-5 +\n tf.nn.softplus(\n tf.Variable(-3., dtype=dtype, name='length_scale')))\n kernel = tfk.ExponentiatedQuadratic(\n amplitude=amplitude,\n length_scale=length_scale)\n\n observation_noise_variance = tf.nn.softplus(\n tf.Variable(0, dtype=dtype, name='observation_noise_variance'))\n\n # Create trainable inducing point locations and variational parameters.\n num_inducing_points_ = 20\n\n inducing_index_points = tf.Variable(\n initial_inducing_points_, dtype=dtype,\n name='inducing_index_points')\n variational_inducing_observations_loc = tf.Variable(\n np.zeros([num_inducing_points_], dtype=dtype),\n name='variational_inducing_observations_loc')\n variational_inducing_observations_scale = tf.Variable(\n np.eye(num_inducing_points_, dtype=dtype),\n name='variational_inducing_observations_scale')\n\n # These are the index point locations over which we'll construct the\n # (approximate) posterior predictive distribution.\n num_predictive_index_points_ = 500\n index_points_ = np.linspace(-13, 13,\n num_predictive_index_points_,\n dtype=dtype)[..., np.newaxis]\n\n\n # Construct our variational GP Distribution instance.\n vgp = tfd.VariationalGaussianProcess(\n kernel,\n index_points=index_points_,\n inducing_index_points=inducing_index_points,\n variational_inducing_observations_loc=variational_inducing_observations_loc,\n variational_inducing_observations_scale=variational_inducing_observations_scale,\n observation_noise_variance=observation_noise_variance)\n\n # For training, we use some simplistic numpy-based minibatching.\n batch_size = 64\n x_train_batch = tf.placeholder(dtype, [batch_size, 1], name='x_train_batch')\n y_train_batch = tf.placeholder(dtype, [batch_size], name='y_train_batch')\n\n # Create the loss function we want to optimize.\n loss = vgp.variational_loss(\n observations=y_train_batch,\n observation_index_points=x_train_batch,\n kl_weight=float(batch_size) / 
float(num_training_points_))\n\n optimizer = tf.train.AdamOptimizer()\n train_op = optimizer.minimize(loss)\n\n num_iters = 10000\n num_logs = 10\n with tf.Session() as sess:\n for i in range(num_iters):\n batch_idxs = np.random.randint(num_training_points_, size=[batch_size])\n x_train_batch_ = x_train_[batch_idxs, ...]\n y_train_batch_ = y_train_[batch_idxs]\n\n [_, loss_] = sess.run([train_op, loss],\n feed_dict={x_train_batch: x_train_batch_,\n y_train_batch: y_train_batch_})\n if i % (num_iters / num_logs) == 0 or i + 1 == num_iters:\n print(i, loss_)\n\n # Generate a plot with\n # - the posterior predictive mean\n # - training data\n # - inducing index points (plotted vertically at the mean of the variational\n # posterior over inducing point function values)\n # - 50 posterior predictive samples\n\n num_samples = 50\n [\n samples_,\n mean_,\n inducing_index_points_,\n variational_loc_,\n ] = sess.run([\n vgp.sample(num_samples),\n vgp.mean(),\n inducing_index_points,\n variational_inducing_observations_loc\n ])\n plt.figure(figsize=(15, 5))\n plt.scatter(inducing_index_points_[..., 0], variational_loc_,\n marker='x', s=50, color='k', zorder=10)\n plt.scatter(x_train_[..., 0], y_train_, color='#00ff00', zorder=9)\n plt.plot(np.tile(index_points_, num_samples),\n samples_.T, color='r', alpha=.1)\n plt.plot(index_points_, mean_, color='k')\n plt.plot(index_points_, f(index_points_), color='b')\n ```\n\n # Here we use the same data setup, but compute the optimal variational\n # parameters instead of training them.\n ```python\n # We'll use double precision throughout for better numerics.\n dtype = np.float64\n\n # Generate noisy data from a known function.\n f = lambda x: np.exp(-x[..., 0]**2 / 20.) * np.sin(1. * x[..., 0])\n true_observation_noise_variance_ = dtype(1e-1) ** 2\n\n num_training_points_ = 1000\n x_train_ = np.random.uniform(-10., 10., [num_training_points_, 1])\n y_train_ = (f(x_train_) +\n np.random.normal(\n 0., np.sqrt(true_observation_noise_variance_),\n [num_training_points_]))\n\n # Create kernel with trainable parameters, and trainable observation noise\n # variance variable. 
Each of these is constrained to be positive.\n amplitude = (tf.nn.softplus(\n tf.Variable(.54, dtype=dtype, name='amplitude', use_resource=True)))\n length_scale = (\n 1e-5 +\n tf.nn.softplus(\n tf.Variable(.54, dtype=dtype, name='length_scale', use_resource=True)))\n kernel = tfk.ExponentiatedQuadratic(\n amplitude=amplitude,\n length_scale=length_scale)\n\n observation_noise_variance = tf.nn.softplus(\n tf.Variable(\n .54, dtype=dtype, name='observation_noise_variance', use_resource=True))\n\n # Create trainable inducing point locations and variational parameters.\n num_inducing_points_ = 10\n\n inducing_index_points = tf.Variable(\n np.linspace(-10., 10., num_inducing_points_)[..., np.newaxis],\n dtype=dtype, name='inducing_index_points', use_resource=True)\n\n variational_loc, variational_scale = (\n tfd.VariationalGaussianProcess.optimal_variational_posterior(\n kernel=kernel,\n inducing_index_points=inducing_index_points,\n observation_index_points=x_train_,\n observations=y_train_,\n observation_noise_variance=observation_noise_variance))\n\n # These are the index point locations over which we'll construct the\n # (approximate) posterior predictive distribution.\n num_predictive_index_points_ = 500\n index_points_ = np.linspace(-13, 13,\n num_predictive_index_points_,\n dtype=dtype)[..., np.newaxis]\n\n # Construct our variational GP Distribution instance.\n vgp = tfd.VariationalGaussianProcess(\n kernel,\n index_points=index_points_,\n inducing_index_points=inducing_index_points,\n variational_inducing_observations_loc=variational_loc,\n variational_inducing_observations_scale=variational_scale,\n observation_noise_variance=observation_noise_variance)\n\n # For training, we use some simplistic numpy-based minibatching.\n batch_size = 64\n x_train_batch = tf.placeholder(dtype, [batch_size, 1], name='x_train_batch')\n y_train_batch = tf.placeholder(dtype, [batch_size], name='y_train_batch')\n\n # Create the loss function we want to optimize.\n loss = vgp.variational_loss(\n observations=y_train_batch,\n observation_index_points=x_train_batch,\n kl_weight=float(batch_size) / float(num_training_points_))\n\n optimizer = tf.train.AdamOptimizer(learning_rate=.01)\n train_op = optimizer.minimize(loss)\n\n num_iters = 300\n num_logs = 10\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(num_iters):\n batch_idxs = np.random.randint(num_training_points_, size=[batch_size])\n x_train_batch_ = x_train_[batch_idxs, ...]\n y_train_batch_ = y_train_[batch_idxs]\n\n [_, loss_] = sess.run([train_op, loss],\n feed_dict={x_train_batch: x_train_batch_,\n y_train_batch: y_train_batch_})\n if i % (num_iters / num_logs) == 0 or i + 1 == num_iters:\n print(i, loss_)\n\n # Generate a plot with\n # - the posterior predictive mean\n # - training data\n # - inducing index points (plotted vertically at the mean of the\n # variational posterior over inducing point function values)\n # - 50 posterior predictive samples\n\n num_samples = 50\n [\n samples_,\n mean_,\n inducing_index_points_,\n variational_loc_,\n ] = sess.run([\n vgp.sample(num_samples),\n vgp.mean(),\n inducing_index_points,\n variational_loc\n ])\n plt.figure(figsize=(15, 5))\n plt.scatter(inducing_index_points_[..., 0], variational_loc_,\n marker='x', s=50, color='k', zorder=10)\n plt.scatter(x_train_[..., 0], y_train_, color='#00ff00', alpha=.1, zorder=9)\n plt.plot(np.tile(index_points_, num_samples),\n samples_.T, color='r', alpha=.1)\n plt.plot(index_points_, mean_, color='k')\n 
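# Overlay the true function f (blue) for comparison with the posterior\n # mean (black) plotted above.\n 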
plt.plot(index_points_, f(index_points_), color='b')\n\n ```\n\n #### References\n\n [1]: Titsias, M. \"Variational Model Selection for Sparse Gaussian Process\n Regression\", 2009.\n http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf\n [2]: Hensman, J., Lawrence, N. \"Gaussian Processes for Big Data\", 2013\n https://arxiv.org/abs/1309.6835\n [3]: Carl Rasmussen, Chris Williams. Gaussian Processes For Machine Learning,\n 2006. http://www.gaussianprocess.org/gpml/\n \"\"\"\n\n def __init__(self,\n kernel,\n index_points,\n inducing_index_points,\n variational_inducing_observations_loc,\n variational_inducing_observations_scale,\n mean_fn=None,\n observation_noise_variance=0.,\n predictive_noise_variance=0.,\n jitter=1e-6,\n validate_args=False,\n allow_nan_stats=False,\n name='VariationalGaussianProcess'):\n \"\"\"Instantiate a VariationalGaussianProcess Distribution.\n\n Args:\n kernel: `PositiveSemidefiniteKernel`-like instance representing the\n GP's covariance function.\n index_points: `float` `Tensor` representing finite (batch of) vector(s) of\n points in the index set over which the VGP is defined. Shape has the\n form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e1` is the number\n (size) of index points in each batch (we denote it `e1` to distinguish\n it from the number of inducing index points, denoted `e2` below).\n Ultimately the VariationalGaussianProcess distribution corresponds to an\n `e1`-dimensional multivariate normal. The batch shape must be\n broadcastable with `kernel.batch_shape`, the batch shape of\n `inducing_index_points`, and any batch dims yielded by `mean_fn`.\n inducing_index_points: `float` `Tensor` of locations of inducing points in\n the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just\n like `index_points`. The batch shape components needn't be identical to\n those of `index_points`, but must be broadcast compatible with them.\n variational_inducing_observations_loc: `float` `Tensor`; the mean of the\n (full-rank Gaussian) variational posterior over function values at the\n inducing points, conditional on observed data. Shape has the form `[b1,\n ..., bB, e2]`, where `b1, ..., bB` is broadcast compatible with other\n parameters' batch shapes, and `e2` is the number of inducing points.\n variational_inducing_observations_scale: `float` `Tensor`; the scale\n matrix of the (full-rank Gaussian) variational posterior over function\n values at the inducing points, conditional on observed data. Shape has\n the form `[b1, ..., bB, e2, e2]`, where `b1, ..., bB` is broadcast\n compatible with other parameters and `e2` is the number of inducing\n points.\n mean_fn: Python `callable` that acts on index points to produce a (batch\n of) vector(s) of mean values at those index points. Takes a `Tensor` of\n shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is\n (broadcastable with) `[b1, ..., bB]`. Default value: `None` implies\n constant zero function.\n observation_noise_variance: `float` `Tensor` representing the variance\n of the noise in the Normal likelihood distribution of the model. May be\n batched, in which case the batch shape must be broadcastable with the\n shapes of all other batched parameters (`kernel.batch_shape`,\n `index_points`, etc.).\n Default value: `0.`\n predictive_noise_variance: `float` `Tensor` representing additional\n variance in the posterior predictive model. 
If `None`, we simply re-use\n `observation_noise_variance` for the posterior predictive noise. If set\n explicitly, however, we use the given value. This allows us, for\n example, to omit predictive noise variance (by setting this to zero) to\n obtain noiseless posterior predictions of function values, conditioned\n on noisy observations.\n jitter: `float` scalar `Tensor` added to the diagonal of the covariance\n matrix to ensure positive definiteness of the covariance matrix.\n Default value: `1e-6`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n allow_nan_stats: Python `bool`, default `False`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n Default value: `False`.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"VariationalGaussianProcess\".\n\n Raises:\n ValueError: if `mean_fn` is not `None` and is not callable.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(\n name or 'VariationalGaussianProcess') as name:\n dtype = dtype_util.common_dtype(\n [kernel,\n index_points,\n inducing_index_points,\n variational_inducing_observations_loc,\n variational_inducing_observations_scale,\n observation_noise_variance,\n predictive_noise_variance,\n jitter], tf.float32)\n\n index_points = tf.convert_to_tensor(\n value=index_points, dtype=dtype, name='index_points')\n inducing_index_points = tf.convert_to_tensor(\n value=inducing_index_points, dtype=dtype,\n name='inducing_index_points')\n variational_inducing_observations_loc = tf.convert_to_tensor(\n value=variational_inducing_observations_loc, dtype=dtype,\n name='variational_inducing_observations_loc')\n variational_inducing_observations_scale = tf.convert_to_tensor(\n value=variational_inducing_observations_scale, dtype=dtype,\n name='variational_inducing_observations_scale')\n observation_noise_variance = tf.convert_to_tensor(\n value=observation_noise_variance,\n dtype=dtype,\n name='observation_noise_variance')\n if predictive_noise_variance is None:\n predictive_noise_variance = observation_noise_variance\n else:\n predictive_noise_variance = tf.convert_to_tensor(\n value=predictive_noise_variance, dtype=dtype,\n name='predictive_noise_variance')\n jitter = tf.convert_to_tensor(\n value=jitter, dtype=dtype, name='jitter')\n\n self._kernel = kernel\n self._index_points = index_points\n self._inducing_index_points = inducing_index_points\n self._variational_inducing_observations_posterior = (\n mvn_linear_operator.MultivariateNormalLinearOperator(\n loc=variational_inducing_observations_loc,\n scale=tf.linalg.LinearOperatorFullMatrix(\n variational_inducing_observations_scale),\n name='variational_inducing_observations_posterior'))\n\n # Default to a constant zero function, borrowing the dtype from\n # index_points to ensure consistency.\n if mean_fn is None:\n mean_fn = lambda x: tf.zeros([1], dtype=dtype)\n else:\n if not callable(mean_fn):\n raise ValueError('`mean_fn` must be a Python callable')\n self._mean_fn = mean_fn\n self._observation_noise_variance = observation_noise_variance\n self._predictive_noise_variance = predictive_noise_variance\n self._jitter = jitter\n\n with tf.name_scope('init'):\n # We let t and 
z denote predictive and inducing index points, resp.\n kzz = _add_diagonal_shift(\n kernel.matrix(inducing_index_points, inducing_index_points),\n jitter)\n\n self._chol_kzz = tf.linalg.cholesky(kzz)\n self._kzz_inv_varloc = _solve_cholesky_factored_system_vec(\n self._chol_kzz,\n (variational_inducing_observations_loc -\n mean_fn(inducing_index_points)),\n name='kzz_inv_varloc')\n\n loc, scale = self._compute_posterior_predictive_params()\n\n super(VariationalGaussianProcess, self).__init__(\n loc=loc,\n scale=scale,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=name)\n self._parameters = parameters\n self._graph_parents = [\n index_points,\n inducing_index_points,\n variational_inducing_observations_loc,\n variational_inducing_observations_scale,\n observation_noise_variance,\n jitter]\n\n def _compute_posterior_predictive_params(self):\n ktt = _add_diagonal_shift(\n self._kernel.matrix(self._index_points, self._index_points),\n self._jitter)\n kzt = tf.linalg.LinearOperatorFullMatrix(\n self._kernel.matrix(self._inducing_index_points, self._index_points))\n\n kzz_inv_kzt = tf.linalg.LinearOperatorFullMatrix(\n _solve_cholesky_factored_system(\n self._chol_kzz, kzt.to_dense(), name='kzz_inv_kzt'))\n\n var_cov = tf.linalg.LinearOperatorFullMatrix(\n self._variational_inducing_observations_posterior.covariance())\n posterior_predictive_cov = (\n ktt -\n kzt.matmul(kzz_inv_kzt.to_dense(), adjoint=True) +\n kzz_inv_kzt.matmul(var_cov.matmul(kzz_inv_kzt.to_dense()),\n adjoint=True))\n\n # Add predictive_noise_variance\n posterior_predictive_cov = _add_diagonal_shift(\n posterior_predictive_cov, self._predictive_noise_variance)\n # Cache the covariance matrix so `_covariance` below has it available.\n self._covariance_matrix = posterior_predictive_cov\n\n scale = tf.linalg.LinearOperatorLowerTriangular(\n tf.linalg.cholesky(posterior_predictive_cov))\n\n loc = (self._mean_fn(self._index_points) +\n kzt.matvec(self._kzz_inv_varloc, adjoint=True))\n\n return loc, scale\n\n @property\n def mean_fn(self):\n return self._mean_fn\n\n @property\n def kernel(self):\n return self._kernel\n\n @property\n def index_points(self):\n return self._index_points\n\n @property\n def inducing_index_points(self):\n return self._inducing_index_points\n\n @property\n def variational_inducing_observations_loc(self):\n return self._variational_inducing_observations_posterior.loc\n\n @property\n def variational_inducing_observations_scale(self):\n return self._variational_inducing_observations_posterior.scale\n\n @property\n def observation_noise_variance(self):\n return self._observation_noise_variance\n\n @property\n def predictive_noise_variance(self):\n return self._predictive_noise_variance\n\n @property\n def jitter(self):\n return self._jitter\n\n def _covariance(self):\n return self._covariance_matrix\n\n def variational_loss(self,\n observations,\n observation_index_points=None,\n kl_weight=1.,\n name='variational_loss'):\n \"\"\"Variational loss for the VGP.\n\n Given `observations` and `observation_index_points`, compute the\n negative variational lower bound as specified in [Hensman, 2013][1].\n\n Args:\n observations: `float` `Tensor` representing collection, or batch of\n collections, of observations corresponding to\n `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which\n must be broadcastable with the batch and example shapes of\n `observation_index_points`. 
The batch shape `[b1, ..., bB]` must be\n broadcastable with the shapes of all other batched parameters\n (`kernel.batch_shape`, `observation_index_points`, etc.).\n observation_index_points: `float` `Tensor` representing finite (batch of)\n vector(s) of points where observations are defined. Shape has the\n form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e1` is the number\n (size) of index points in each batch (we denote it `e1` to distinguish\n it from the numer of inducing index points, denoted `e2` below). If\n set to `None` uses `index_points` as the origin for observations.\n Default value: None.\n kl_weight: Amount by which to scale the KL divergence loss between prior\n and posterior.\n Default value: 1.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"GaussianProcess\".\n Returns:\n loss: Scalar tensor representing the negative variational lower bound.\n Can be directly used in a `tf.Optimizer`.\n Raises:\n ValueError: if `mean_fn` is not `None` and is not callable.\n\n #### References\n\n [1]: Hensman, J., Lawrence, N. \"Gaussian Processes for Big Data\", 2013\n https://arxiv.org/abs/1309.6835\n \"\"\"\n\n with tf.name_scope(name or 'variational_gp_loss'):\n if observation_index_points is None:\n observation_index_points = self._index_points\n observation_index_points = tf.convert_to_tensor(\n value=observation_index_points, dtype=self._dtype,\n name='observation_index_points')\n observations = tf.convert_to_tensor(\n value=observations, dtype=self._dtype, name='observations')\n kl_weight = tf.convert_to_tensor(\n value=kl_weight, dtype=self._dtype,\n name='kl_weight')\n\n # The variational loss is a negative ELBO. The ELBO can be broken down\n # into three terms:\n # 1. a likelihood term\n # 2. 
a trace term arising from the covariance of the posterior predictive\n\n kzx = self.kernel.matrix(self._inducing_index_points,\n observation_index_points)\n\n kzx_linop = tf.linalg.LinearOperatorFullMatrix(kzx)\n loc = (self._mean_fn(observation_index_points) +\n kzx_linop.matvec(self._kzz_inv_varloc, adjoint=True))\n\n likelihood = independent.Independent(\n normal.Normal(\n loc=loc,\n scale=tf.sqrt(self._observation_noise_variance + self._jitter),\n name='NormalLikelihood'),\n reinterpreted_batch_ndims=1)\n obs_ll = likelihood.log_prob(observations)\n\n chol_kzz_linop = tf.linalg.LinearOperatorLowerTriangular(self._chol_kzz)\n chol_kzz_inv_kzx = chol_kzz_linop.solve(kzx)\n kzz_inv_kzx = chol_kzz_linop.solve(chol_kzz_inv_kzx, adjoint=True)\n\n kxx_diag = self.kernel.apply(\n observation_index_points, observation_index_points, example_ndims=1)\n ktilde_trace_term = (\n tf.reduce_sum(input_tensor=kxx_diag, axis=-1) -\n tf.reduce_sum(input_tensor=chol_kzz_inv_kzx ** 2, axis=[-2, -1]))\n\n # Tr(SB)\n # where S = A A.T, A = variational_inducing_observations_scale\n # and B = Kzz^-1 Kzx Kzx.T Kzz^-1\n #\n # Now Tr(SB) = Tr(A A.T Kzz^-1 Kzx Kzx.T Kzz^-1)\n # = Tr(A.T Kzz^-1 Kzx Kzx.T Kzz^-1 A)\n # = sum_ij (A.T Kzz^-1 Kzx)_{ij}^2\n other_trace_term = tf.reduce_sum(\n input_tensor=(\n self._variational_inducing_observations_posterior.scale.matmul(\n kzz_inv_kzx) ** 2),\n axis=[-2, -1])\n\n trace_term = (.5 * (ktilde_trace_term + other_trace_term) /\n self._observation_noise_variance)\n\n inducing_prior = gaussian_process.GaussianProcess(\n kernel=self._kernel,\n mean_fn=self._mean_fn,\n index_points=self._inducing_index_points,\n observation_noise_variance=self._observation_noise_variance)\n\n kl_term = kl_weight * kullback_leibler.kl_divergence(\n self._variational_inducing_observations_posterior,\n inducing_prior)\n\n lower_bound = (obs_ll - trace_term - kl_term)\n\n return -tf.reduce_mean(input_tensor=lower_bound)\n\n @staticmethod\n def optimal_variational_posterior(\n kernel,\n inducing_index_points,\n observation_index_points,\n observations,\n observation_noise_variance,\n mean_fn=None,\n jitter=1e-6,\n name=None):\n \"\"\"Model selection for optimal variational hyperparameters.\n\n Given the full training set (parameterized by `observations` and\n `observation_index_points`), compute the optimal variational\n location and scale for the VGP. This is based of the method suggested\n in [Titsias, 2009][1].\n\n Args:\n kernel: `PositiveSemidefiniteKernel`-like instance representing the\n GP's covariance function.\n inducing_index_points: `float` `Tensor` of locations of inducing points in\n the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just\n like `observation_index_points`. The batch shape components needn't be\n identical to those of `observation_index_points`, but must be broadcast\n compatible with them.\n observation_index_points: `float` `Tensor` representing finite (batch of)\n vector(s) of points where observations are defined. Shape has the\n form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e1` is the number\n (size) of index points in each batch (we denote it `e1` to distinguish\n it from the numer of inducing index points, denoted `e2` below).\n observations: `float` `Tensor` representing collection, or batch of\n collections, of observations corresponding to\n `observation_index_points`. 
Shape has the form `[b1, ..., bB, e]`, which\n must be brodcastable with the batch and example shapes of\n `observation_index_points`. The batch shape `[b1, ..., bB]` must be\n broadcastable with the shapes of all other batched parameters\n (`kernel.batch_shape`, `observation_index_points`, etc.).\n observation_noise_variance: `float` `Tensor` representing the variance\n of the noise in the Normal likelihood distribution of the model. May be\n batched, in which case the batch shape must be broadcastable with the\n shapes of all other batched parameters (`kernel.batch_shape`,\n `index_points`, etc.).\n Default value: `0.`\n mean_fn: Python `callable` that acts on index points to produce a (batch\n of) vector(s) of mean values at those index points. Takes a `Tensor` of\n shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is\n (broadcastable with) `[b1, ..., bB]`. Default value: `None` implies\n constant zero function.\n jitter: `float` scalar `Tensor` added to the diagonal of the covariance\n matrix to ensure positive definiteness of the covariance matrix.\n Default value: `1e-6`.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"optimal_variational_posterior\".\n Returns:\n loc, scale: Tuple representing the variational location and scale.\n Raises:\n ValueError: if `mean_fn` is not `None` and is not callable.\n\n #### References\n\n [1]: Titsias, M. \"Variational Model Selection for Sparse Gaussian Process\n Regression\", 2009.\n http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf\n \"\"\"\n\n with tf.name_scope(name or 'optimal_variational_posterior'):\n dtype = dtype_util.common_dtype(\n [inducing_index_points,\n observation_index_points,\n observations,\n observation_noise_variance,\n jitter], tf.float32)\n\n inducing_index_points = tf.convert_to_tensor(\n value=inducing_index_points,\n dtype=dtype, name='inducing_index_points')\n observation_index_points = tf.convert_to_tensor(\n value=observation_index_points, dtype=dtype,\n name='observation_index_points')\n observations = tf.convert_to_tensor(\n value=observations, dtype=dtype, name='observations')\n observation_noise_variance = tf.convert_to_tensor(\n value=observation_noise_variance,\n dtype=dtype,\n name='observation_noise_variance')\n jitter = tf.convert_to_tensor(\n value=jitter, dtype=dtype, name='jitter')\n\n # Default to a constant zero function.\n if mean_fn is None:\n mean_fn = lambda x: tf.zeros([1], dtype=dtype)\n else:\n if not callable(mean_fn):\n raise ValueError('`mean_fn` must be a Python callable')\n\n # z are the inducing points and x are the observation index points.\n kzz = kernel.matrix(inducing_index_points, inducing_index_points)\n kzx = kernel.matrix(inducing_index_points, observation_index_points)\n\n noise_var_inv = tf.math.reciprocal(observation_noise_variance)\n\n sigma_inv = _add_diagonal_shift(\n kzz + noise_var_inv * tf.matmul(kzx, kzx, adjoint_b=True),\n jitter)\n\n chol_sigma_inv = tf.linalg.cholesky(sigma_inv)\n\n kzx_lin_op = tf.linalg.LinearOperatorFullMatrix(kzx)\n kzx_obs = kzx_lin_op.matvec(\n observations - mean_fn(observation_index_points))\n kzz_lin_op = tf.linalg.LinearOperatorFullMatrix(kzz)\n loc = (mean_fn(inducing_index_points) +\n noise_var_inv * kzz_lin_op.matvec(\n _solve_cholesky_factored_system_vec(chol_sigma_inv, kzx_obs)))\n\n chol_sigma_inv_lin_op = tf.linalg.LinearOperatorLowerTriangular(\n chol_sigma_inv)\n scale = chol_sigma_inv_lin_op.solve(kzz)\n\n return loc, scale\n", "# Copyright 2018 The TensorFlow Probability 
Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Methods for finding roots of functions of one variable.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\n\n__all__ = [\n 'secant_root',\n]\n\nRootSearchResults = collections.namedtuple(\n 'RootSearchResults',\n [\n # A tensor containing the last position explored. If the search was\n # successful, this position is a root of the objective function.\n 'estimated_root',\n # A tensor containing the value of the objective function at the last\n # position explored. If the search was successful, then this is close\n # to 0.\n 'objective_at_estimated_root',\n # The number of iterations performed.\n 'num_iterations',\n ])\n\n\ndef secant_root(objective_fn,\n initial_position,\n next_position=None,\n value_at_position=None,\n position_tolerance=1e-8,\n value_tolerance=1e-8,\n max_iterations=50,\n stopping_policy_fn=tf.reduce_all,\n validate_args=False,\n name=None):\n r\"\"\"Finds root(s) of a function of single variable using the secant method.\n\n The [secant method](https://en.wikipedia.org/wiki/Secant_method) is a\n root-finding algorithm that uses a succession of roots of secant lines to\n better approximate a root of a function. The secant method can be thought of\n as a finite-difference approximation of Newton's method.\n\n Args:\n objective_fn: Python callable for which roots are searched. It must be a\n callable of a single variable. `objective_fn` must return a `Tensor` of\n the same shape and dtype as `initial_position`.\n initial_position: `Tensor` or Python float representing the starting\n position. The function will search for roots in the neighborhood of each\n point. The shape of `initial_position` should match that of the input to\n `objective_fn`.\n next_position: Optional `Tensor` representing the next position in the\n search. If specified, this argument must broadcast with the shape of\n `initial_position` and have the same dtype. It will be used to compute the\n first step to take when searching for roots. If not specified, a default\n value will be used instead.\n Default value: `initial_position * (1 + 1e-4) + sign(initial_position) *\n 1e-4`.\n value_at_position: Optional `Tensor` or Pyhon float representing the value\n of `objective_fn` at `initial_position`. If specified, this argument must\n have the same shape and dtype as `initial_position`. If not specified, the\n value will be evaluated during the search.\n Default value: None.\n position_tolerance: Optional `Tensor` representing the tolerance for the\n estimated roots. If specified, this argument must broadcast with the shape\n of `initial_position` and have the same dtype.\n Default value: `1e-8`.\n value_tolerance: Optional `Tensor` representing the tolerance used to check\n for roots. 
If the absolute value of `objective_fn` is smaller than\n `value_tolerance` at a given position, then that position is considered a\n root for the function. If specified, this argument must broadcast with the\n shape of `initial_position` and have the same dtype.\n Default value: `1e-8`.\n max_iterations: Optional `Tensor` or Python integer specifying the maximum\n number of steps to perform for each initial position. Must broadcast with\n the shape of `initial_position`.\n Default value: `50`.\n stopping_policy_fn: Python `callable` controlling the algorithm termination.\n It must be a callable accepting a `Tensor` of booleans with the shape of\n `initial_position` (each denoting whether the search is finished for each\n starting point), and returning a scalar boolean `Tensor` (indicating\n whether the overall search should stop). Typical values are\n `tf.reduce_all` (which returns only when the search is finished for all\n points), and `tf.reduce_any` (which returns as soon as the search is\n finished for any point).\n Default value: `tf.reduce_all` (returns only when the search is finished\n for all points).\n validate_args: Python `bool` indicating whether to validate arguments such\n as `position_tolerance`, `value_tolerance`, and `max_iterations`.\n Default value: `False`.\n name: Python `str` name prefixed to ops created by this function.\n\n Returns:\n root_search_results: A Python `namedtuple` containing the following items:\n estimated_root: `Tensor` containing the last position explored. If the\n search was successful within the specified tolerance, this position is\n a root of the objective function.\n objective_at_estimated_root: `Tensor` containing the value of the\n objective function at `position`. If the search was successful within\n the specified tolerance, then this is close to 0.\n num_iterations: The number of iterations performed.\n\n Raises:\n ValueError: if a non-callable `stopping_policy_fn` is passed.\n\n #### Examples\n\n ```python\n import tensorflow as tf\n import tensorflow_probability as tfp\n tf.enable_eager_execution()\n\n # Example 1: Roots of a single function from two different starting points.\n\n f = lambda x: (63 * x**5 - 70 * x**3 + 15 * x) / 8.\n x = tf.constant([-1, 10], dtype=tf.float64)\n\n tfp.math.secant_root(objective_fn=f, initial_position=x))\n # ==> RootSearchResults(\n estimated_root=array([-0.90617985, 0.90617985]),\n objective_at_estimated_root=array([-4.81727769e-10, 7.44957651e-10]),\n num_iterations=array([ 7, 24], dtype=int32))\n\n tfp.math.secant_root(objective_fn=f,\n initial_position=x,\n stopping_policy_fn=tf.reduce_any)\n # ==> RootSearchResults(\n estimated_root=array([-0.90617985, 3.27379206]),\n objective_at_estimated_root=array([-4.81727769e-10, 2.66058312e+03]),\n num_iterations=array([7, 8], dtype=int32))\n\n # Example 2: Roots of a multiplex function from a single starting point.\n\n def f(x):\n return tf.constant([0., 63. / 8], dtype=tf.float64) * x**5 \\\n + tf.constant([5. / 2, -70. / 8], dtype=tf.float64) * x**3 \\\n + tf.constant([-3. / 2, 15. / 8], dtype=tf.float64) * x\n\n x = tf.constant([-1, -1], dtype=tf.float64)\n\n tfp.math.secant_root(objective_fn=f, initial_position=x)\n # ==> RootSearchResults(\n estimated_root=array([-0.77459667, -0.90617985]),\n objective_at_estimated_root=array([-7.81339438e-11, -4.81727769e-10]),\n num_iterations=array([7, 7], dtype=int32))\n\n # Example 3: Roots of a multiplex function from two starting points.\n\n def f(x):\n return tf.constant([0., 63. 
/ 8], dtype=tf.float64) * x**5 \\\n + tf.constant([5. / 2, -70. / 8], dtype=tf.float64) * x**3 \\\n + tf.constant([-3. / 2, 15. / 8], dtype=tf.float64) * x\n\n x = tf.constant([[-1, -1], [10, 10]], dtype=tf.float64)\n\n tfp.math.secant_root(objective_fn=f, initial_position=x)\n # ==> RootSearchResults(\n estimated_root=array([\n [-0.77459667, -0.90617985],\n [ 0.77459667, 0.90617985]]),\n objective_at_estimated_root=array([\n [-7.81339438e-11, -4.81727769e-10],\n [6.66025013e-11, 7.44957651e-10]]),\n num_iterations=array([\n [7, 7],\n [16, 24]], dtype=int32))\n ```\n \"\"\"\n if not callable(stopping_policy_fn):\n raise ValueError('stopping_policy_fn must be callable')\n\n position = tf.convert_to_tensor(\n value=initial_position,\n name='position',\n )\n value_at_position = tf.convert_to_tensor(\n value=value_at_position or objective_fn(position),\n name='value_at_position',\n dtype=position.dtype.base_dtype)\n\n zero = tf.zeros_like(position)\n position_tolerance = tf.convert_to_tensor(\n value=position_tolerance, name='position_tolerance', dtype=position.dtype)\n value_tolerance = tf.convert_to_tensor(\n value=value_tolerance, name='value_tolerance', dtype=position.dtype)\n\n num_iterations = tf.zeros_like(position, dtype=tf.int32)\n max_iterations = tf.convert_to_tensor(value=max_iterations, dtype=tf.int32)\n max_iterations = tf.broadcast_to(\n max_iterations, name='max_iterations', shape=position.shape)\n\n # Compute the step from `next_position` if present. This covers the case where\n # a user has two starting points, which bound the root or has a specific step\n # size in mind.\n if next_position is None:\n epsilon = tf.constant(1e-4, dtype=position.dtype, shape=position.shape)\n step = position * epsilon + tf.sign(position) * epsilon\n else:\n step = next_position - initial_position\n\n finished = tf.constant(False, shape=position.shape)\n\n # Negate `stopping_condition` to determine if the search should continue.\n # This means, in particular, that tf.reduce_*all* will return only when the\n # search is finished for *all* starting points.\n def _should_continue(position, value_at_position, num_iterations, step,\n finished):\n \"\"\"Indicates whether the overall search should continue.\n\n Args:\n position: `Tensor` containing the current root estimates.\n value_at_position: `Tensor` containing the value of `objective_fn` at\n `position`.\n num_iterations: `Tensor` containing the current iteration index for each\n point.\n step: `Tensor` containing the size of the step to take for each point.\n finished: `Tensor` indicating for which points the search is finished.\n\n Returns:\n A boolean value indicating whether the overall search should continue.\n \"\"\"\n del position, value_at_position, num_iterations, step # Unused\n return ~tf.convert_to_tensor(\n value=stopping_policy_fn(finished), name='should_stop', dtype=tf.bool)\n\n # For each point in `position`, the search is stopped if either:\n # (1) A root has been found\n # (2) f(position) == f(position + step)\n # (3) The maximum number of iterations has been reached\n # In case (2), the search may be stopped both before the desired tolerance is\n # achieved (or even a root is found), and the maximum number of iterations is\n # reached.\n def _body(position, value_at_position, num_iterations, step, finished):\n \"\"\"Performs one iteration of the secant root-finding algorithm.\n\n Args:\n position: `Tensor` containing the current root estimates.\n value_at_position: `Tensor` containing the value of `objective_fn` at\n 
`position`.\n num_iterations: `Tensor` containing the current iteration index for each\n point.\n step: `Tensor` containing the size of the step to take for each point.\n finished: `Tensor` indicating for which points the search is finished.\n\n Returns:\n The `Tensor`s to use for the next iteration of the algorithm.\n \"\"\"\n\n # True if the search was already finished, or (1) or (3) just became true.\n was_finished = finished | (num_iterations >= max_iterations) | (\n tf.abs(step) < position_tolerance) | (\n tf.abs(value_at_position) < value_tolerance)\n\n # Compute the next position and the value at that point.\n next_position = tf.compat.v1.where(was_finished, position, position + step)\n value_at_next_position = tf.compat.v1.where(was_finished, value_at_position,\n objective_fn(next_position))\n\n # True if the search was already finished, or (2) just became true.\n is_finished = tf.equal(value_at_position, value_at_next_position)\n\n # Use the mid-point between the last two positions if (2) just became true.\n next_position = tf.compat.v1.where(is_finished & ~was_finished,\n (position + next_position) * 0.5,\n next_position)\n\n # Once finished, stop updating the iteration index and set the step to zero.\n num_iterations = tf.compat.v1.where(is_finished, num_iterations,\n num_iterations + 1)\n next_step = tf.compat.v1.where(\n is_finished, zero, step * value_at_next_position /\n (value_at_position - value_at_next_position))\n\n return (next_position, value_at_next_position, num_iterations, next_step,\n is_finished)\n\n with tf.compat.v1.name_scope(\n name, 'secant_root',\n [position, next_position, value_at_position, max_iterations]):\n\n assertions = []\n if validate_args:\n assertions += [\n tf.Assert(\n tf.reduce_all(input_tensor=position_tolerance > zero),\n [position_tolerance]),\n tf.Assert(\n tf.reduce_all(input_tensor=value_tolerance > zero),\n [value_tolerance]),\n tf.Assert(\n tf.reduce_all(input_tensor=max_iterations >= num_iterations),\n [max_iterations]),\n ]\n\n with tf.control_dependencies(assertions):\n root, value_at_root, num_iterations, _, _ = tf.while_loop(\n cond=_should_continue,\n body=_body,\n loop_vars=[\n position, value_at_position, num_iterations, step, finished\n ])\n\n return RootSearchResults(\n estimated_root=root,\n objective_at_estimated_root=value_at_root,\n num_iterations=num_iterations)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Blockwise bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector as bijector_base\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n__all__ = [\n 'Blockwise',\n]\n\n\nclass Blockwise(bijector_base.Bijector):\n 
\"\"\"Bijector which applies a list of bijectors to blocks of a `Tensor`.\n\n More specifically, given [F_0, F_1, ... F_n] which are scalar or vector\n bijectors this bijector creates a transformation which operates on the vector\n [x_0, ... x_n] with the transformation [F_0(x_0), F_1(x_1) ..., F_n(x_n)]\n where x_0, ..., x_n are blocks (partitions) of the vector.\n\n Example Use:\n\n ```python\n blockwise = tfb.Blockwise(\n bijectors=[tfb.Exp(), tfb.Sigmoid()], block_sizes=[2, 1]\n )\n y = blockwise.forward(x)\n\n # Equivalent to:\n x_0, x_1 = tf.split(x, [2, 1], axis=-1)\n y_0 = tfb.Exp().forward(x_0)\n y_1 = tfb.Sigmoid().forward(x_1)\n y = tf.concat([y_0, y_1], axis=-1)\n ```\n \"\"\"\n\n def __init__(self,\n bijectors,\n block_sizes=None,\n validate_args=False,\n name=None):\n \"\"\"Creates the bijector.\n\n Args:\n bijectors: A non-empty list of bijectors.\n block_sizes: A 1-D integer `Tensor` with each element signifying the\n length of the block of the input vector to pass to the corresponding\n bijector. The length of `block_sizes` must be be equal to the length of\n `bijectors`. If left as None, a vector of 1's is used.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str`, name given to ops managed by this object. Default:\n E.g., `Blockwise([Exp(), Softplus()]).name ==\n 'blockwise_of_exp_and_softplus'`.\n\n Raises:\n NotImplementedError: If a bijector with `event_ndims` > 1 or one that\n reshapes events is passed.\n ValueError: If `bijectors` list is empty.\n ValueError: If size of `block_sizes` does not equal to the length of\n bijectors or is not a vector.\n \"\"\"\n if not name:\n name = 'blockwise_of_' + '_and_'.join([b.name for b in bijectors])\n name = name.replace('/', '')\n with tf.name_scope(name) as name:\n super(Blockwise, self).__init__(\n forward_min_event_ndims=1,\n validate_args=validate_args,\n name=name)\n\n if not bijectors:\n raise ValueError('`bijectors` must not be empty.')\n\n for bijector in bijectors:\n if (bijector.forward_min_event_ndims > 1 or\n (bijector.inverse_min_event_ndims !=\n bijector.forward_min_event_ndims)):\n # TODO(siege): In the future, it can be reasonable to support N-D\n # bijectors by concatenating along some specific axis, broadcasting\n # low-D bijectors appropriately.\n raise NotImplementedError('Only scalar and vector event-shape '\n 'bijectors that do not alter the '\n 'shape are supported at this time.')\n\n self._bijectors = bijectors\n\n if block_sizes is None:\n block_sizes = tf.ones(len(bijectors), dtype=tf.int32)\n self._block_sizes = tf.convert_to_tensor(\n value=block_sizes, name='block_sizes', dtype_hint=tf.int32)\n\n self._block_sizes = _validate_block_sizes(self._block_sizes, bijectors,\n validate_args)\n\n @property\n def bijectors(self):\n return self._bijectors\n\n @property\n def block_sizes(self):\n return self._block_sizes\n\n def _forward(self, x):\n split_x = tf.split(x, self.block_sizes, axis=-1, num=len(self.bijectors))\n split_y = [b.forward(x_) for b, x_ in zip(self.bijectors, split_x)]\n y = tf.concat(split_y, axis=-1)\n tensorshape_util.set_shape(y, x.shape)\n return y\n\n def _inverse(self, y):\n split_y = tf.split(y, self.block_sizes, axis=-1, num=len(self.bijectors))\n split_x = [b.inverse(y_) for b, y_ in zip(self.bijectors, split_y)]\n x = tf.concat(split_x, axis=-1)\n tensorshape_util.set_shape(x, y.shape)\n return x\n\n def _forward_log_det_jacobian(self, x):\n split_x = tf.split(x, self.block_sizes, axis=-1, 
num=len(self.bijectors))\n fldjs = [\n b.forward_log_det_jacobian(x_, event_ndims=1)\n for b, x_ in zip(self.bijectors, split_x)\n ]\n return sum(fldjs)\n\n def _inverse_log_det_jacobian(self, y):\n split_y = tf.split(y, self.block_sizes, axis=-1, num=len(self.bijectors))\n ildjs = [\n b.inverse_log_det_jacobian(y_, event_ndims=1)\n for b, y_ in zip(self.bijectors, split_y)\n ]\n return sum(ildjs)\n\n\ndef _validate_block_sizes(block_sizes, bijectors, validate_args):\n \"\"\"Helper to validate block sizes.\"\"\"\n block_sizes_shape = block_sizes.shape\n if tensorshape_util.is_fully_defined(block_sizes_shape):\n if (tensorshape_util.rank(block_sizes_shape) != 1 or\n (tensorshape_util.num_elements(block_sizes_shape) != len(bijectors))):\n raise ValueError(\n '`block_sizes` must be `None`, or a vector of the same length as '\n '`bijectors`. Got a `Tensor` with shape {} and `bijectors` of '\n 'length {}'.format(block_sizes_shape, len(bijectors)))\n return block_sizes\n elif validate_args:\n message = ('`block_sizes` must be `None`, or a vector of the same length '\n 'as `bijectors`.')\n with tf.control_dependencies([\n assert_util.assert_equal(\n tf.size(input=block_sizes), len(bijectors), message=message),\n assert_util.assert_equal(tf.rank(block_sizes), 1)\n ]):\n return tf.identity(block_sizes)\n else:\n return block_sizes\n" ]
[ [ "tensorflow.compat.v2.math.erf", "numpy.sqrt", "tensorflow.compat.v2.minimum", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.convert_to_tensor", "numpy.exp", "numpy.arange", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.math.log", "tensorflow.compat.v2.abs", "tensorflow.compat.v1.where", "numpy.log", "tensorflow.compat.v2.less", "tensorflow.compat.v2.square", "tensorflow.compat.v2.constant", "numpy.array", "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.math.erfc", "tensorflow.compat.v2.maximum", "numpy.expm1", "tensorflow.compat.v2.math.log1p", "tensorflow.compat.v2.greater" ], [ "tensorflow.compat.v2.cumsum", "tensorflow.compat.v2.size", "tensorflow.compat.v2.rank", "tensorflow.compat.v2.maximum", "tensorflow.compat.v2.reduce_mean", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.range", "tensorflow.compat.v2.math.squared_difference", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.compat.dimension_value" ], [ "tensorflow.compat.v2.size", "tensorflow.compat.v2.rank", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.convert_to_tensor" ], [ "tensorflow.compat.v2.linalg.diag_part", "tensorflow.compat.v2.reduce_mean", "tensorflow.compat.v2.linalg.cholesky", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.linalg.LinearOperatorLowerTriangular", "tensorflow.compat.v2.sqrt", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.math.reciprocal", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.matmul", "tensorflow.compat.v2.linalg.LinearOperatorFullMatrix" ], [ "tensorflow.convert_to_tensor", "tensorflow.sign", "tensorflow.constant", "tensorflow.while_loop", "tensorflow.control_dependencies", "tensorflow.broadcast_to", "tensorflow.equal", "tensorflow.zeros_like", "tensorflow.compat.v1.name_scope", "tensorflow.reduce_all", "tensorflow.abs", "tensorflow.compat.v1.where" ], [ "tensorflow.compat.v2.size", "tensorflow.compat.v2.rank", "tensorflow.compat.v2.concat", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
suytingwan/models
[ "ccdbfe77d071cc19b55fb9f4b738912e35d982ef", "ccdbfe77d071cc19b55fb9f4b738912e35d982ef", "ccdbfe77d071cc19b55fb9f4b738912e35d982ef", "ccdbfe77d071cc19b55fb9f4b738912e35d982ef", "ccdbfe77d071cc19b55fb9f4b738912e35d982ef", "14c3209118b2cadcce9a8f66b760c9cddb3a02ad", "ccdbfe77d071cc19b55fb9f4b738912e35d982ef", "ccdbfe77d071cc19b55fb9f4b738912e35d982ef" ]
[ "PaddleCV/video/metrics/youtube8m/eval_util.py", "dygraph/bmn/model.py", "PaddleCV/metric_learning/eval.py", "PaddleNLP/pretrain_language_models/XLNet/run_classifier.py", "PaddleRec/gru4rec/infer.py", "PaddleNLP/pretrain_language_models/XLNet/modeling.py", "PaddleCV/tracking/ltr/train_settings/siamfc/siamfc_alexnet_vid.py", "PaddleCV/3d_vision/PointNet++/ext_op/tests/test_gather_point_op.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Provides functions to help with evaluating models.\"\"\"\nimport datetime\nimport numpy\n\nfrom . import mean_average_precision_calculator as map_calculator\nfrom . import average_precision_calculator as ap_calculator\n\n\ndef flatten(l):\n \"\"\" Merges a list of lists into a single list. \"\"\"\n return [item for sublist in l for item in sublist]\n\n\ndef calculate_hit_at_one(predictions, actuals):\n \"\"\"Performs a local (numpy) calculation of the hit at one.\n\n Args:\n predictions: Matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n actuals: Matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n\n Returns:\n float: The average hit at one across the entire batch.\n \"\"\"\n top_prediction = numpy.argmax(predictions, 1)\n hits = actuals[numpy.arange(actuals.shape[0]), top_prediction]\n return numpy.average(hits)\n\n\ndef calculate_precision_at_equal_recall_rate(predictions, actuals):\n \"\"\"Performs a local (numpy) calculation of the PERR.\n\n Args:\n predictions: Matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n actuals: Matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n\n Returns:\n float: The average precision at equal recall rate across the entire batch.\n \"\"\"\n aggregated_precision = 0.0\n num_videos = actuals.shape[0]\n for row in numpy.arange(num_videos):\n num_labels = int(numpy.sum(actuals[row]))\n top_indices = numpy.argpartition(predictions[row],\n -num_labels)[-num_labels:]\n item_precision = 0.0\n for label_index in top_indices:\n if predictions[row][label_index] > 0:\n item_precision += actuals[row][label_index]\n item_precision /= top_indices.size\n aggregated_precision += item_precision\n aggregated_precision /= num_videos\n return aggregated_precision\n\n\ndef calculate_gap(predictions, actuals, top_k=20):\n \"\"\"Performs a local (numpy) calculation of the global average precision.\n\n Only the top_k predictions are taken for each of the videos.\n\n Args:\n predictions: Matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n actuals: Matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n top_k: How many predictions to use per video.\n\n Returns:\n float: The global average precision.\n \"\"\"\n gap_calculator = ap_calculator.AveragePrecisionCalculator()\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(\n predictions, actuals, top_k)\n gap_calculator.accumulate(\n flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))\n return gap_calculator.peek_ap_at_n()\n\n\ndef top_k_by_class(predictions, labels, k=20):\n \"\"\"Extracts the top k predictions for each video, sorted by class.\n\n Args:\n predictions: A numpy matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n k: the top k non-zero 
entries to preserve in each prediction.\n\n Returns:\n A tuple (predictions,labels, true_positives). 'predictions' and 'labels'\n are lists of lists of floats. 'true_positives' is a list of scalars. The\n length of the lists are equal to the number of classes. The entries in the\n predictions variable are probability predictions, and\n the corresponding entries in the labels variable are the ground truth for\n those predictions. The entries in 'true_positives' are the number of true\n positives for each class in the ground truth.\n\n Raises:\n ValueError: An error occurred when the k is not a positive integer.\n \"\"\"\n if k <= 0:\n raise ValueError(\"k must be a positive integer.\")\n k = min(k, predictions.shape[1])\n num_classes = predictions.shape[1]\n prediction_triplets = []\n for video_index in range(predictions.shape[0]):\n prediction_triplets.extend(\n top_k_triplets(predictions[video_index], labels[video_index], k))\n out_predictions = [[] for v in range(num_classes)]\n out_labels = [[] for v in range(num_classes)]\n for triplet in prediction_triplets:\n out_predictions[triplet[0]].append(triplet[1])\n out_labels[triplet[0]].append(triplet[2])\n out_true_positives = [numpy.sum(labels[:, i]) for i in range(num_classes)]\n\n return out_predictions, out_labels, out_true_positives\n\n\ndef top_k_triplets(predictions, labels, k=20):\n \"\"\"Get the top_k for a 1-d numpy array. Returns a sparse list of tuples in\n (prediction, class) format\"\"\"\n m = len(predictions)\n k = min(k, m)\n indices = numpy.argpartition(predictions, -k)[-k:]\n return [(index, predictions[index], labels[index]) for index in indices]\n\n\nclass EvaluationMetrics(object):\n \"\"\"A class to store the evaluation metrics.\"\"\"\n\n def __init__(self, num_class, top_k):\n \"\"\"Construct an EvaluationMetrics object to store the evaluation metrics.\n\n Args:\n num_class: A positive integer specifying the number of classes.\n top_k: A positive integer specifying how many predictions are considered per video.\n\n Raises:\n ValueError: An error occurred when MeanAveragePrecisionCalculator cannot\n not be constructed.\n \"\"\"\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(\n num_class)\n self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()\n self.top_k = top_k\n self.num_examples = 0\n\n #def accumulate(self, predictions, labels, loss):\n def accumulate(self, loss, predictions, labels):\n \"\"\"Accumulate the metrics calculated locally for this mini-batch.\n\n Args:\n predictions: A numpy matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n labels: A numpy matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n loss: A numpy array containing the loss for each sample.\n\n Returns:\n dictionary: A dictionary storing the metrics for the mini-batch.\n\n Raises:\n ValueError: An error occurred when the shape of predictions and actuals\n does not match.\n \"\"\"\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions,\n labels)\n mean_loss = numpy.mean(loss)\n\n # Take the top 20 predictions.\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(\n predictions, labels, self.top_k)\n self.map_calculator.accumulate(sparse_predictions, sparse_labels,\n num_positives)\n self.global_ap_calculator.accumulate(\n 
flatten(sparse_predictions),\n flatten(sparse_labels), sum(num_positives))\n\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_loss += mean_loss * batch_size\n\n return {\n \"hit_at_one\": mean_hit_at_one,\n \"perr\": mean_perr,\n \"loss\": mean_loss\n }\n\n def get(self):\n \"\"\"Calculate the evaluation metrics for the whole epoch.\n\n Raises:\n ValueError: If no examples were accumulated.\n\n Returns:\n dictionary: a dictionary storing the evaluation metrics for the epoch. The\n dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and\n aps (default nan).\n \"\"\"\n if self.num_examples <= 0:\n raise ValueError(\"total_sample must be positive.\")\n avg_hit_at_one = self.sum_hit_at_one / self.num_examples\n avg_perr = self.sum_perr / self.num_examples\n avg_loss = self.sum_loss / self.num_examples\n\n aps = self.map_calculator.peek_map_at_n()\n gap = self.global_ap_calculator.peek_ap_at_n()\n\n epoch_info_dict = {}\n return {\n \"avg_hit_at_one\": avg_hit_at_one,\n \"avg_perr\": avg_perr,\n \"avg_loss\": avg_loss,\n \"aps\": aps,\n \"gap\": gap\n }\n\n def clear(self):\n \"\"\"Clear the evaluation metrics and reset the EvaluationMetrics object.\"\"\"\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import ParamAttr\nimport numpy as np\nimport math\n\nfrom bmn_utils import get_interp1d_mask\n\nDATATYPE = 'float32'\n\n\n# Net\nclass Conv1D(fluid.dygraph.Layer):\n def __init__(self,\n prefix,\n num_channels=256,\n num_filters=256,\n size_k=3,\n padding=1,\n groups=1,\n act=\"relu\"):\n super(Conv1D, self).__init__()\n fan_in = num_channels * size_k * 1\n k = 1. 
/ math.sqrt(fan_in)\n param_attr = ParamAttr(\n name=prefix + \"_w\",\n initializer=fluid.initializer.Uniform(\n low=-k, high=k))\n bias_attr = ParamAttr(\n name=prefix + \"_b\",\n initializer=fluid.initializer.Uniform(\n low=-k, high=k))\n\n self._conv2d = fluid.dygraph.Conv2D(\n num_channels=num_channels,\n num_filters=num_filters,\n filter_size=(1, size_k),\n stride=1,\n padding=(0, padding),\n groups=groups,\n act=act,\n param_attr=param_attr,\n bias_attr=bias_attr)\n\n def forward(self, x):\n x = fluid.layers.unsqueeze(input=x, axes=[2])\n x = self._conv2d(x)\n x = fluid.layers.squeeze(input=x, axes=[2])\n return x\n\n\nclass BMN(fluid.dygraph.Layer):\n def __init__(self, cfg):\n super(BMN, self).__init__()\n\n #init config\n self.tscale = cfg.MODEL.tscale\n self.dscale = cfg.MODEL.dscale\n self.prop_boundary_ratio = cfg.MODEL.prop_boundary_ratio\n self.num_sample = cfg.MODEL.num_sample\n self.num_sample_perbin = cfg.MODEL.num_sample_perbin\n\n self.hidden_dim_1d = 256\n self.hidden_dim_2d = 128\n self.hidden_dim_3d = 512\n\n # Base Module\n self.b_conv1 = Conv1D(\n prefix=\"Base_1\",\n num_channels=400,\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n self.b_conv2 = Conv1D(\n prefix=\"Base_2\",\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n\n # Temporal Evaluation Module\n self.ts_conv1 = Conv1D(\n prefix=\"TEM_s1\",\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n self.ts_conv2 = Conv1D(\n prefix=\"TEM_s2\", num_filters=1, size_k=1, padding=0, act=\"sigmoid\")\n self.te_conv1 = Conv1D(\n prefix=\"TEM_e1\",\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n self.te_conv2 = Conv1D(\n prefix=\"TEM_e2\", num_filters=1, size_k=1, padding=0, act=\"sigmoid\")\n\n #Proposal Evaluation Module\n self.p_conv1 = Conv1D(\n prefix=\"PEM_1d\",\n num_filters=self.hidden_dim_2d,\n size_k=3,\n padding=1,\n act=\"relu\")\n\n # init to speed up\n sample_mask = get_interp1d_mask(self.tscale, self.dscale,\n self.prop_boundary_ratio,\n self.num_sample, self.num_sample_perbin)\n self.sample_mask = fluid.dygraph.base.to_variable(sample_mask)\n self.sample_mask.stop_gradient = True\n\n self.p_conv3d1 = fluid.dygraph.Conv3D(\n num_channels=128,\n num_filters=self.hidden_dim_3d,\n filter_size=(self.num_sample, 1, 1),\n stride=(self.num_sample, 1, 1),\n padding=0,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_3d1_w\"),\n bias_attr=ParamAttr(name=\"PEM_3d1_b\"))\n\n self.p_conv2d1 = fluid.dygraph.Conv2D(\n num_channels=512,\n num_filters=self.hidden_dim_2d,\n filter_size=1,\n stride=1,\n padding=0,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_2d1_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d1_b\"))\n self.p_conv2d2 = fluid.dygraph.Conv2D(\n num_channels=128,\n num_filters=self.hidden_dim_2d,\n filter_size=3,\n stride=1,\n padding=1,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_2d2_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d2_b\"))\n self.p_conv2d3 = fluid.dygraph.Conv2D(\n num_channels=128,\n num_filters=self.hidden_dim_2d,\n filter_size=3,\n stride=1,\n padding=1,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_2d3_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d3_b\"))\n self.p_conv2d4 = fluid.dygraph.Conv2D(\n num_channels=128,\n num_filters=2,\n filter_size=1,\n stride=1,\n padding=0,\n act=\"sigmoid\",\n param_attr=ParamAttr(name=\"PEM_2d4_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d4_b\"))\n\n def forward(self, x):\n #Base Module\n x = 
self.b_conv1(x)\n x = self.b_conv2(x)\n\n #TEM\n xs = self.ts_conv1(x)\n xs = self.ts_conv2(xs)\n xs = fluid.layers.squeeze(xs, axes=[1])\n xe = self.te_conv1(x)\n xe = self.te_conv2(xe)\n xe = fluid.layers.squeeze(xe, axes=[1])\n\n #PEM\n xp = self.p_conv1(x)\n #BM layer\n xp = fluid.layers.matmul(xp, self.sample_mask)\n xp = fluid.layers.reshape(\n xp, shape=[0, 0, -1, self.dscale, self.tscale])\n\n xp = self.p_conv3d1(xp)\n xp = fluid.layers.squeeze(xp, axes=[2])\n xp = self.p_conv2d1(xp)\n xp = self.p_conv2d2(xp)\n xp = self.p_conv2d3(xp)\n xp = self.p_conv2d4(xp)\n return xp, xs, xe\n\n\ndef bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end,\n cfg):\n def _get_mask(cfg):\n dscale = cfg.MODEL.dscale\n tscale = cfg.MODEL.tscale\n bm_mask = []\n for idx in range(dscale):\n mask_vector = [1 for i in range(tscale - idx)\n ] + [0 for i in range(idx)]\n bm_mask.append(mask_vector)\n bm_mask = np.array(bm_mask, dtype=np.float32)\n self_bm_mask = fluid.layers.create_global_var(\n shape=[dscale, tscale], value=0, dtype=DATATYPE, persistable=True)\n fluid.layers.assign(bm_mask, self_bm_mask)\n self_bm_mask.stop_gradient = True\n return self_bm_mask\n\n def tem_loss_func(pred_start, pred_end, gt_start, gt_end):\n def bi_loss(pred_score, gt_label):\n pred_score = fluid.layers.reshape(\n x=pred_score, shape=[-1], inplace=False)\n gt_label = fluid.layers.reshape(\n x=gt_label, shape=[-1], inplace=False)\n gt_label.stop_gradient = True\n pmask = fluid.layers.cast(x=(gt_label > 0.5), dtype=DATATYPE)\n num_entries = fluid.layers.cast(\n fluid.layers.shape(pmask), dtype=DATATYPE)\n num_positive = fluid.layers.cast(\n fluid.layers.reduce_sum(pmask), dtype=DATATYPE)\n ratio = num_entries / num_positive\n coef_0 = 0.5 * ratio / (ratio - 1)\n coef_1 = 0.5 * ratio\n epsilon = 0.000001\n temp = fluid.layers.log(pred_score + epsilon)\n loss_pos = fluid.layers.elementwise_mul(\n fluid.layers.log(pred_score + epsilon), pmask)\n loss_pos = coef_1 * fluid.layers.reduce_mean(loss_pos)\n loss_neg = fluid.layers.elementwise_mul(\n fluid.layers.log(1.0 - pred_score + epsilon), (1.0 - pmask))\n loss_neg = coef_0 * fluid.layers.reduce_mean(loss_neg)\n loss = -1 * (loss_pos + loss_neg)\n return loss\n\n loss_start = bi_loss(pred_start, gt_start)\n loss_end = bi_loss(pred_end, gt_end)\n loss = loss_start + loss_end\n return loss\n\n def pem_reg_loss_func(pred_score, gt_iou_map, mask):\n\n gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)\n\n u_hmask = fluid.layers.cast(x=gt_iou_map > 0.7, dtype=DATATYPE)\n u_mmask = fluid.layers.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)\n u_mmask = fluid.layers.cast(x=u_mmask, dtype=DATATYPE)\n u_lmask = fluid.layers.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.)\n u_lmask = fluid.layers.cast(x=u_lmask, dtype=DATATYPE)\n u_lmask = fluid.layers.elementwise_mul(u_lmask, mask)\n\n num_h = fluid.layers.cast(\n fluid.layers.reduce_sum(u_hmask), dtype=DATATYPE)\n num_m = fluid.layers.cast(\n fluid.layers.reduce_sum(u_mmask), dtype=DATATYPE)\n num_l = fluid.layers.cast(\n fluid.layers.reduce_sum(u_lmask), dtype=DATATYPE)\n\n r_m = num_h / num_m\n u_smmask = fluid.layers.uniform_random(\n shape=[gt_iou_map.shape[1], gt_iou_map.shape[2]],\n dtype=DATATYPE,\n min=0.0,\n max=1.0)\n u_smmask = fluid.layers.elementwise_mul(u_mmask, u_smmask)\n u_smmask = fluid.layers.cast(x=(u_smmask > (1. 
- r_m)), dtype=DATATYPE)\n\n r_l = num_h / num_l\n u_slmask = fluid.layers.uniform_random(\n shape=[gt_iou_map.shape[1], gt_iou_map.shape[2]],\n dtype=DATATYPE,\n min=0.0,\n max=1.0)\n u_slmask = fluid.layers.elementwise_mul(u_lmask, u_slmask)\n u_slmask = fluid.layers.cast(x=(u_slmask > (1. - r_l)), dtype=DATATYPE)\n\n weights = u_hmask + u_smmask + u_slmask\n weights.stop_gradient = True\n loss = fluid.layers.square_error_cost(pred_score, gt_iou_map)\n loss = fluid.layers.elementwise_mul(loss, weights)\n loss = 0.5 * fluid.layers.reduce_sum(loss) / fluid.layers.reduce_sum(\n weights)\n\n return loss\n\n def pem_cls_loss_func(pred_score, gt_iou_map, mask):\n gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)\n gt_iou_map.stop_gradient = True\n pmask = fluid.layers.cast(x=(gt_iou_map > 0.9), dtype=DATATYPE)\n nmask = fluid.layers.cast(x=(gt_iou_map <= 0.9), dtype=DATATYPE)\n nmask = fluid.layers.elementwise_mul(nmask, mask)\n\n num_positive = fluid.layers.reduce_sum(pmask)\n num_entries = num_positive + fluid.layers.reduce_sum(nmask)\n ratio = num_entries / num_positive\n coef_0 = 0.5 * ratio / (ratio - 1)\n coef_1 = 0.5 * ratio\n epsilon = 0.000001\n loss_pos = fluid.layers.elementwise_mul(\n fluid.layers.log(pred_score + epsilon), pmask)\n loss_pos = coef_1 * fluid.layers.reduce_sum(loss_pos)\n loss_neg = fluid.layers.elementwise_mul(\n fluid.layers.log(1.0 - pred_score + epsilon), nmask)\n loss_neg = coef_0 * fluid.layers.reduce_sum(loss_neg)\n loss = -1 * (loss_pos + loss_neg) / num_entries\n return loss\n\n pred_bm_reg = fluid.layers.squeeze(\n fluid.layers.slice(\n pred_bm, axes=[1], starts=[0], ends=[1]), axes=[1])\n pred_bm_cls = fluid.layers.squeeze(\n fluid.layers.slice(\n pred_bm, axes=[1], starts=[1], ends=[2]), axes=[1])\n\n bm_mask = _get_mask(cfg)\n\n pem_reg_loss = pem_reg_loss_func(pred_bm_reg, gt_iou_map, bm_mask)\n pem_cls_loss = pem_cls_loss_func(pred_bm_cls, gt_iou_map, bm_mask)\n\n tem_loss = tem_loss_func(pred_start, pred_end, gt_start, gt_end)\n\n loss = tem_loss + 10 * pem_reg_loss + pem_cls_loss\n return loss, tem_loss, pem_reg_loss, pem_cls_loss\n", "# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport math\nimport time\nimport argparse\nimport functools\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport models\nimport reader\nfrom utility import add_arguments, print_arguments, check_cuda\nfrom utility import fmt_time, recall_topk\n\n# yapf: disable\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\nadd_arg('model', str, \"ResNet50\", \"Set the network to use.\")\nadd_arg('embedding_size', int, 0, \"Embedding size.\")\nadd_arg('batch_size', int, 10, \"Minibatch size.\")\nadd_arg('image_shape', str, \"3,224,224\", \"Input image size.\")\nadd_arg('use_gpu', bool, True, \"Whether to use GPU or not.\")\nadd_arg('pretrained_model', str, None, \"Whether to use pretrained model.\")\n# yapf: enable\n\nmodel_list = [m for m in dir(models) if \"__\" not in m]\n\n\ndef eval(args):\n # parameters from arguments\n model_name = args.model\n pretrained_model = args.pretrained_model\n image_shape = [int(m) for m in args.image_shape.split(\",\")]\n\n assert model_name in model_list, \"{} is not in lists: {}\".format(args.model,\n model_list)\n\n image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')\n label = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n # model definition\n model = models.__dict__[model_name]()\n out = model.net(input=image, embedding_size=args.embedding_size)\n\n test_program = fluid.default_main_program().clone(for_test=True)\n\n place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n if pretrained_model:\n\n def if_exist(var):\n return os.path.exists(os.path.join(pretrained_model, var.name))\n\n fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)\n\n test_reader = paddle.batch(reader.test(args), batch_size=args.batch_size, drop_last=False)\n feeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n fetch_list = [out.name]\n\n f, l = [], []\n for batch_id, data in enumerate(test_reader()):\n t1 = time.time()\n [feas] = exe.run(test_program, fetch_list=fetch_list, feed=feeder.feed(data))\n label = np.asarray([x[1] for x in data])\n f.append(feas)\n l.append(label)\n\n t2 = time.time()\n period = t2 - t1\n if batch_id % 20 == 0:\n print(\"[%s] testbatch %d, time %2.2f sec\" % \\\n (fmt_time(), batch_id, period))\n\n f = np.vstack(f)\n l = np.hstack(l)\n recall = recall_topk(f, l, k=1)\n print(\"[%s] End test %d, test_recall %.5f\" % (fmt_time(), len(f), recall))\n sys.stdout.flush()\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n check_cuda(args.use_gpu)\n eval(args)\n\n\nif __name__ == '__main__':\n main()\n", "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fine-tuning on regression/classification tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport sys\nif six.PY2:\n reload(sys)\n sys.setdefaultencoding('utf8')\n\nimport os\nimport time\nimport json\nimport argparse\nimport numpy as np\nimport subprocess\nimport multiprocessing\nfrom scipy.stats import pearsonr\n\nimport paddle\nimport paddle.fluid as fluid\n\nimport reader.cls as reader\nfrom model.xlnet import XLNetConfig\nfrom model.classifier import create_model\nfrom optimization import optimization\nfrom utils.args import ArgumentGroup, print_arguments, check_cuda\nfrom utils.init import init_pretraining_params, init_checkpoint\nfrom utils.cards import get_cards\n\nnum_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))\n\n# yapf: disable\nparser = argparse.ArgumentParser(__doc__)\nmodel_g = ArgumentGroup(parser, \"model\", \"model configuration and paths.\")\nmodel_g.add_arg(\"model_config_path\", str, None, \"Path to the json file for bert model config.\")\nmodel_g.add_arg(\"dropout\", float, 0.1, \"Dropout rate.\")\nmodel_g.add_arg(\"dropatt\", float, 0.1, \"Attention dropout rate.\")\nmodel_g.add_arg(\"clamp_len\", int, -1, \"Clamp length.\")\nmodel_g.add_arg(\"summary_type\", str, \"last\",\n \"Method used to summarize a sequence into a vector.\", choices=['last'])\nmodel_g.add_arg(\"use_summ_proj\", bool, True,\n \"Whether to use projection for summarizing sequences.\")\nmodel_g.add_arg(\"spiece_model_file\", str, None, \"Sentence Piece model path.\")\nmodel_g.add_arg(\"init_checkpoint\", str, None, \"Init checkpoint to resume training from.\")\nmodel_g.add_arg(\"init_pretraining_params\", str, None,\n \"Init pre-training params which preforms fine-tuning from. If the \"\n \"arg 'init_checkpoint' has been set, this argument wouldn't be valid.\")\nmodel_g.add_arg(\"checkpoints\", str, \"checkpoints\", \"Path to save checkpoints.\")\n\ninit_g = ArgumentGroup(parser, \"init\", \"parameter initialization options.\")\ninit_g.add_arg(\"init\", str, \"normal\", \"Initialization method.\", choices=[\"normal\", \"uniform\"])\ninit_g.add_arg(\"init_std\", str, 0.02, \"Initialization std when init is normal.\")\ninit_g.add_arg(\"init_range\", str, 0.1, \"Initialization std when init is uniform.\")\n\ntrain_g = ArgumentGroup(parser, \"training\", \"training options.\")\ntrain_g.add_arg(\"epoch\", int, 1000, \"Number of epoches for fine-tuning.\")\ntrain_g.add_arg(\"learning_rate\", float, 5e-5, \"Learning rate used to train with warmup.\")\ntrain_g.add_arg(\"lr_scheduler\", str, \"linear_warmup_decay\",\n \"scheduler of learning rate.\", choices=['linear_warmup_decay', 'noam_decay'])\ntrain_g.add_arg(\"weight_decay\", float, 0.01, \"Weight decay rate for L2 regularizer.\")\ntrain_g.add_arg(\"lr_layer_decay_rate\", float, 1.0, \"Top layer: lr[L] = args.learning_rate. 
\"\n \"Lower layers: lr[l-1] = lr[l] * lr_layer_decay_rate.\")\ntrain_g.add_arg(\"save_steps\", int, 10000, \"The steps interval to save checkpoints.\")\ntrain_g.add_arg(\"train_batch_size\", int, 8, \"Total examples' number in batch for training.\")\ntrain_g.add_arg(\"eval_batch_size\", int, 128, \"Total examples' number in batch for development.\")\ntrain_g.add_arg(\"predict_batch_size\", int, 128, \"Total examples' number in batch for prediction.\")\ntrain_g.add_arg(\"train_steps\", int, 1000, \"The total steps for training.\")\ntrain_g.add_arg(\"warmup_steps\", int, 1000, \"The steps for warmup.\")\ntrain_g.add_arg(\"validation_steps\", int, 1000, \"The steps interval to evaluate model performance.\")\n\nlog_g = ArgumentGroup(parser, \"logging\", \"logging related.\")\nlog_g.add_arg(\"skip_steps\", int, 10, \"The steps interval to print loss.\")\nlog_g.add_arg(\"verbose\", bool, False, \"Whether to output verbose log.\")\n\ndata_g = ArgumentGroup(parser, \"data\", \"Data paths, vocab paths and data processing options\")\ndata_g.add_arg(\"data_dir\", str, None, \"Path to training data.\")\ndata_g.add_arg(\"predict_dir\", str, None, \"Path to write predict results.\")\ndata_g.add_arg(\"predict_threshold\", float, 0.0, \"Threshold for binary prediction.\")\ndata_g.add_arg(\"max_seq_length\", int, 512, \"Number of words of the longest seqence.\")\ndata_g.add_arg(\"uncased\", bool, True,\n \"Whether to lower case the input text. Should be True for uncased models and False for cased models.\")\ndata_g.add_arg(\"random_seed\", int, 0, \"Random seed.\")\n\nrun_type_g = ArgumentGroup(parser, \"run_type\", \"running type options.\")\nrun_type_g.add_arg(\"use_cuda\", bool, True, \"If set, use GPU for training.\")\nrun_type_g.add_arg(\"use_fast_executor\", bool, False, \"If set, use fast parallel executor (in experiment).\")\nrun_type_g.add_arg(\"shuffle\", bool, True, \"\")\nrun_type_g.add_arg(\"task_name\", str, None,\n \"The name of task to perform fine-tuning, should be in {'xnli', 'mnli', 'cola', 'mrpc'}.\")\nrun_type_g.add_arg(\"is_regression\", str, None, \"Whether it's a regression task.\")\nrun_type_g.add_arg(\"do_train\", bool, True, \"Whether to perform training.\")\nrun_type_g.add_arg(\"do_eval\", bool, True, \"Whether to perform evaluation on dev data set.\")\nrun_type_g.add_arg(\"do_predict\", bool, True, \"Whether to perform evaluation on test data set.\")\nrun_type_g.add_arg(\"eval_split\", str, \"dev\", \"Could be dev or test\")\n\nparser.add_argument(\"--enable_ce\", action='store_true', help=\"The flag indicating whether to run the task for continuous evaluation.\")\n\nargs = parser.parse_args()\n# yapf: enable.\n\n\ndef evaluate(exe, predict_program, test_data_loader, fetch_list, eval_phase, num_examples):\n test_data_loader.start()\n total_cost, total_num_seqs = [], []\n all_logits, all_labels = [], []\n time_begin = time.time()\n total_steps = int(num_examples / args.eval_batch_size)\n steps = 0\n while True:\n try:\n np_loss, np_num_seqs, np_logits, np_labels = exe.run(program=predict_program,\n fetch_list=fetch_list)\n total_cost.extend(np_loss * np_num_seqs)\n total_num_seqs.extend(np_num_seqs)\n all_logits.extend(np_logits)\n all_labels.extend(np_labels)\n if steps % (int(total_steps / 10)) == 0:\n print(\"Evaluation [{}/{}]\".format(steps, total_steps))\n steps += 1\n except fluid.core.EOFException:\n test_data_loader.reset()\n break\n all_logits = np.array(all_logits)\n all_labels = np.array(all_labels)\n if args.is_regression:\n key = \"eval_pearsonr\"\n eval_result, _ 
= pearsonr(all_logits, all_labels)\n else:\n key = \"eval_accuracy\"\n pred = np.argmax(all_logits, axis=1).reshape(all_labels.shape)\n eval_result = np.sum(pred == all_labels) / float(all_labels.size)\n time_end = time.time()\n print(\"[%s evaluation] ave loss: %f, %s: %f, elapsed time: %f s\" %\n (eval_phase, np.sum(total_cost) / np.sum(total_num_seqs), key, eval_result,\n time_end - time_begin))\n\ndef predict(exe, predict_program, test_data_loader, task_name, label_list, fetch_list):\n test_data_loader.start()\n pred_cnt = 0\n predict_results = []\n with open(os.path.join(args.predict_dir, \"{}.tsv\".format(\n task_name)), \"w\") as fout:\n fout.write(\"index\\tprediction\\n\")\n while True:\n try:\n np_logits = exe.run(program=predict_program,\n fetch_list=fetch_list)\n for result in np_logits[0]:\n if pred_cnt % 1000 == 0:\n print(\"Predicting submission for example: {}\".format(\n pred_cnt))\n\n logits = [float(x) for x in result.flat]\n predict_results.append(logits)\n\n if len(logits) == 1:\n label_out = logits[0]\n elif len(logits) == 2:\n if logits[1] - logits[0] > args.predict_threshold:\n label_out = label_list[1]\n else:\n label_out = label_list[0]\n elif len(logits) > 2:\n max_index = np.argmax(np.array(logits, dtype=np.float32))\n label_out = label_list[max_index]\n else:\n raise NotImplementedError\n\n fout.write(\"{}\\t{}\\n\".format(pred_cnt, label_out))\n pred_cnt += 1\n\n except fluid.core.EOFException:\n test_data_loader.reset()\n break\n\n predict_json_path = os.path.join(args.predict_dir, \"{}.logits.json\".format(\n task_name))\n\n with open(predict_json_path, \"w\") as fp:\n json.dump(predict_results, fp, indent=4)\n\ndef get_device_num():\n # NOTE(zcd): for multi-process training, each process uses one GPU card.\n if num_trainers > 1: return 1\n visible_device = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if visible_device:\n device_num = len(visible_device.split(','))\n else:\n device_num = subprocess.check_output(['nvidia-smi','-L']).decode().count('\\n')\n return device_num\n\ndef main(args):\n if not (args.do_train or args.do_eval or args.do_predict):\n raise ValueError(\"For args `do_train`, `do_eval` and `do_predict`, at \"\n \"least one of them must be True.\")\n if args.do_predict and not args.predict_dir:\n raise ValueError(\"args 'predict_dir' should be given when doing predict\")\n\n # only touch the file system when a predict dir was actually supplied;\n # os.path.exists(None) would raise a TypeError otherwise\n if args.predict_dir and not os.path.exists(args.predict_dir):\n os.makedirs(args.predict_dir)\n\n xlnet_config = XLNetConfig(args.model_config_path)\n xlnet_config.print_config()\n\n if args.use_cuda:\n place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))\n dev_count = get_device_num()\n else:\n place = fluid.CPUPlace()\n dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n exe = fluid.Executor(place)\n\n task_name = args.task_name.lower()\n processors = {\n \"mnli_matched\": reader.MnliMatchedProcessor,\n \"mnli_mismatched\": reader.MnliMismatchedProcessor,\n 'sts-b': reader.StsbProcessor,\n 'imdb': reader.ImdbProcessor,\n \"yelp5\": reader.Yelp5Processor\n }\n\n processor = processors[task_name](args)\n\n label_list = processor.get_labels() if not args.is_regression else None\n num_labels = len(label_list) if label_list is not None else None\n train_program = fluid.Program()\n startup_prog = fluid.Program()\n if args.random_seed is not None:\n startup_prog.random_seed = args.random_seed\n train_program.random_seed = args.random_seed\n\n if args.do_train:\n # NOTE: If num_trainers > 1, the shuffle_seed must be set, because\n # the order of batch data 
generated by reader\n # must be the same in the respective processes.\n shuffle_seed = 1 if num_trainers > 1 else None\n train_data_generator = processor.data_generator(\n batch_size=args.train_batch_size,\n is_regression=args.is_regression,\n phase='train',\n epoch=args.epoch,\n dev_count=dev_count,\n shuffle=args.shuffle)\n\n num_train_examples = processor.get_num_examples(phase='train')\n print(\"Device count: %d\" % dev_count)\n print(\"Max num of epochs: %d\" % args.epoch)\n print(\"Num of train examples: %d\" % num_train_examples)\n print(\"Num of train steps: %d\" % args.train_steps)\n print(\"Num of warmup steps: %d\" % args.warmup_steps)\n\n with fluid.program_guard(train_program, startup_prog):\n with fluid.unique_name.guard():\n train_data_loader, loss, logits, num_seqs, label_ids = create_model(\n args,\n xlnet_config=xlnet_config,\n n_class=num_labels)\n scheduled_lr = optimization(\n loss=loss,\n warmup_steps=args.warmup_steps,\n num_train_steps=args.train_steps,\n learning_rate=args.learning_rate,\n train_program=train_program,\n startup_prog=startup_prog,\n weight_decay=args.weight_decay,\n lr_layer_decay_rate=args.lr_layer_decay_rate,\n scheduler=args.lr_scheduler)\n\n if args.do_eval:\n dev_prog = fluid.Program()\n with fluid.program_guard(dev_prog, startup_prog):\n with fluid.unique_name.guard():\n dev_data_loader, loss, logits, num_seqs, label_ids = create_model(\n args,\n xlnet_config=xlnet_config,\n n_class=num_labels)\n\n dev_prog = dev_prog.clone(for_test=True)\n dev_data_loader.set_batch_generator(\n processor.data_generator(\n batch_size=args.eval_batch_size,\n is_regression=args.is_regression,\n phase=args.eval_split,\n epoch=1,\n dev_count=1,\n shuffle=False), place)\n\n if args.do_predict:\n predict_prog = fluid.Program()\n with fluid.program_guard(predict_prog, startup_prog):\n with fluid.unique_name.guard():\n predict_data_loader, loss, logits, num_seqs, label_ids = create_model(\n args,\n xlnet_config=xlnet_config,\n n_class=num_labels)\n\n predict_prog = predict_prog.clone(for_test=True)\n predict_data_loader.set_batch_generator(\n processor.data_generator(\n batch_size=args.predict_batch_size,\n is_regression=args.is_regression,\n phase=args.eval_split,\n epoch=1,\n dev_count=1,\n shuffle=False), place)\n\n exe.run(startup_prog)\n\n if args.do_train:\n if args.init_checkpoint and args.init_pretraining_params:\n print(\n \"WARNING: args 'init_checkpoint' and 'init_pretraining_params' \"\n \"both are set! 
Only arg 'init_checkpoint' is made valid.\")\n if args.init_checkpoint:\n init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=startup_prog)\n elif args.init_pretraining_params:\n init_pretraining_params(\n exe,\n args.init_pretraining_params,\n main_program=startup_prog)\n elif args.do_eval or args.do_predict:\n if not args.init_checkpoint:\n raise ValueError(\"args 'init_checkpoint' should be set if\"\n \"only doing validation or testing!\")\n init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=startup_prog)\n\n if args.do_train:\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.use_experimental_executor = args.use_fast_executor\n exec_strategy.num_threads = dev_count\n build_strategy = fluid.BuildStrategy()\n\n if args.use_cuda and num_trainers > 1:\n assert shuffle_seed is not None\n # NOTE: dist_utils is never imported in this file in the original\n # source; it must be made importable before multi-trainer runs work.\n dist_utils.prepare_for_multi_process(exe, build_strategy, train_program)\n train_data_generator = fluid.contrib.reader.distributed_batch_reader(\n train_data_generator)\n\n train_compiled_program = fluid.CompiledProgram(train_program).with_data_parallel(\n loss_name=loss.name, build_strategy=build_strategy)\n\n train_data_loader.set_batch_generator(train_data_generator, place)\n\n\n if args.do_train:\n train_data_loader.start()\n steps = 0\n total_cost, total_num_seqs, total_time = [], [], 0.0\n throughput = []\n ce_info = []\n while steps < args.train_steps:\n try:\n time_begin = time.time()\n steps += 1\n if steps % args.skip_steps == 0:\n fetch_list = [loss.name, scheduled_lr.name, num_seqs.name]\n else:\n fetch_list = []\n\n outputs = exe.run(train_compiled_program, fetch_list=fetch_list)\n\n time_end = time.time()\n used_time = time_end - time_begin\n total_time += used_time\n\n if steps % args.skip_steps == 0:\n np_loss, np_lr, np_num_seqs = outputs\n\n total_cost.extend(np_loss * np_num_seqs)\n total_num_seqs.extend(np_num_seqs)\n\n if args.verbose:\n verbose = \"train data_loader queue size: %d, \" % train_data_loader.queue.size(\n )\n verbose += \"learning rate: %f\" % np_lr[0]\n print(verbose)\n\n current_example, current_epoch = processor.get_train_progress(\n )\n\n log_record = \"epoch: {}, progress: {}/{}, step: {}, ave loss: {}\".format(\n current_epoch, current_example, num_train_examples,\n steps, np.sum(total_cost) / np.sum(total_num_seqs))\n ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), used_time])\n if steps > 0:\n throughput.append(args.skip_steps / total_time)\n log_record = log_record + \", speed: %f steps/s\" % (args.skip_steps / total_time)\n print(log_record)\n else:\n print(log_record)\n total_cost, total_num_seqs, total_time = [], [], 0.0\n\n if steps % args.save_steps == 0:\n save_path = os.path.join(args.checkpoints,\n \"step_\" + str(steps))\n fluid.io.save_persistables(exe, save_path, train_program)\n\n if steps % args.validation_steps == 0:\n print(\"Average throughput: %s\" % (np.average(throughput)))\n throughput = []\n # evaluate dev set\n if args.do_eval:\n evaluate(exe, dev_prog, dev_data_loader,\n [loss.name, num_seqs.name, logits.name, label_ids.name],\n args.eval_split, processor.get_num_examples(phase=args.eval_split))\n except fluid.core.EOFException:\n save_path = os.path.join(args.checkpoints, \"step_\" + str(steps))\n fluid.io.save_persistables(exe, save_path, train_program)\n train_data_loader.reset()\n break\n if args.enable_ce:\n card_num = get_cards()\n ce_cost = 0\n ce_time = 0\n try:\n ce_cost = ce_info[-2][0]\n ce_time = ce_info[-2][1]\n except IndexError:\n print(\"ce info error\")\n 
print(\"kpis\\ttrain_duration_%s_card%s\\t%s\" %\n (args.task_name.replace(\"-\", \"_\"), card_num, ce_time))\n print(\"kpis\\ttrain_cost_%s_card%s\\t%f\" %\n (args.task_name.replace(\"-\", \"_\"), card_num, ce_cost))\n\n\n # final eval on dev set\n if args.do_eval:\n evaluate(exe, dev_prog, dev_data_loader,\n [loss.name, num_seqs.name, logits.name, label_ids], args.eval_split,\n processor.get_num_examples(phase=args.eval_split))\n\n # final eval on test set\n if args.do_predict:\n predict(exe, predict_prog, predict_data_loader, task_name, label_list, [logits.name])\n\n\nif __name__ == '__main__':\n print_arguments(args)\n check_cuda(args.use_cuda)\n main(args)\n", "import argparse\nimport sys\nimport time\nimport math\nimport unittest\nimport contextlib\nimport numpy as np\nimport six\nimport paddle.fluid as fluid\nimport paddle\n\nimport utils\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"gru4rec benchmark.\")\n parser.add_argument(\n '--test_dir', type=str, default='test_data', help='test file address')\n parser.add_argument(\n '--start_index', type=int, default='1', help='start index')\n parser.add_argument(\n '--last_index', type=int, default='10', help='end index')\n parser.add_argument(\n '--model_dir', type=str, default='model_recall20', help='model dir')\n parser.add_argument(\n '--use_cuda', type=int, default='0', help='whether use cuda')\n parser.add_argument(\n '--batch_size', type=int, default='5', help='batch_size')\n parser.add_argument(\n '--vocab_path', type=str, default='vocab.txt', help='vocab file')\n args = parser.parse_args()\n return args\n\n\ndef infer(test_reader, use_cuda, model_path):\n \"\"\" inference function \"\"\"\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n with fluid.scope_guard(fluid.Scope()):\n infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(\n model_path, exe)\n accum_num_recall = 0.0\n accum_num_sum = 0.0\n t0 = time.time()\n step_id = 0\n for data in test_reader():\n step_id += 1\n src_wordseq = utils.to_lodtensor([dat[0] for dat in data], place)\n label_data = [dat[1] for dat in data]\n dst_wordseq = utils.to_lodtensor(label_data, place)\n para = exe.run(\n infer_program,\n feed={\"src_wordseq\": src_wordseq,\n \"dst_wordseq\": dst_wordseq},\n fetch_list=fetch_vars,\n return_numpy=False)\n\n acc_ = para[1]._get_float_element(0)\n data_length = len(\n np.concatenate(\n label_data, axis=0).astype(\"int64\"))\n accum_num_sum += (data_length)\n accum_num_recall += (data_length * acc_)\n if step_id % 1 == 0:\n print(\"step:%d recall@20:%.4f\" %\n (step_id, accum_num_recall / accum_num_sum))\n t1 = time.time()\n print(\"model:%s recall@20:%.3f time_cost(s):%.2f\" %\n (model_path, accum_num_recall / accum_num_sum, t1 - t0))\n\n\nif __name__ == \"__main__\":\n utils.check_version()\n args = parse_args()\n start_index = args.start_index\n last_index = args.last_index\n test_dir = args.test_dir\n model_dir = args.model_dir\n batch_size = args.batch_size\n vocab_path = args.vocab_path\n use_cuda = True if args.use_cuda else False\n print(\"start index: \", start_index, \" last_index:\", last_index)\n vocab_size, test_reader = utils.prepare_data(\n test_dir,\n vocab_path,\n batch_size=batch_size,\n buffer_size=1000,\n word_freq_threshold=0,\n is_train=False)\n\n for epoch in range(start_index, last_index + 1):\n epoch_path = model_dir + \"/epoch_\" + str(epoch)\n infer(test_reader=test_reader, use_cuda=use_cuda, model_path=epoch_path)\n", "# Copyright (c) 2019 
PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport numpy as np\nimport paddle.fluid as fluid\n\n\ndef log_softmax(logits, axis=-1):\n logsoftmax = logits - fluid.layers.log(\n fluid.layers.reduce_sum(fluid.layers.exp(logits), axis))\n return logsoftmax\n\n\ndef einsum4x4(equation, x, y):\n idx_x, idx_y, idx_z = re.split(\",|->\", equation)\n repeated_idx = list(set(idx_x + idx_y) - set(idx_z))\n\n unique_idx_x = list(set(idx_x) - set(idx_y))\n unique_idx_y = list(set(idx_y) - set(idx_x))\n common_idx = list(set(idx_x) & set(idx_y) - set(repeated_idx))\n\n new_idx_x = common_idx + unique_idx_x + repeated_idx\n new_idx_y = common_idx + unique_idx_y + repeated_idx\n new_idx_z = common_idx + unique_idx_x + unique_idx_y\n\n perm_x = [idx_x.index(i) for i in new_idx_x]\n perm_y = [idx_y.index(i) for i in new_idx_y]\n perm_z = [new_idx_z.index(i) for i in idx_z]\n\n x = fluid.layers.transpose(x, perm=perm_x)\n y = fluid.layers.transpose(y, perm=perm_y)\n z = fluid.layers.matmul(x=x, y=y, transpose_y=True)\n z = fluid.layers.transpose(z, perm=perm_z)\n return z\n\n\ndef positional_embedding(pos_seq, inv_freq, bsz=None):\n pos_seq = fluid.layers.reshape(pos_seq, [-1, 1])\n inv_freq = fluid.layers.reshape(inv_freq, [1, -1])\n sinusoid_inp = fluid.layers.matmul(pos_seq, inv_freq)\n pos_emb = fluid.layers.concat(\n input=[fluid.layers.sin(sinusoid_inp), fluid.layers.cos(sinusoid_inp)],\n axis=-1)\n pos_emb = fluid.layers.unsqueeze(pos_emb, [1])\n if bsz is not None:\n pos_emb = fluid.layers.expand(pos_emb, [1, bsz, 1])\n\n return pos_emb\n\n\ndef positionwise_ffn(inp,\n d_model,\n d_inner,\n dropout_prob,\n param_initializer=None,\n act_type='relu',\n name='ff'):\n \"\"\"Position-wise Feed-forward Network.\"\"\"\n if act_type not in ['relu', 'gelu']:\n raise ValueError('Unsupported activation type {}'.format(act_type))\n\n output = fluid.layers.fc(input=inp,\n size=d_inner,\n act=act_type,\n num_flatten_dims=2,\n param_attr=fluid.ParamAttr(\n name=name + '_layer_1_weight',\n initializer=param_initializer),\n bias_attr=name + '_layer_1_bias')\n output = fluid.layers.dropout(\n output,\n dropout_prob=dropout_prob,\n dropout_implementation=\"upscale_in_train\",\n is_test=False)\n output = fluid.layers.fc(output,\n size=d_model,\n num_flatten_dims=2,\n param_attr=fluid.ParamAttr(\n name=name + '_layer_2_weight',\n initializer=param_initializer),\n bias_attr=name + '_layer_2_bias')\n output = fluid.layers.dropout(\n output,\n dropout_prob=dropout_prob,\n dropout_implementation=\"upscale_in_train\",\n is_test=False)\n output = fluid.layers.layer_norm(\n output + inp,\n begin_norm_axis=len(output.shape) - 1,\n epsilon=1e-12,\n param_attr=fluid.ParamAttr(\n name=name + '_layer_norm_scale',\n initializer=fluid.initializer.Constant(1.)),\n bias_attr=fluid.ParamAttr(\n name + '_layer_norm_bias',\n initializer=fluid.initializer.Constant(0.)))\n return output\n\n\ndef head_projection(h, d_model, n_head, d_head, param_initializer, name=''):\n 
\"\"\"Project hidden states to a specific head with a 4D-shape.\"\"\"\n proj_weight = fluid.layers.create_parameter(\n shape=[d_model, n_head, d_head],\n dtype=h.dtype,\n attr=fluid.ParamAttr(\n name=name + '_weight', initializer=param_initializer),\n is_bias=False)\n # ibh,hnd->ibnd \n head = fluid.layers.mul(x=h,\n y=proj_weight,\n x_num_col_dims=2,\n y_num_col_dims=1)\n return head\n\n\ndef post_attention(h,\n attn_vec,\n d_model,\n n_head,\n d_head,\n dropout,\n param_initializer,\n residual=True,\n name=''):\n \"\"\"Post-attention processing.\"\"\"\n # post-attention projection (back to `d_model`)\n proj_o = fluid.layers.create_parameter(\n shape=[d_model, n_head, d_head],\n dtype=h.dtype,\n attr=fluid.ParamAttr(\n name=name + '_o_weight', initializer=param_initializer),\n is_bias=False)\n # ibnd,hnd->ibh\n proj_o = fluid.layers.transpose(proj_o, perm=[1, 2, 0])\n attn_out = fluid.layers.mul(x=attn_vec,\n y=proj_o,\n x_num_col_dims=2,\n y_num_col_dims=2)\n\n attn_out = fluid.layers.dropout(\n attn_out,\n dropout_prob=dropout,\n dropout_implementation=\"upscale_in_train\",\n is_test=False)\n\n if residual:\n output = fluid.layers.layer_norm(\n attn_out + h,\n begin_norm_axis=len(attn_out.shape) - 1,\n epsilon=1e-12,\n param_attr=fluid.ParamAttr(\n name=name + '_layer_norm_scale',\n initializer=fluid.initializer.Constant(1.)),\n bias_attr=fluid.ParamAttr(\n name + '_layer_norm_bias',\n initializer=fluid.initializer.Constant(0.)))\n else:\n output = fluid.layers.layer_norm(\n attn_out,\n begin_norm_axis=len(attn_out.shape) - 1,\n epsilon=1e-12,\n param_attr=fluid.ParamAttr(\n name=name + '_layer_norm_scale',\n initializer=fluid.initializer.Constant(1.)),\n bias_attr=fluid.ParamAttr(\n name + '_layer_norm_bias',\n initializer=fluid.initializer.Constant(0.)))\n\n return output\n\n\ndef abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, scale):\n \"\"\"Core absolute positional attention operations.\"\"\"\n\n attn_score = einsum4x4('ibnd,jbnd->ijbn', q_head, k_head)\n\n attn_score *= scale\n if attn_mask is not None:\n attn_score = attn_score - 1e30 * attn_mask\n\n # attention probability\n attn_prob = fluid.layers.softmax(attn_score, axis=1)\n attn_prob = fluid.layers.dropout(\n attn_prob,\n dropout_prob=dropatt,\n dropout_implementation=\"upscale_in_train\",\n is_test=False)\n\n # attention output\n attn_vec = einsum4x4('ijbn,jbnd->ibnd', attn_prob, v_head)\n\n return attn_vec\n\n\ndef rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,\n r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt, scale,\n name):\n \"\"\"Core relative positional attention operations.\"\"\"\n ## content based attention score\n ac = einsum4x4('ibnd,jbnd->ijbn',\n fluid.layers.elementwise_add(q_head, r_w_bias, 2), k_head_h)\n\n # position based attention score\n bd = einsum4x4('ibnd,jbnd->ijbn',\n fluid.layers.elementwise_add(q_head, r_r_bias, 2), k_head_r)\n\n bd = rel_shift(bd, klen=ac.shape[1])\n\n # segment based attention score\n if seg_mat is None:\n ef = 0\n else:\n seg_embed = fluid.layers.stack([seg_embed] * q_head.shape[0], axis=0)\n\n ef = einsum4x4('ibnd,isnd->ibns',\n fluid.layers.elementwise_add(q_head, r_s_bias, 2),\n seg_embed)\n ef = einsum4x4('ijbs,ibns->ijbn', seg_mat, ef)\n\n attn_score = (ac + bd + ef) * scale\n\n if attn_mask is not None:\n # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask\n attn_score = attn_score - 1e30 * attn_mask\n\n # attention probability\n attn_prob = fluid.layers.softmax(attn_score, axis=1)\n attn_prob = fluid.layers.dropout(\n 
attn_prob, dropatt, dropout_implementation=\"upscale_in_train\")\n\n # attention output\n attn_vec = einsum4x4('ijbn,jbnd->ibnd', attn_prob, v_head_h)\n return attn_vec\n\n\ndef rel_shift(x, klen=-1):\n \"\"\"perform relative shift to form the relative attention score.\"\"\"\n x_size = x.shape\n x = fluid.layers.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])\n x = fluid.layers.slice(x, axes=[0], starts=[1], ends=[x_size[1]])\n x = fluid.layers.reshape(x,\n [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])\n x = fluid.layers.slice(x, axes=[1], starts=[0], ends=[klen])\n\n return x\n\n\ndef _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None):\n \"\"\"cache hidden states into memory.\"\"\"\n if mem_len is None or mem_len == 0:\n return None\n else:\n if reuse_len is not None and reuse_len > 0:\n curr_out = curr_out[:reuse_len]\n\n if prev_mem is None:\n new_mem = curr_out[-mem_len:]\n else:\n # ported from tf.concat in the original TF implementation\n new_mem = fluid.layers.concat([prev_mem, curr_out], axis=0)[-mem_len:]\n\n new_mem.stop_gradient = True\n return new_mem\n\n\ndef relative_positional_encoding(qlen,\n klen,\n d_model,\n clamp_len,\n attn_type,\n bi_data,\n bsz=None,\n dtype=None):\n \"\"\"create relative positional encoding.\"\"\"\n freq_seq = fluid.layers.range(0, d_model, 2.0, 'float32')\n if dtype is not None and dtype != 'float32':\n freq_seq = fluid.layers.cast(freq_seq, dtype=dtype)\n inv_freq = 1 / (10000**(freq_seq / d_model))\n\n if attn_type == 'bi':\n beg, end = klen, -qlen\n elif attn_type == 'uni':\n beg, end = klen, -1\n else:\n raise ValueError('Unknown `attn_type` {}.'.format(attn_type))\n\n if bi_data:\n fwd_pos_seq = fluid.layers.range(beg, end, -1.0, 'float32')\n bwd_pos_seq = fluid.layers.range(-beg, -end, 1.0, 'float32')\n\n if dtype is not None and dtype != 'float32':\n # cast to the requested dtype (the original cast to 'float32' here\n # was a no-op)\n fwd_pos_seq = fluid.layers.cast(fwd_pos_seq, dtype=dtype)\n bwd_pos_seq = fluid.layers.cast(bwd_pos_seq, dtype=dtype)\n\n if clamp_len > 0:\n fwd_pos_seq = fluid.layers.clip(fwd_pos_seq, -clamp_len, clamp_len)\n bwd_pos_seq = fluid.layers.clip(bwd_pos_seq, -clamp_len, clamp_len)\n\n if bsz is not None:\n # With bi_data, the batch size should be divisible by 2.\n assert bsz % 2 == 0\n fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)\n bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)\n else:\n fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq)\n bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq)\n\n pos_emb = fluid.layers.concat([fwd_pos_emb, bwd_pos_emb], axis=1)\n else:\n fwd_pos_seq = fluid.layers.range(beg, end, -1.0, 'float32')\n if dtype is not None and dtype != 'float32':\n fwd_pos_seq = fluid.layers.cast(fwd_pos_seq, dtype=dtype)\n if clamp_len > 0:\n fwd_pos_seq = fluid.layers.clip(fwd_pos_seq, -clamp_len, clamp_len)\n pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz)\n fluid.layers.reshape(pos_emb, [2 * qlen, -1, d_model], inplace=True)\n return pos_emb\n\n\ndef multihead_attn(q,\n k,\n v,\n attn_mask,\n d_model,\n n_head,\n d_head,\n dropout,\n dropatt,\n kernel_initializer,\n residual=True):\n \"\"\"Standard multi-head attention with absolute positional embedding.\"\"\"\n\n scale = 1 / (d_head**0.5)\n # NOTE: the original TF code wrapped this block in\n # tf.variable_scope(scope, reuse=reuse); in this port parameters are\n # scoped via the name arguments passed to head_projection instead.\n # attention heads\n q_head = head_projection(q, d_model, n_head, d_head, kernel_initializer,\n 'q')\n k_head = head_projection(k, d_model, n_head, d_head, kernel_initializer,\n 'k')\n v_head = head_projection(v, d_model, n_head, d_head, kernel_initializer,\n 'v')\n\n # attention vector\n attn_vec = abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt,\n scale)\n\n # post processing\n output = post_attention(v, attn_vec, d_model, n_head, d_head, dropout,\n kernel_initializer, residual)\n\n return output\n\n\ndef rel_multihead_attn(h,\n r,\n r_w_bias,\n r_r_bias,\n seg_mat,\n r_s_bias,\n seg_embed,\n attn_mask,\n mems,\n d_model,\n n_head,\n d_head,\n dropout,\n dropatt,\n initializer,\n name=''):\n \"\"\"Multi-head attention with relative positional encoding.\"\"\"\n\n scale = 1 / (d_head**0.5)\n if mems is not None and len(mems.shape) > 1:\n cat = fluid.layers.concat([mems, h], 0)\n else:\n cat = h\n\n # content heads\n q_head_h = head_projection(\n h, d_model, n_head, d_head, initializer, name=name + '_rel_attn_q')\n k_head_h = head_projection(\n cat, d_model, n_head, d_head, initializer, name=name + '_rel_attn_k')\n v_head_h = head_projection(\n cat, d_model, n_head, d_head, initializer, name=name + '_rel_attn_v')\n\n # positional heads\n k_head_r = head_projection(\n r, d_model, n_head, d_head, initializer, name=name + '_rel_attn_r')\n\n # core attention ops\n attn_vec = rel_attn_core(q_head_h, k_head_h, v_head_h, k_head_r, seg_embed,\n seg_mat, r_w_bias, r_r_bias, r_s_bias, attn_mask,\n dropatt, scale, name)\n\n # post processing\n output = post_attention(\n h,\n attn_vec,\n d_model,\n n_head,\n d_head,\n dropout,\n initializer,\n name=name + '_rel_attn')\n\n return output\n\n\ndef transformer_xl(inp_k,\n n_token,\n n_layer,\n d_model,\n n_head,\n d_head,\n d_inner,\n dropout,\n dropatt,\n attn_type,\n bi_data,\n initializer,\n mem_len=None,\n inp_q=None,\n mems=None,\n same_length=False,\n clamp_len=-1,\n untie_r=False,\n input_mask=None,\n perm_mask=None,\n seg_id=None,\n reuse_len=None,\n ff_activation='relu',\n target_mapping=None,\n use_fp16=False,\n name='',\n **kwargs):\n \"\"\"\n Defines a Transformer-XL computation graph with additional\n\tsupport for XLNet.\n\n Args:\n\tinp_k: int64 Tensor in shape [len, bsz], the input token IDs.\n\tseg_id: int64 Tensor in shape [len, bsz], the input segment IDs.\n\tinput_mask: float32 Tensor in shape [len, bsz], the input mask.\n\t 0 for real tokens and 1 for padding.\n\tmems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory\n\t from previous batches. 
The length of the list equals n_layer.\n\t If None, no memory is used.\n\tperm_mask: float32 Tensor in shape [len, len, bsz].\n\t If perm_mask[i, j, k] = 0, i attends to j in batch k;\n\t if perm_mask[i, j, k] = 1, i does not attend to j in batch k.\n\t If None, each position attends to all the others.\n\ttarget_mapping: float32 Tensor in shape [num_predict, len, bsz].\n\t If target_mapping[i, j, k] = 1, the i-th predict in batch k is\n\t on the j-th token.\n\t Only used during pretraining for partial prediction.\n\t Set to None during finetuning.\n\tinp_q: float32 Tensor in shape [len, bsz].\n\t 1 for tokens with losses and 0 for tokens without losses.\n\t Only used during pretraining for two-stream attention.\n\t Set to None during finetuning.\n\tn_layer: int, the number of layers.\n\td_model: int, the hidden size.\n\tn_head: int, the number of attention heads.\n\td_head: int, the dimension size of each attention head.\n\td_inner: int, the hidden size in feed-forward layers.\n\tff_activation: str, \"relu\" or \"gelu\".\n\tuntie_r: bool, whether to untie the biases in attention.\n\tn_token: int, the vocab size.\n\tis_training: bool, whether in training mode.\n\tuse_tpu: bool, whether TPUs are used.\n\tuse_fp16: bool, use float16 instead of float32.\n\tdropout: float, dropout rate.\n\tdropatt: float, dropout rate on attention probabilities.\n\tinit: str, the initialization scheme, either \"normal\" or \"uniform\".\n\tinit_range: float, initialize the parameters with a uniform distribution\n\t in [-init_range, init_range]. Only effective when init=\"uniform\".\n\tinit_std: float, initialize the parameters with a normal distribution\n\t with mean 0 and stddev init_std. Only effective when init=\"normal\".\n\tmem_len: int, the number of tokens to cache.\n\treuse_len: int, the number of tokens in the current batch to be cached\n\t and reused in the future.\n\tbi_data: bool, whether to use bidirectional input pipeline.\n\t Usually set to True during pretraining and False during finetuning.\n\tclamp_len: int, clamp all relative distances larger than clamp_len.\n\t -1 means no clamping.\n\tsame_length: bool, whether to use the same attention length for each token.\n\tsummary_type: str, \"last\", \"first\", \"mean\", or \"attn\". 
The method\n\t to pool the input to get a vector representation.\n \"\"\"\n print('memory input {}'.format(mems))\n data_type = \"float16\" if use_fp16 else \"float32\"\n print('Use float type {}'.format(data_type))\n\n qlen = inp_k.shape[0]\n mlen = mems[0].shape[0] if mems is not None else 0\n klen = mlen + qlen\n bsz = fluid.layers.slice(\n fluid.layers.shape(inp_k), axes=[0], starts=[1], ends=[2])\n\n ##### Attention mask\n # causal attention mask\n if attn_type == 'uni':\n attn_mask = fluid.layers.create_global_var(\n name='attn_mask',\n shape=[qlen, klen, 1, 1],\n value=0.0,\n dtype=data_type,\n persistable=True)\n elif attn_type == 'bi':\n attn_mask = None\n else:\n raise ValueError('Unsupported attention type: {}'.format(attn_type))\n\n # data mask: input mask & perm mask\n if input_mask is not None and perm_mask is not None:\n data_mask = fluid.layers.unsqueeze(input_mask, [0]) + perm_mask\n elif input_mask is not None and perm_mask is None:\n data_mask = fluid.layers.unsqueeze(input_mask, [0])\n elif input_mask is None and perm_mask is not None:\n data_mask = perm_mask\n else:\n data_mask = None\n\n if data_mask is not None:\n # all mems can be attended to\n mems_mask = fluid.layers.zeros(\n shape=[data_mask.shape[0], mlen, 1], dtype='float32')\n mems_mask = fluid.layers.expand(mems_mask, [1, 1, bsz])\n data_mask = fluid.layers.concat([mems_mask, data_mask], 1)\n if attn_mask is None:\n attn_mask = fluid.layers.unsqueeze(data_mask, [-1])\n else:\n attn_mask += fluid.layers.unsqueeze(data_mask, [-1])\n if attn_mask is not None:\n attn_mask = fluid.layers.cast(attn_mask > 0, dtype=data_type)\n\n if attn_mask is not None:\n non_tgt_mask = fluid.layers.diag(\n np.array([-1] * qlen).astype(data_type))\n non_tgt_mask = fluid.layers.concat(\n [fluid.layers.zeros(\n [qlen, mlen], dtype=data_type), non_tgt_mask],\n axis=-1)\n\n attn_mask = fluid.layers.expand(attn_mask, [qlen, 1, 1, 1])\n non_tgt_mask = fluid.layers.unsqueeze(non_tgt_mask, axes=[2, 3])\n non_tgt_mask = fluid.layers.expand(non_tgt_mask, [1, 1, bsz, 1])\n non_tgt_mask = fluid.layers.cast(\n (attn_mask + non_tgt_mask) > 0, dtype=data_type)\n non_tgt_mask.stop_gradient = True\n else:\n non_tgt_mask = None\n\n if untie_r:\n r_w_bias = fluid.layers.create_parameter(\n shape=[n_layer, n_head, d_head],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_r_w_bias', initializer=initializer),\n is_bias=True)\n r_w_bias = [\n fluid.layers.slice(\n r_w_bias, axes=[0], starts=[i], ends=[i + 1])\n for i in range(n_layer)\n ]\n r_w_bias = [\n fluid.layers.squeeze(\n r_w_bias[i], axes=[0]) for i in range(n_layer)\n ]\n r_r_bias = fluid.layers.create_parameter(\n shape=[n_layer, n_head, d_head],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_r_r_bias', initializer=initializer),\n is_bias=True)\n r_r_bias = [\n fluid.layers.slice(\n r_r_bias, axes=[0], starts=[i], ends=[i + 1])\n for i in range(n_layer)\n ]\n r_r_bias = [\n fluid.layers.squeeze(\n r_r_bias[i], axes=[0]) for i in range(n_layer)\n ]\n else:\n r_w_bias = fluid.layers.create_parameter(\n shape=[n_head, d_head],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_r_w_bias', initializer=initializer),\n is_bias=True)\n r_r_bias = fluid.layers.create_parameter(\n shape=[n_head, d_head],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_r_r_bias', initializer=initializer),\n is_bias=True)\n\n lookup_table = fluid.layers.create_parameter(\n shape=[n_token, d_model],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_word_embedding', 
initializer=initializer),\n is_bias=False)\n word_emb_k = fluid.layers.embedding(\n input=inp_k,\n size=[n_token, d_model],\n dtype=data_type,\n param_attr=fluid.ParamAttr(\n name=name + '_word_embedding', initializer=initializer))\n\n if inp_q is not None:\n pass\n\n output_h = fluid.layers.dropout(\n word_emb_k,\n dropout_prob=dropout,\n dropout_implementation=\"upscale_in_train\")\n\n if inp_q is not None:\n pass\n\n if seg_id is not None:\n if untie_r:\n r_s_bias = fluid.layers.create_parameter(\n shape=[n_layer, n_head, d_head],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_r_s_bias', initializer=initializer),\n is_bias=True)\n r_s_bias = [\n fluid.layers.slice(\n r_s_bias, axes=[0], starts=[i], ends=[i + 1])\n for i in range(n_layer)\n ]\n r_s_bias = [\n fluid.layers.squeeze(\n r_s_bias[i], axes=[0]) for i in range(n_layer)\n ]\n else:\n r_s_bias = fluid.layers.create_parameter(\n shape=[n_head, d_head],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_r_s_bias', initializer=initializer),\n is_bias=True)\n\n seg_embed = fluid.layers.create_parameter(\n shape=[n_layer, 2, n_head, d_head],\n dtype=data_type,\n attr=fluid.ParamAttr(\n name=name + '_seg_embed', initializer=initializer))\n seg_embed = [\n fluid.layers.slice(\n seg_embed, axes=[0], starts=[i], ends=[i + 1])\n for i in range(n_layer)\n ]\n seg_embed = [\n fluid.layers.squeeze(\n seg_embed[i], axes=[0]) for i in range(n_layer)\n ]\n\n # Convert `seg_id` to one-hot seg_mat\n # seg_id: [bsz, qlen, 1]\n mem_pad = fluid.layers.fill_constant_batch_size_like(\n input=seg_id, shape=[-1, mlen], value=0, dtype='int64')\n # cat_ids: [bsz, klen, 1]\n cat_ids = fluid.layers.concat(input=[mem_pad, seg_id], axis=1)\n seg_id = fluid.layers.stack([seg_id] * klen, axis=2)\n cat_ids = fluid.layers.stack([cat_ids] * qlen, axis=2)\n cat_ids = fluid.layers.transpose(cat_ids, perm=[0, 2, 1])\n\n # seg_mat: [bsz, qlen, klen]\n seg_mat = fluid.layers.cast(\n fluid.layers.logical_not(fluid.layers.equal(seg_id, cat_ids)),\n dtype='int64')\n\n seg_mat = fluid.layers.transpose(seg_mat, perm=[1, 2, 0])\n seg_mat = fluid.layers.unsqueeze(seg_mat, [-1])\n seg_mat = fluid.layers.one_hot(seg_mat, 2)\n seg_mat.stop_gradient = True\n else:\n seg_mat = None\n\n pos_emb = relative_positional_encoding(\n qlen,\n klen,\n d_model,\n clamp_len,\n attn_type,\n bi_data,\n bsz=bsz,\n dtype=data_type)\n pos_emb = fluid.layers.dropout(\n pos_emb, dropout, dropout_implementation=\"upscale_in_train\")\n pos_emb.stop_gradient = True\n ##### Attention layers\n if mems is None:\n mems = [None] * n_layer\n\n for i in range(n_layer):\n # cache new mems\n #new_mems.append(_cache_mem(output_h, mems[i], mem_len, reuse_len)) \n\n # segment bias\n if seg_id is None:\n r_s_bias_i = None\n seg_embed_i = None\n else:\n r_s_bias_i = r_s_bias if not untie_r else r_s_bias[i]\n seg_embed_i = seg_embed[i]\n\n if inp_q is not None:\n pass\n else:\n output_h = rel_multihead_attn(\n h=output_h,\n r=pos_emb,\n r_w_bias=r_w_bias if not untie_r else r_w_bias[i],\n r_r_bias=r_r_bias if not untie_r else r_r_bias[i],\n seg_mat=seg_mat,\n r_s_bias=r_s_bias_i,\n seg_embed=seg_embed_i,\n attn_mask=non_tgt_mask,\n mems=mems[i],\n d_model=d_model,\n n_head=n_head,\n d_head=d_head,\n dropout=dropout,\n dropatt=dropatt,\n initializer=initializer,\n name=name + '_layer_{}'.format(i))\n\n if inp_q is not None:\n pass\n output_h = positionwise_ffn(\n inp=output_h,\n d_model=d_model,\n d_inner=d_inner,\n dropout_prob=dropout,\n param_initializer=initializer,\n act_type=ff_activation,\n 
name=name + '_layer_{}_ff'.format(i))\n if inp_q is not None:\n # NOTE: the two-stream (query) branch is stubbed out in this port, so\n # `output_g` is never produced; fail loudly instead of raising a\n # NameError here.\n raise NotImplementedError(\n \"two-stream attention (inp_q) is not supported in this port\")\n else:\n output = fluid.layers.dropout(\n output_h, dropout, dropout_implementation=\"upscale_in_train\")\n new_mems = None\n return output, new_mems, lookup_table\n\n\ndef lm_loss(hidden,\n target,\n n_token,\n d_model,\n initializer,\n lookup_table=None,\n tie_weight=False,\n bi_data=True):\n\n if tie_weight:\n assert lookup_table is not None, \\\n 'lookup_table cannot be None for tie_weight'\n softmax_w = lookup_table\n else:\n softmax_w = fluid.layers.create_parameter(\n shape=[n_token, d_model],\n dtype=hidden.dtype,\n attr=fluid.ParamAttr(\n name='model_loss_weight', initializer=initializer),\n is_bias=False)\n\n softmax_b = fluid.layers.create_parameter(\n shape=[n_token],\n dtype=hidden.dtype,\n attr=fluid.ParamAttr(\n name='model_lm_loss_bias', initializer=initializer),\n is_bias=False)\n\n logits = fluid.layers.matmul(\n x=hidden, y=softmax_w, transpose_y=True) + softmax_b\n\n # the fluid op is softmax_with_cross_entropy; the original call used the\n # TF name softmax_cross_entropy_with_logits, which does not exist here\n loss = fluid.layers.softmax_with_cross_entropy(\n logits=logits, label=target)\n\n return loss\n\n\ndef summarize_sequence(summary_type,\n hidden,\n d_model,\n n_head,\n d_head,\n dropout,\n dropatt,\n input_mask,\n initializer,\n scope=None,\n reuse=None,\n use_proj=True,\n name=''):\n \"\"\"\n Different classification tasks may or may not share the same parameters\n to summarize the sequence features.\n If shared, one can keep the `scope` to the default value `None`.\n Otherwise, one should specify a different `scope` for each task.\n \"\"\"\n if summary_type == 'last':\n summary = hidden[-1]\n elif summary_type == 'first':\n summary = hidden[0]\n elif summary_type == 'mean':\n summary = fluid.layers.reduce_mean(hidden, dim=0)\n elif summary_type == 'attn':\n bsz = fluid.layers.slice(\n fluid.layers.shape(hidden), axes=[0], starts=[1], ends=[2])\n\n # ported from tf.get_variable/tf.tile in the original implementation\n summary_bias = fluid.layers.create_parameter(\n shape=[d_model],\n dtype=hidden.dtype,\n attr=fluid.ParamAttr(\n name=name + '_attn_summary_bias', initializer=initializer),\n is_bias=True)\n summary_bias = fluid.layers.reshape(summary_bias, [1, 1, d_model])\n summary_bias = fluid.layers.expand(summary_bias, [1, bsz, 1])\n\n if input_mask is not None:\n input_mask = fluid.layers.unsqueeze(input_mask, axes=[0, 3])\n\n summary = multihead_attn(\n summary_bias,\n hidden,\n hidden,\n input_mask,\n d_model,\n n_head,\n d_head,\n dropout,\n dropatt,\n initializer,\n residual=False)\n summary = summary[0]\n else:\n raise ValueError('Unsupported summary type {}'.format(summary_type))\n\n # use another projection as in BERT\n if use_proj:\n summary = fluid.layers.fc(input=summary,\n size=d_model,\n act='tanh',\n param_attr=fluid.ParamAttr(\n name=name + '_summary_weight',\n initializer=initializer),\n bias_attr=name + '_summary_bias')\n\n summary = fluid.layers.dropout(\n summary,\n dropout_prob=dropout,\n dropout_implementation=\"upscale_in_train\")\n\n return summary\n\n\ndef classification_loss(hidden,\n labels,\n n_class,\n initializer,\n name,\n reuse=None,\n return_logits=False):\n \"\"\"\n Different classification tasks should use different parameter names to ensure\n different dense layers (parameters) are used to produce the logits.\n An exception will be in transfer learning, where one hopes to transfer\n the classification weights.\n \"\"\"\n\n logits = fluid.layers.fc(input=hidden,\n size=n_class,\n param_attr=fluid.ParamAttr(\n name=name + '_logit_weight',\n initializer=initializer),\n bias_attr=name + '_logit_bias')\n\n one_hot_target = fluid.layers.one_hot(labels, depth=n_class)\n loss = -1.0 * fluid.layers.reduce_sum(\n 
log_softmax(logits) * one_hot_target, dim=-1)\n\n if return_logits:\n return loss, logits\n\n return loss\n\n\ndef regression_loss(hidden, labels, initializer, name, return_logits=False):\n\n logits = fluid.layers.fc(input=hidden,\n size=1,\n param_attr=fluid.ParamAttr(\n name=name + '_logits_weight',\n initializer=initializer),\n bias_attr=name + '_logits_bias')\n\n loss = fluid.layers.square(logits - labels)\n\n if return_logits:\n return loss, logits\n\n return loss\n", "import paddle.fluid as fluid\r\nimport paddle.fluid.dygraph as dygraph\r\n\r\nimport ltr.actors as actors\r\nimport ltr.data.transforms as dltransforms\r\nfrom ltr.data import processing, sampler, loader\r\nfrom ltr.dataset import ImagenetVID, Got10k\r\nfrom ltr.models.siamese.siam import siamfc_alexnet\r\nfrom ltr.trainers import LTRTrainer\r\nimport numpy as np\r\nimport cv2 as cv\r\nfrom PIL import Image, ImageEnhance\r\n\r\n\r\nclass DataAug(dltransforms.Transform):\r\n def __init__(self):\r\n pass\r\n\r\n def random_blur(self, img):\r\n k = np.random.choice([3, 5, 7])\r\n return cv.GaussianBlur(img, (k, k), sigmaX=0, sigmaY=0)\r\n\r\n def brightness(self, img):\r\n img = Image.fromarray(img.astype('uint8'))\r\n enh_bri = ImageEnhance.Brightness(img)\r\n brightness = np.random.choice(np.linspace(0.5, 1.25, 4))\r\n img_brighted = enh_bri.enhance(brightness)\r\n\r\n return np.array(img_brighted)\r\n\r\n def contrast(self, img):\r\n img = Image.fromarray(img.astype('uint8'))\r\n enh_con = ImageEnhance.Contrast(img)\r\n contrast = np.random.choice(np.linspace(0.5, 1.25, 4))\r\n image_contrasted = enh_con.enhance(contrast)\r\n\r\n return np.array(image_contrasted)\r\n\r\n def no_aug(self, img):\r\n return img\r\n\r\n def flip(self, img):\r\n return cv.flip(img, 1)\r\n\r\n def transform(self, img, *args):\r\n func = np.random.choice(\r\n [self.contrast, self.random_blur, self.brightness, self.flip])\r\n return func(img)\r\n\r\n\r\ndef run(settings):\r\n # Most common settings are assigned in the settings struct\r\n settings.description = 'SiamFC with Alexnet backbone and trained with vid'\r\n settings.print_interval = 100 # How often to print loss and other info\r\n settings.batch_size = 8 # Batch size\r\n settings.num_workers = 8 # Number of workers for image loading\r\n settings.normalize_mean = [0., 0., 0.] # Normalize mean\r\n settings.normalize_std = [1 / 255., 1 / 255., 1 / 255.] 
# Normalize std\r\n settings.search_area_factor = {\r\n 'train': 1.0,\r\n 'test': 2.0078740157480315\r\n } # roughly the same as SiamFC\r\n settings.output_sz = {'train': 127, 'test': 255}\r\n settings.scale_type = 'context'\r\n settings.border_type = 'meanpad'\r\n\r\n # Settings for the image sample and proposal generation\r\n settings.center_jitter_factor = {'train': 0, 'test': 0}\r\n settings.scale_jitter_factor = {'train': 0, 'test': 0.}\r\n\r\n # Train datasets\r\n vid_train = ImagenetVID()\r\n\r\n # Validation datasets\r\n got10k_val = vid_train #Got10k(split='val')\r\n\r\n # The joint augmentation transform, that is applied to the pairs jointly\r\n transform_joint = dltransforms.ToGrayscale(probability=0.25)\r\n\r\n # The augmentation transform applied to the training set (individually to each image in the pair)\r\n transform_exemplar = dltransforms.Compose([\r\n dltransforms.ToArray(), dltransforms.Normalize(\r\n mean=settings.normalize_mean, std=settings.normalize_std)\r\n ])\r\n transform_instance = dltransforms.Compose([\r\n DataAug(), dltransforms.ToArray(), dltransforms.Normalize(\r\n mean=settings.normalize_mean, std=settings.normalize_std)\r\n ])\r\n\r\n # Data processing to do on the training pairs\r\n data_processing_train = processing.SiamFCProcessing(\r\n search_area_factor=settings.search_area_factor,\r\n output_sz=settings.output_sz,\r\n center_jitter_factor=settings.center_jitter_factor,\r\n scale_jitter_factor=settings.scale_jitter_factor,\r\n scale_type=settings.scale_type,\r\n border_type=settings.border_type,\r\n mode='sequence',\r\n train_transform=transform_exemplar,\r\n test_transform=transform_instance,\r\n joint_transform=transform_joint)\r\n\r\n # Data processing to do on the validation pairs\r\n data_processing_val = processing.SiamFCProcessing(\r\n search_area_factor=settings.search_area_factor,\r\n output_sz=settings.output_sz,\r\n center_jitter_factor=settings.center_jitter_factor,\r\n scale_jitter_factor=settings.scale_jitter_factor,\r\n scale_type=settings.scale_type,\r\n border_type=settings.border_type,\r\n mode='sequence',\r\n transform=transform_exemplar,\r\n joint_transform=transform_joint)\r\n\r\n # The sampler for training\r\n dataset_train = sampler.ATOMSampler(\r\n [vid_train], [1, ],\r\n samples_per_epoch=6650 * settings.batch_size,\r\n max_gap=100,\r\n processing=data_processing_train)\r\n\r\n # The loader for training\r\n train_loader = loader.LTRLoader(\r\n 'train',\r\n dataset_train,\r\n training=True,\r\n batch_size=settings.batch_size,\r\n num_workers=settings.num_workers,\r\n stack_dim=1)\r\n\r\n # The sampler for validation\r\n dataset_val = sampler.ATOMSampler(\r\n [got10k_val], [1, ],\r\n samples_per_epoch=1000 * settings.batch_size,\r\n max_gap=100,\r\n processing=data_processing_val)\r\n\r\n # The loader for validation\r\n val_loader = loader.LTRLoader(\r\n 'val',\r\n dataset_val,\r\n training=False,\r\n batch_size=settings.batch_size,\r\n num_workers=settings.num_workers,\r\n epoch_interval=5,\r\n stack_dim=1)\r\n\r\n # create network, set objective, create optimizer, learning rate scheduler, trainer\r\n with dygraph.guard():\r\n # Create network\r\n net = siamfc_alexnet()\r\n\r\n # Create actor, which wraps network and objective\r\n actor = actors.SiamFCActor(\r\n net=net,\r\n objective=None,\r\n batch_size=settings.batch_size,\r\n shape=(17, 17),\r\n radius=16,\r\n stride=8)\r\n\r\n # Set to training mode\r\n actor.train()\r\n\r\n # define optimizer and learning rate\r\n lr_scheduler = fluid.layers.exponential_decay(\r\n 
learning_rate=0.01,\r\n decay_steps=6650,\r\n decay_rate=0.8685,\r\n staircase=True)\r\n regularizer = fluid.regularizer.L2DecayRegularizer(\r\n regularization_coeff=0.0005)\r\n optimizer = fluid.optimizer.Momentum(\r\n momentum=0.9,\r\n regularization=regularizer,\r\n parameter_list=net.parameters(),\r\n learning_rate=lr_scheduler)\r\n\r\n trainer = LTRTrainer(actor, [train_loader], optimizer, settings,\r\n lr_scheduler)\r\n trainer.train(50, load_latest=False, fail_safe=False)\r\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle.fluid as fluid\nimport pointnet_lib\n\n\ndef gather_point_np(points, index):\n result = []\n for i in range(len(index)):\n a = points[i][index[i]]\n result.append(a.tolist())\n return result\n\n\nclass TestGatherPointOp(unittest.TestCase):\n def test_check_output(self):\n x_shape = (1, 512, 3)\n x_type = 'float32'\n idx_shape = (1, 32)\n idx_type = 'int32'\n\n x = fluid.layers.data(\n name='x', shape=x_shape, dtype=x_type, append_batch_size=False)\n idx = fluid.layers.data(\n name='idx', shape=idx_shape, dtype=idx_type, append_batch_size=False)\n y = pointnet_lib.gather_point(x, idx)\n\n x_np = np.random.uniform(-10, 10, x_shape).astype(x_type)\n idx_np = np.random.randint(0, x_shape[1], idx_shape).astype(idx_type)\n out_np = gather_point_np(x_np, idx_np)\n\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n outs = exe.run(feed={'x': x_np, 'idx': idx_np}, fetch_list=[y])\n\n self.assertTrue(np.allclose(outs[0], out_np))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.arange", "numpy.argmax", "numpy.mean", "numpy.argpartition", "numpy.average", "numpy.sum" ], [ "numpy.array" ], [ "numpy.asarray", "numpy.hstack", "numpy.vstack" ], [ "scipy.stats.pearsonr", "numpy.argmax", "numpy.average", "numpy.array", "numpy.sum" ], [ "numpy.concatenate" ], [ "numpy.array" ], [ "numpy.array", "numpy.linspace", "numpy.random.choice" ], [ "numpy.random.uniform", "numpy.allclose", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Faiz99khan/ISL_hand_gesture_recognition_in_real-time
[ "dade99478e9b37440ebe7fb7842d451582132f0a" ]
[ "models/resnet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\n\n__all__ = [\n 'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet200'\n]\n\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n\n\ndef downsample_basic_block(x, planes, stride):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.Tensor(\n out.size(0), planes - out.size(1), out.size(2), out.size(3),\n out.size(4)).zero_()\n if isinstance(out.data, torch.cuda.FloatTensor):\n zero_pads = zero_pads.cuda()\n\n out = Variable(torch.cat([out.data, zero_pads], dim=1))\n\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3x3(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n self.conv2 = nn.Conv3d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self,\n block,\n layers,\n sample_size,\n sample_duration,\n shortcut_type='B',\n num_classes=400):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv3d(\n 3,\n 64,\n kernel_size=7,\n stride=(1, 2, 2),\n padding=(3, 3, 3),\n bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)\n self.layer2 = self._make_layer(\n block, 128, layers[1], shortcut_type, stride=2)\n self.layer3 = self._make_layer(\n block, 256, layers[2], shortcut_type, stride=2)\n self.layer4 = self._make_layer(\n block, 512, layers[3], shortcut_type, stride=2)\n last_duration = int(math.ceil(sample_duration / 16))\n last_size = int(math.ceil(sample_size / 32))\n self.avgpool = nn.AvgPool3d(\n (last_duration, last_size, last_size), stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, 
nn.Conv3d):\n m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(\n downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n nn.Conv3d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef get_fine_tuning_parameters(model, ft_portion):\n if ft_portion == \"complete\":\n return model.parameters()\n\n elif ft_portion == \"last_layer\":\n ft_module_names = []\n ft_module_names.append('classifier')\n\n parameters = []\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ft_module in k:\n parameters.append({'params': v})\n break\n else:\n parameters.append({'params': v, 'lr': 0.0})\n return parameters\n\n else:\n raise ValueError(\"Unsupported ft_portion: 'complete' or 'last_layer' expected\")\n\n\ndef resnet10(**kwargs):\n \"\"\"Constructs a ResNet-10 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model\n\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef resnet152(**kwargs):\n \"\"\"Constructs a ResNet-152 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\n\ndef resnet200(**kwargs):\n \"\"\"Constructs a ResNet-200 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model\n" ]
[ [ "torch.nn.AvgPool3d", "torch.nn.Sequential", "torch.nn.init.kaiming_normal", "torch.cat", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.Linear", "torch.nn.functional.avg_pool3d", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
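A minimal usage sketch for the 3D ResNet record above, assuming the file is importable as a module; sample_size=112 and sample_duration=16 are illustrative values, not values fixed by the source.
import torch
# Hypothetical smoke test: a batch of two 16-frame RGB clips at 112x112.
# Note: nn.init.kaiming_normal may emit a deprecation warning on newer torch.
model = resnet18(sample_size=112, sample_duration=16, num_classes=400)
clip = torch.randn(2, 3, 16, 112, 112)  # (batch, channels, frames, height, width)
logits = model(clip)
print(logits.shape)  # torch.Size([2, 400])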
ghmagazine/python_ml_book
[ "57e874fd4fa86abaa2e2d032d18946942cf50c42" ]
[ "03/evaluation.py" ]
[ "from sklearn import tree\nfrom sklearn.datasets import load_wine\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nwine = load_wine()\ndata = wine.data\ntarget = wine.target\nX_train, X_test, Y_train, Y_test = train_test_split(data, target, test_size=0.2, random_state=0)\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X_train, Y_train)\n\n# Predict the labels of the test data\nY_pred = clf.predict(X_test)\n\n# Show precision and recall for each class\nprint(classification_report(Y_test, Y_pred, target_names=wine.target_names))\n" ]
[ [ "sklearn.tree.DecisionTreeClassifier", "sklearn.datasets.load_wine", "sklearn.metrics.classification_report", "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
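As a complement to the classification_report call above, a small sketch using the same split and standard sklearn.metrics functions to add overall accuracy and a confusion matrix.
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

X, y = load_wine(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
clf = DecisionTreeClassifier().fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(accuracy_score(y_test, y_pred))    # fraction of correct test predictions
print(confusion_matrix(y_test, y_pred))  # rows: true class, columns: predicted class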
cdbethune/d3m-primitives
[ "5530da1b8efba7de8cec6890401c5d4091acd45a" ]
[ "scripts/plot_forecasting_comparison.py" ]
[ "from typing import List\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style=\"whitegrid\")\n\nfrom compare_forecasting_methods import pred_lengths\n\ndef to_query(\n    elements: List[str],\n):\n    if len(elements) == 1:\n        return elements[0]\n    else:\n        return '(' + ' or '.join(elements) + ')'\n\ndef plot(\n    metrics: str = 'ts_metrics.csv',\n    datasets: str = 'Sorghum',\n    horizon: str = 'Short',\n    metric: str = 'MAPE',\n    predictors: List[str] = [\n        'DeepAR',\n        'DeepFactor',\n        'DeepState',\n        'NBEATS',\n        'NBEATS-Interp',\n        'MQCNN',\n        'MQRNN',\n        'WaveNet',\n        'NPTS',\n    ]\n):\n\n    metrics = pd.read_csv(metrics)\n\n    if datasets == 'Sorghum':\n        dataset_names = [\n            'LL1_terra_canopy_height_long_form_s4_70_MIN_METADATA',\n            'LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA',\n            'LL1_terra_canopy_height_long_form_s4_90_MIN_METADATA',\n            'LL1_terra_canopy_height_long_form_s4_100_MIN_METADATA',\n            'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA',\n        ]\n    elif datasets == 'Malnutrition':\n        dataset_names = [\n            'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA',\n            'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA'\n        ]\n    else:\n        raise ValueError(\"'Datasets' must be one of 'Sorghum' or 'Malnutrition'\")\n\n    if horizon == 'Short':\n        pred_ls = [pred_lengths[dataset_name][0] for dataset_name in dataset_names]\n    elif horizon == 'Long':\n        pred_ls = [pred_lengths[dataset_name][1] for dataset_name in dataset_names]\n    else:\n        raise ValueError(\"'Horizon' must be one of 'Short' or 'Long'\")\n\n    pred_list = to_query(\n        [f'Pred_Length==\"{pred_l}\"' for pred_l in pred_ls]\n    )\n    dataset_list = to_query(\n        [f'Dataset==\"{dataset_name}\"' for dataset_name in dataset_names]\n    )\n    predictor_list = to_query(\n        [f'Predictor==\"{predictor}\"' for predictor in predictors]\n    )\n    query_list = pred_list + ' and ' + dataset_list + ' and ' + predictor_list\n\n    df_slice = metrics.query(query_list)\n    plt.clf()\n    sns.barplot(x=\"Predictor\", y=metric, data=df_slice)\n    plt.xticks(rotation=45)\n    plt.subplots_adjust(bottom=0.3)\n    plt.xlabel('Forecasting Method')\n    plt.title(f'Average {metric} on {datasets} Datasets with {horizon} Horizon')\n    plt.savefig(f'{datasets}_{horizon}.png')\n\nplot(\n    datasets='Sorghum',\n    horizon='Short',\n    metric='MAPE',\n)\nplot(\n    datasets='Sorghum',\n    horizon='Long',\n    metric='MAPE',\n)\nplot(\n    datasets='Malnutrition',\n    horizon='Short',\n    metric='MAPE',\n)\nplot(\n    datasets='Malnutrition',\n    horizon='Long',\n    metric='MAPE',\n)\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
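The to_query() helper above assembles pandas DataFrame.query strings; a small sketch of its behavior (values are illustrative, and to_query is assumed to be in scope).
# One element is returned as-is; several are OR-ed inside parentheses.
assert to_query(['Dataset=="a"']) == 'Dataset=="a"'
assert to_query(['Pred_Length=="10"', 'Pred_Length=="20"']) == '(Pred_Length=="10" or Pred_Length=="20")'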
YZ-Zheng/AI-Learns-Handwritten-Digits
[ "6ce2dcce7ed6e4689b3f7d0da3ddcf8ad06ce6ce" ]
[ "train_data.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\n# check if gpu is available\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass Flatten(nn.Module):\n    \"\"\"\n    performs the flatten operation\n    \"\"\"\n    def forward(self, input):\n        return input.view(input.size(0), -1)\n\n\ndef batchify_data(x_data, y_data, batch_size):\n    \"\"\"\n    Takes a set of data points and labels and groups them into batches\n    \"\"\"\n\n    N = int(len(x_data) / batch_size) * batch_size\n    batches = []\n    for i in range(0, N, batch_size):\n        batches.append({\n            'x': torch.tensor(x_data[i:i + batch_size],\n                              dtype=torch.float32),\n            'y': torch.tensor([y_data[0][i:i + batch_size],\n                               y_data[1][i:i + batch_size]],\n                              dtype=torch.int64)\n        })\n    return batches\n\n\ndef compute_accuracy(predictions, y):\n    \"\"\"\n    Computes the accuracy of predictions against actual label y\n    \"\"\"\n    return np.mean(np.equal(predictions.to('cpu').numpy(), y.to('cpu').numpy()))\n\n\ndef train_model(train_data, dev_data, model, lr=0.001, n_epochs=50):\n    \"\"\"\n    Train a model for N epochs given data and hyper-params\n    \"\"\"\n    # Optimize with Adam\n    optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n    print(\"Start training...\")\n    acc_train_upper = []\n    acc_train_lower = []\n    acc_val_upper = []\n    acc_val_lower = []\n\n    for epoch in range(1, n_epochs + 1):\n        print(\"Epoch {}:\\n\".format(epoch))\n\n        # Run training\n        loss, acc = run_epoch(train_data, model.train(), optimizer)\n        print('Train | loss1: {:.6f} accuracy1: {:.6f} | loss2: {:.6f} accuracy2: {:.6f}'.format(loss[0], acc[0], loss[1], acc[1]))\n        acc_train_upper.append(acc[0])\n        acc_train_lower.append(acc[1])\n\n\n        # Run validation\n        val_loss, val_acc = run_epoch(dev_data, model.eval(), optimizer)\n        print('Valid | loss1: {:.6f} accuracy1: {:.6f} | loss2: {:.6f} accuracy2: {:.6f}\\n'.format(val_loss[0], val_acc[0], val_loss[1], val_acc[1]))\n        acc_val_upper.append(val_acc[0])\n        acc_val_lower.append(val_acc[1])\n\n    return acc_train_upper, acc_train_lower, acc_val_upper, acc_val_lower\n\n\n\n\ndef run_epoch(data, model, optimizer):\n    \"\"\"\n    Train model for one pass of train data, and return loss, accuracy\n    \"\"\"\n    # Gather losses\n    losses_first_label = []\n    losses_second_label = []\n    batch_accuracies_first = []\n    batch_accuracies_second = []\n\n    # If model is in train mode, use optimizer.\n    is_training = model.training\n\n    # Iterate through batches\n    for batch in data:\n        x, y = batch['x'].to(device), batch['y'].to(device)\n\n        # Get output predictions for both the upper and lower numbers\n        out1, out2 = model(x)\n\n        # Predict and store accuracy\n        predictions_first_label = torch.argmax(out1, dim=1)\n        predictions_second_label = torch.argmax(out2, dim=1)\n        batch_accuracies_first.append(compute_accuracy(predictions_first_label, y[0]))\n        batch_accuracies_second.append(compute_accuracy(predictions_second_label, y[1]))\n\n        # Compute both losses\n        loss1 = F.cross_entropy(out1, y[0])\n        loss2 = F.cross_entropy(out2, y[1])\n        losses_first_label.append(loss1.data.item())\n        losses_second_label.append(loss2.data.item())\n\n        # If training, do an update.\n        if is_training:\n            optimizer.zero_grad()\n            joint_loss = 0.5 * (loss1 + loss2)\n            joint_loss.backward()\n            optimizer.step()\n\n    # Calculate epoch level scores\n    avg_loss = np.mean(losses_first_label), np.mean(losses_second_label)\n    avg_accuracy = np.mean(batch_accuracies_first), np.mean(batch_accuracies_second)\n    return avg_loss, avg_accuracy\n" ]
[ [ "torch.nn.functional.cross_entropy", "torch.tensor", "numpy.mean", "torch.cuda.is_available", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
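A shape-check sketch for batchify_data() above; the (10, 1, 42, 28) input shape is an assumed example, and labels arrive as a pair of arrays matching the y_data[0]/y_data[1] indexing in the function.
import numpy as np

x = np.random.rand(10, 1, 42, 28)
y = (np.random.randint(0, 10, 10), np.random.randint(0, 10, 10))
batches = batchify_data(x, y, batch_size=4)
print(len(batches))           # 2 -- only full batches are kept, the remainder is dropped
print(batches[0]['x'].shape)  # torch.Size([4, 1, 42, 28])
print(batches[0]['y'].shape)  # torch.Size([2, 4]) -- upper and lower labels stacked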
ai2cm/fv3net
[ "e62038aee0a97d6207e66baabd8938467838cf51", "e62038aee0a97d6207e66baabd8938467838cf51", "e62038aee0a97d6207e66baabd8938467838cf51", "e62038aee0a97d6207e66baabd8938467838cf51", "e62038aee0a97d6207e66baabd8938467838cf51", "e62038aee0a97d6207e66baabd8938467838cf51" ]
[ "workflows/diagnostics/fv3net/diagnostics/prognostic_run/computed_diagnostics.py", "external/loaders/loaders/batches/_sequences.py", "external/fv3fit/fv3fit/emulation/data/dict_dataset.py", "workflows/diagnostics/fv3net/diagnostics/_shared/transform.py", "external/vcm/vcm/calc/calc.py", "external/vcm/vcm/calc/histogram.py" ]
[ "\"\"\"Utilities for loading computed diagnostics\n\n\"\"\"\nimport json\nfrom typing import Iterable, Hashable, Sequence, Tuple, Any, Set, Mapping\nimport os\nimport xarray as xr\nimport numpy as np\nimport fsspec\nimport pandas as pd\nfrom pathlib import Path\nfrom dataclasses import dataclass\nimport tempfile\n\nfrom .metrics import metrics_registry\nfrom .derived_diagnostics import derived_registry\nfrom .constants import MovieUrls\n\n\n__all__ = [\"ComputedDiagnosticsList\", \"RunDiagnostics\"]\n\n\nGRID_VARS = [\"area\", \"lonb\", \"latb\", \"lon\", \"lat\", \"land_sea_mask\"]\n\nDiagnostics = Sequence[xr.Dataset]\nMetadata = Any\n\n\n@dataclass\nclass ComputedDiagnosticsList:\n    folders: Mapping[str, \"DiagnosticFolder\"]\n\n    @staticmethod\n    def from_directory(url: str) -> \"ComputedDiagnosticsList\":\n        \"\"\"Open a directory of computed diagnostics\n\n        Args:\n            url: URL to a directory containing rundirs as subdirectories. Each\n                rundir contains diags.nc, metrics.json, and .mp4 files.\n        \"\"\"\n        fs, _, _ = fsspec.get_fs_token_paths(url)\n        return ComputedDiagnosticsList(detect_folders(url, fs))\n\n    @staticmethod\n    def from_urls(urls: Sequence[str]) -> \"ComputedDiagnosticsList\":\n        \"\"\"Open computed diagnostics at the specified urls\n        \"\"\"\n\n        def url_to_folder(url):\n            fs, _, path = fsspec.get_fs_token_paths(url)\n            return DiagnosticFolder(fs, path[0])\n\n        return ComputedDiagnosticsList(\n            {str(k): url_to_folder(url) for k, url in enumerate(urls)}\n        )\n\n    @staticmethod\n    def from_json(\n        url: str, urls_are_rundirs: bool = False\n    ) -> \"ComputedDiagnosticsList\":\n        \"\"\"Open labeled computed diagnostics at urls specified in given JSON.\"\"\"\n\n        def url_to_folder(url):\n            fs, _, path = fsspec.get_fs_token_paths(url)\n            return DiagnosticFolder(fs, path[0])\n\n        with fsspec.open(url) as f:\n            rundirs = json.load(f)\n\n        if urls_are_rundirs:\n            for item in rundirs:\n                item[\"url\"] += \"_diagnostics\"\n\n        return ComputedDiagnosticsList(\n            {item[\"name\"]: url_to_folder(item[\"url\"]) for item in rundirs}\n        )\n\n    def load_metrics(self) -> \"RunMetrics\":\n        return RunMetrics(load_metrics(self.folders))\n\n    def load_diagnostics(self) -> Tuple[Metadata, \"RunDiagnostics\"]:\n        metadata, xarray_diags = load_diagnostics(self.folders)\n        return metadata, RunDiagnostics(xarray_diags)\n\n    def load_metrics_from_diagnostics(self) -> \"RunMetrics\":\n        \"\"\"Compute metrics on the fly from the pre-computed diagnostics.\"\"\"\n        return RunMetrics(load_metrics_from_diagnostics(self.folders))\n\n    def find_movie_urls(self) -> MovieUrls:\n        return {name: folder.movie_urls for name, folder in self.folders.items()}\n\n\n@dataclass\nclass RunDiagnostics:\n    \"\"\"A collection of diagnostics from different runs, not all of which have\n    the same variables\n\n    \"\"\"\n\n    diagnostics: Diagnostics\n\n    def __post_init__(self):\n        # indexes for faster lookup\n        self._attrs = {ds.run: ds.attrs for ds in self.diagnostics}\n        self._varnames = {ds.run: set(ds) for ds in self.diagnostics}\n        self._run_index = {ds.run: k for k, ds in enumerate(self.diagnostics)}\n\n    @property\n    def runs(self) -> Sequence[str]:\n        \"\"\"The available runs\"\"\"\n        return list(self._run_index)\n\n    @property\n    def variables(self) -> Set[str]:\n        \"\"\"The available variables\"\"\"\n        return set.union(*[set(d) for d in self.diagnostics])\n\n    @property\n    def long_names(self) -> Mapping[str, str]:\n        \"\"\"Mapping from variable name to long names\"\"\"\n        vars = self.variables\n        run = self.runs[0]\n        return {v: self.get_variable(run, v).attrs.get(\"long_name\", v) for v in vars}\n\n    def _get_run(self, run: str) -> xr.Dataset:\n        return self.diagnostics[self._run_index[run]]\n\n    def get_variable(self, run: str, varname: Hashable) -> xr.DataArray:\n        \"\"\"Query a collection of diagnostics for a given run and variable\n\n        Args:\n            run: name of the run to query\n            varname: variable to extract from the given run\n\n        Returns:\n            varname of run if present, otherwise nans with the expected\n            metadata\n\n        \"\"\"\n        if varname in self._varnames[run]:\n            return self._get_run(run)[varname]\n        else:\n            for run in self._varnames:\n                if varname in self._varnames[run]:\n                    template = self._get_run(run)[varname]\n                    return xr.full_like(template, np.nan)\n            raise ValueError(f\"{varname} not found.\")\n\n    def get_variables(self, run: str, varnames: Sequence[Hashable]) -> xr.Dataset:\n        \"\"\"Query a collection of diagnostics and return dataset of variables.\"\"\"\n        variables = [self.get_variable(run, v) for v in varnames]\n        return xr.merge(variables)\n\n    def matching_variables(self, varfilter: str) -> Set[str]:\n        \"\"\"The available variables that include varfilter in their names.\"\"\"\n        return set(v for v in self.variables if varfilter in v)\n\n    def is_baseline(self, run: str) -> bool:\n        return self._attrs[run][\"baseline\"]\n\n    @staticmethod\n    def is_verification(run: str) -> bool:\n        return run == \"verification\"\n\n\n@dataclass\nclass RunMetrics:\n    \"\"\"A collection of metrics from different runs, not all of which have the\n    same metrics\"\"\"\n\n    metrics: pd.DataFrame\n\n    @property\n    def empty(self) -> bool:\n        return self.metrics.empty\n\n    @property\n    def runs(self) -> Sequence[str]:\n        \"\"\"The available runs\"\"\"\n        return list(self.metrics.run.unique())\n\n    @property\n    def types(self) -> Set[str]:\n        \"\"\"The available types of metrics\"\"\"\n        metric_names = [self._prefix(m) for m in self.metrics.metric]\n        return set(metric_names)\n\n    def get_metric_variables(self, metric_type: str) -> Set[str]:\n        \"\"\"The available variables for given metric_type\"\"\"\n        metric_names = [\n            m for m in self.metrics.metric if self._prefix(m) == metric_type\n        ]\n        return set([self._suffix(m) for m in metric_names])\n\n    def get_metric_value(self, metric_type: str, variable: str, run: str) -> float:\n        m = self._get_metric(metric_type, variable, run)\n        if m.empty:\n            return np.nan\n        else:\n            return m.value.item()\n\n    def get_metric_units(self, metric_type: str, variable: str, run: str) -> str:\n        m = self._get_metric(metric_type, variable, run)\n        if m.empty:\n            return \"\"\n        else:\n            return m.units.item()\n\n    def get_metric_all_runs(self, metric_type: str, variable: str) -> pd.Series:\n        metric_name = self.metric_name(metric_type, variable)\n        return self.metrics[self.metrics.metric == metric_name]\n\n    @staticmethod\n    def _prefix(metric: str) -> str:\n        return metric.split(\"/\")[0]\n\n    @staticmethod\n    def _suffix(metric: str) -> str:\n        return metric.split(\"/\")[1]\n\n    @staticmethod\n    def metric_name(metric_type: str, variable: str) -> str:\n        return f\"{metric_type}/{variable}\"\n\n    def _get_metric(self, metric_type: str, variable: str, run: str) -> pd.Series:\n        _metrics = self.get_metric_all_runs(metric_type, variable)\n        return _metrics[_metrics.run == run]\n\n\ndef load_metrics(rundirs) -> pd.DataFrame:\n    \"\"\"Load the metrics from a bucket\"\"\"\n    metrics = _load_metrics(rundirs)\n    return _metrics_dataframe_from_dict(metrics)\n\n\ndef load_metrics_from_diagnostics(rundirs) -> pd.DataFrame:\n    
\"\"\"Load the diagnostics from a bucket and compute metrics\"\"\"\n metrics = {}\n _, diagnostics = load_diagnostics(rundirs)\n for ds in diagnostics:\n metrics[ds.run] = metrics_registry.compute(ds, n_jobs=1)\n return _metrics_dataframe_from_dict(metrics)\n\n\ndef _metrics_dataframe_from_dict(metrics) -> pd.DataFrame:\n metric_table = pd.DataFrame.from_records(_yield_metric_rows(metrics))\n run_table = parse_rundirs(list(metrics.keys()))\n return pd.merge(run_table, metric_table, on=\"run\")\n\n\ndef load_diagnostics(rundirs) -> Tuple[Metadata, Diagnostics]:\n \"\"\"Load metadata and merged diagnostics from a bucket\"\"\"\n diags = _load_diags(rundirs)\n run_table_lookup = parse_rundirs(rundirs)\n diagnostics = [\n ds.assign_attrs(run=key, **run_table_lookup.loc[key])\n for key, ds in diags.items()\n ]\n diagnostics = [convert_index_to_datetime(ds, \"time\") for ds in diagnostics]\n longest_run_ds = _longest_run(diagnostics)\n diagnostics.append(_get_verification_diagnostics(longest_run_ds))\n diagnostics = [_add_derived_diagnostics(ds) for ds in diagnostics]\n return get_metadata(diags), diagnostics\n\n\ndef _add_derived_diagnostics(ds):\n merged = xr.merge([ds, derived_registry.compute(ds, n_jobs=1)])\n return merged.assign_attrs(ds.attrs)\n\n\ndef _longest_run(diagnostics: Iterable[xr.Dataset]) -> xr.Dataset:\n max_length = 0\n for ds in diagnostics:\n if ds.sizes[\"time\"] > max_length:\n longest_ds = ds\n max_length = ds.sizes[\"time\"]\n return longest_ds\n\n\n@dataclass\nclass DiagnosticFolder:\n \"\"\"Represents the output of compute diagnostics\"\"\"\n\n fs: fsspec.AbstractFileSystem\n path: str\n\n @property\n def metrics(self):\n path = os.path.join(self.path, \"metrics.json\")\n return json.loads(self.fs.cat(path))\n\n @property\n def diagnostics(self) -> xr.Dataset:\n path = os.path.join(self.path, \"diags.nc\")\n with tempfile.NamedTemporaryFile() as f:\n self.fs.get(path, f.name)\n return xr.open_dataset(f.name, engine=\"h5netcdf\").compute()\n\n @property\n def movie_urls(self) -> Sequence[str]:\n movie_paths = self.fs.glob(os.path.join(self.path, \"*.mp4\"))\n if \"gs\" in self.fs.protocol:\n movie_paths = [\"gs://\" + path for path in movie_paths]\n return movie_paths\n\n\ndef detect_folders(\n bucket: str, fs: fsspec.AbstractFileSystem,\n) -> Mapping[str, DiagnosticFolder]:\n diag_ncs = fs.glob(os.path.join(bucket, \"*\", \"diags.nc\"))\n return {\n Path(url).parent.name: DiagnosticFolder(fs, Path(url).parent.as_posix())\n for url in diag_ncs\n }\n\n\ndef _load_diags(rundirs: Mapping[str, DiagnosticFolder]):\n metrics = {}\n for rundir, diag_folder in rundirs.items():\n metrics[rundir] = diag_folder.diagnostics\n return metrics\n\n\ndef _yield_metric_rows(metrics):\n \"\"\"yield rows to be combined into a dataframe\n \"\"\"\n for run in metrics:\n for name in metrics[run]:\n yield {\n \"run\": run,\n \"metric\": name,\n \"value\": metrics[run][name][\"value\"],\n \"units\": metrics[run][name][\"units\"],\n }\n\n\ndef _load_metrics(rundirs):\n metrics = {}\n for rundir, diag_folder in rundirs.items():\n metrics[rundir] = diag_folder.metrics\n return metrics\n\n\ndef parse_rundirs(rundirs) -> pd.DataFrame:\n run_table = pd.DataFrame.from_records(_parse_metadata(run) for run in rundirs)\n return run_table.set_index(\"run\")\n\n\ndef _parse_metadata(run: str):\n\n if \"baseline\" in run:\n baseline = True\n else:\n baseline = False\n\n return {\"run\": run, \"baseline\": baseline}\n\n\ndef _get_verification_diagnostics(ds: xr.Dataset) -> xr.Dataset:\n \"\"\"Back out 
verification diagnostics from prognostic run values and biases\"\"\"\n    verif_diagnostics = {}\n    verif_attrs = {\"run\": \"verification\", \"baseline\": True}\n    mean_bias_pairs = {\n        \"spatial_mean\": \"mean_bias\",\n        \"diurn_component\": \"diurn_bias\",\n        \"zonal_and_time_mean\": \"zonal_bias\",\n        \"zonal_mean_value\": \"zonal_mean_bias\",\n        \"time_mean_value\": \"time_mean_bias\",\n        \"histogram\": \"hist_bias\",\n        \"hist_2d\": \"hist2d_bias\",\n        \"pressure_level_zonal_time_mean\": \"pressure_level_zonal_bias\",\n    }\n    for mean_filter, bias_filter in mean_bias_pairs.items():\n        mean_vars = [var for var in ds if mean_filter in var]\n        for var in mean_vars:\n            matching_bias_var = var.replace(mean_filter, bias_filter)\n            if matching_bias_var in ds:\n                # verification = prognostic - bias\n                verif_diagnostics[var] = ds[var] - ds[matching_bias_var]\n                verif_diagnostics[var].attrs = ds[var].attrs\n    # special handling for histogram bin widths\n    bin_width_vars = [var for var in ds if \"bin_width_histogram\" in var]\n    bin_width_vars += [var for var in ds if \"bin_width_hist_2d\" in var]\n    for var in bin_width_vars:\n        verif_diagnostics[var] = ds[var]\n    verif_dataset = xr.Dataset(verif_diagnostics)\n    return xr.merge([ds[GRID_VARS], verif_dataset]).assign_attrs(verif_attrs)\n\n\ndef get_metadata(diags):\n    run_urls = {key: ds.attrs[\"url\"] for key, ds in diags.items()}\n    verification_datasets = [ds.attrs[\"verification\"] for ds in diags.values()]\n    if any([verification_datasets[0] != item for item in verification_datasets]):\n        raise ValueError(\n            \"Report cannot be generated with diagnostics computed against \"\n            \"different verification datasets.\"\n        )\n    verification_label = {\"verification dataset\": verification_datasets[0]}\n    return {**verification_label, **run_urls}\n\n\ndef convert_index_to_datetime(ds, dim):\n    return ds.assign_coords({dim: ds.indexes[dim].to_datetimeindex()})\n", "import os\nimport glob\nimport joblib\nimport collections.abc\nfrom copy import deepcopy\nfrom functools import partial\nimport numpy as np\nfrom typing import (\n    Callable,\n    Sequence,\n    MutableMapping,\n    TypeVar,\n    Hashable,\n    Any,\n    Union,\n)\n\nT = TypeVar(\"T\")\n\n\nclass BaseSequence(Sequence[T]):\n    def local(self, path: str, n_jobs: int = 4) -> \"Local\":\n        \"\"\"Download a sequence of xarray objects to a local path\n\n        Args:\n            path: local directory, will be created if not existing\n            n_jobs: parallelism\n        \"\"\"\n        return to_local(self, path=path, n_jobs=n_jobs)\n\n    def _save_item(self, path: str, i: int):\n        item = self[i]\n        path = os.path.join(path, \"%05d.pkl\" % i)\n        Local.dump(item, path)\n\n    def take(self, n: int) -> \"Take\":\n        \"\"\"Return a sequence consisting of the first n elements\n        \"\"\"\n        return Take(self, n)\n\n    def map(self, func) -> \"Map\":\n        \"\"\"Map a function over the elements of this sequence\n        \"\"\"\n        return Map(func, self)\n\n\nclass Take(BaseSequence[T]):\n    def __init__(self, parent_seq, n):\n        self._seq = parent_seq\n        self.n = n\n\n    def __getitem__(self, i):\n        if i < len(self):\n            return self._seq[i]\n        else:\n            raise IndexError()\n\n    def __len__(self):\n        return self.n\n\n\nclass Local(BaseSequence[T]):\n    def __init__(self, path: str):\n        self.path = path\n\n    @property\n    def files(self):\n        return sorted(glob.glob(os.path.join(self.path, \"*.pkl\")))\n\n    @classmethod\n    def dump(cls, dataset, path):\n        try:\n            # eagerly load lazy (e.g. xarray) objects before pickling\n            loaded_data = dataset.load()\n        except AttributeError:\n            loaded_data = dataset\n\n        joblib.dump(loaded_data, path)\n\n    def __len__(self):\n        return len(self.files)\n\n    def __getitem__(self, i):\n        slice_value = self.files[i]\n        if isinstance(slice_value, str):\n            return joblib.load(slice_value)\n        else:\n            return [joblib.load(file) for file in slice_value]\n\n\ndef to_local(sequence: Sequence[T], path: str, n_jobs: int = 4) -> Local[T]:\n    \"\"\"\n    Download a sequence of pickleable objects to a local path.\n\n    Args:\n        sequence: pickleable objects to dump locally\n        path: local directory, will be created if not existing\n        n_jobs: how many threads to use when dumping objects to file\n\n    Returns:\n        local_sequence\n    \"\"\"\n    os.makedirs(path, exist_ok=True)\n\n    def save_item(path: str, i: int):\n        item = sequence[i]\n        path = os.path.join(path, \"%05d.pkl\" % i)\n        Local.dump(item, path)\n\n    joblib.Parallel(n_jobs=n_jobs)(\n        joblib.delayed(save_item)(path, i) for i in range(len(sequence))\n    )\n    return Local(os.path.abspath(path))\n\n\nclass Map(BaseSequence[T]):\n    \"\"\"A wrapper over a sequence of function arguments passed into a function.\n\n    Attributes:\n        attrs: a dictionary of metadata.\n    \"\"\"\n\n    attrs: MutableMapping[Hashable, Any]\n\n    def __init__(self, func: Callable[..., T], args_sequence: Sequence[Any]):\n        \"\"\"\n        Args:\n            func: the function to call, which takes in one argument\n            args_sequence: a sequence of arguments\n        Returns:\n            result_sequence: a sequence of function results\n        \"\"\"\n        if not isinstance(args_sequence, collections.abc.Sequence):\n            raise TypeError(f\"args_sequence must be a sequence, got {args_sequence}\")\n        self._func = func\n        self._args = args_sequence\n        self.attrs = {}\n\n    def __getitem__(self, item: Union[int, slice]):\n\n        if isinstance(item, int):\n            return self._func(self._args[item])\n        elif isinstance(item, slice):\n            return self._slice_selection(item)\n        else:\n            raise TypeError(f\"Invalid argument type of {type(item)} passed into __getitem__.\")\n\n    def _slice_selection(self, selection: slice):\n        seq = Map(self._func, self._args[selection])\n        seq.attrs.update(deepcopy(self.attrs))\n        return seq\n\n    def __len__(self) -> int:\n        return len(self._args)\n\n\ndef shuffle(sequence: Sequence[T]) -> Map[T]:\n    \"\"\"Lazily shuffle a sequence. 
Uses numpy.random for randomness.\n\n Args:\n sequence: Input sequence to have access indices shuffled\n Returns:\n A new shuffled sequence\n \"\"\"\n seq_len = len(sequence)\n shuffled = np.random.choice(seq_len, size=seq_len, replace=False).tolist()\n func = partial(_simple_getitem, sequence)\n return Map(func, shuffled)\n\n\ndef _simple_getitem(sequence: Sequence[Any], item: Union[int, slice]):\n return sequence[item]\n", "from typing import Sequence\nimport tensorflow as tf\nimport vcm\nfrom fv3fit.emulation.data.io import get_nc_files\n\n__all__ = [\"netcdf_url_to_dataset\"]\n\n\ndef read_variables_as_dict(fs, url, variables):\n sig = (tf.float32,) * len(variables)\n # tf.py_function can only wrap functions which output tuples of tensors, not\n # dicts\n outputs = tf.py_function(\n lambda url: read_variables_greedily_as_tuple(fs, url, variables), [url], sig\n )\n return dict(zip(variables, outputs))\n\n\ndef read_variables_greedily_as_tuple(fs, url, variables):\n url = url.numpy().decode()\n print(f\"opening {url}\")\n ds = vcm.DerivedMapping(vcm.open_remote_nc(fs, url))\n return tuple([tf.convert_to_tensor(ds[v], dtype=tf.float32) for v in variables])\n\n\ndef netcdf_url_to_dataset(\n url: str, variables: Sequence[str], shuffle: bool = False\n) -> tf.data.Dataset:\n \"\"\"Open a url of netcdfs as a tf.data.Dataset of dicts\n\n Args:\n url: points to a directory of netcdf files.\n variables: a sequence of variable names to load from each netcdf file\n shuffle: if True, shuffle order the netcdf files will be loaded in. Does\n not shuffle BETWEEN files.\n\n Returns:\n a tensorflow dataset containing dictionaries of tensors. This\n dictionary contains all the variables specified in ``variables``.\n \"\"\"\n fs = vcm.get_fs(url)\n files = get_nc_files(url, fs)\n d = tf.data.Dataset.from_tensor_slices(sorted(files))\n if shuffle:\n d = d.shuffle(100_000)\n return d.map(lambda url: read_variables_as_dict(fs, url, variables))\n\n\ndef load_samples(train_dataset, n_train):\n train_data = train_dataset.take(n_train).shuffle(n_train).batch(n_train)\n return next(iter(train_data))\n", "\"\"\"\nTransforms operate on diagnostic function inputs to adjust data before\ndiagnostic values are calculated.\n\nA transform should take in the transform-specific arguments with a diagnostic\nfunction argument tuple as the final argument and return the adjusted\ndiagnostic function arguments.\n\"\"\"\n\nimport logging\nfrom typing import Sequence, Tuple, Callable, Optional\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nfrom datetime import datetime, timedelta\nimport cftime\nfrom vcm import interpolate_to_pressure_levels, minus_column_integrated_moistening\n\nfrom .constants import HORIZONTAL_DIMS, VERTICAL_DIM, DiagArg\n\n_TRANSFORM_FNS = {}\n\nlogger = logging.getLogger(__name__)\n\nSURFACE_TYPE_CODES = {\"sea\": (0, 2), \"land\": (1,), \"seaice\": (2,)}\n\n\ndef add_to_input_transform_fns(func):\n\n _TRANSFORM_FNS[func.__name__] = func\n\n return func\n\n\ndef apply(\n transform_func: Callable[[DiagArg], DiagArg],\n *transform_args_partial,\n **transform_kwargs,\n):\n \"\"\"\n Wrapper to apply transform to input diagnostic arguments (tuple of three datasets).\n Transform arguments are specified per diagnostic function to enable a query-style\n operation on input data.\n\n apply -> wraps diagnostic function in save_prognostic_run_diags and\n returns a new function with an input transform prepended to the diagnostic call.\n\n I.e., call to diagnostic_function becomes::\n\n 
input_transform(*diag_args):\n            adjusted_args = transform(*diag_args)\n            diagnostic_function(*adjusted_args)\n\n    Args:\n        transform_func: transform function to call\n        transform_args_partial: All transform function specific arguments preceding the\n            final diagnostic argument tuple, e.g., [freq_label] for resample_time\n        transform_kwargs: Any transform function keyword arguments\n\n    Note: I tried memoizing the current transforms but am unsure\n    if it will work on highly mutable datasets.\n    \"\"\"\n\n    def _apply_to_diag_func(diag_func):\n        def transform(diag_args):\n\n            logger.debug(\n                f\"Adding transform, {transform_func.__name__}, \"\n                f\"to diagnostic function: {diag_func.__name__}\"\n                f\"\\n\\targs: {transform_args_partial}\"\n                f\"\\n\\tkwargs: {transform_kwargs}\"\n            )\n\n            # append diagnostic function input to be transformed\n            transform_args = (*transform_args_partial, diag_args)\n\n            transformed_diag_args = transform_func(*transform_args, **transform_kwargs)\n\n            return diag_func(transformed_diag_args)\n\n        return transform\n\n    return _apply_to_diag_func\n\n\n@add_to_input_transform_fns\ndef resample_time(\n    freq_label: str,\n    arg: DiagArg,\n    time_slice=slice(None, -1),\n    inner_join: bool = False,\n    method: str = \"nearest\",\n) -> DiagArg:\n    \"\"\"\n    Subset times in prognostic and verification data\n\n    Args:\n        arg: input arguments to transform prior to the diagnostic calculation\n        freq_label: Time resampling frequency label (should be valid input for xarray's\n            resampling function)\n        time_slice: Index slice to reduce times after frequency resampling. Omits final\n            time by default to work with crashed simulations.\n        inner_join: Subset times to the intersection of prognostic and verification\n            data. Defaults to False.\n        method: how to do resampling. 
Can be \"nearest\" or \"mean\".\n \"\"\"\n prognostic, verification, grid = arg.prediction, arg.verification, arg.grid\n prognostic = _downsample_only(prognostic, freq_label, method)\n verification = _downsample_only(verification, freq_label, method)\n\n prognostic = prognostic.isel(time=time_slice)\n if inner_join:\n prognostic, verification = _inner_join_time(prognostic, verification)\n return DiagArg(prognostic, verification, grid)\n\n\ndef _downsample_only(ds: xr.Dataset, freq_label: str, method: str) -> xr.Dataset:\n \"\"\"Resample in time, only if given freq_label is lower frequency than time\n sampling of given dataset ds\"\"\"\n ds_freq = ds.time.values[1] - ds.time.values[0]\n if ds_freq < pd.to_timedelta(freq_label):\n resampled = ds.resample(time=freq_label, label=\"right\")\n if method == \"nearest\":\n return resampled.nearest()\n elif method == \"mean\":\n with xr.set_options(keep_attrs=True):\n return resampled.mean()\n else:\n raise ValueError(f\"Don't know how to resample with method={method}.\")\n else:\n return ds\n\n\n@add_to_input_transform_fns\ndef skip_if_3d_output_absent(arg: DiagArg) -> DiagArg:\n prognostic, verification, grid = arg.prediction, arg.verification, arg.grid\n dummy_ds = xr.Dataset().assign_coords(\n {\n \"time\": [\n cftime.DatetimeJulian(2020, 1, 1, 12),\n cftime.DatetimeJulian(2020, 1, 1, 15, 30),\n ]\n }\n )\n prog = prognostic if len(prognostic) > 0 else dummy_ds\n verif = verification if len(verification) > 0 else dummy_ds\n\n return DiagArg(prog, verif, grid)\n\n\n@add_to_input_transform_fns\ndef daily_mean(split: timedelta, arg: DiagArg) -> DiagArg:\n \"\"\"Resample time to daily mean for all times after split.\n\n Args:\n split: time since start of prognostic run after which resampling occurs\n arg: input arguments to transform prior to the diagnostic calculation\n \"\"\"\n prognostic, verification, grid = arg.prediction, arg.verification, arg.grid\n split_time = prognostic.time.values[0] + split\n prognostic = _resample_end(prognostic, split_time, \"1D\")\n verification = _resample_end(verification, split_time, \"1D\")\n return DiagArg(prognostic, verification, grid)\n\n\ndef _resample_end(ds: xr.Dataset, split: datetime, freq_label: str) -> xr.Dataset:\n start_segment = ds.sel(time=slice(None, split))\n end_segment = ds.sel(time=slice(split, None))\n if end_segment.sizes[\"time\"] != 0:\n with xr.set_options(keep_attrs=True):\n end_segment = end_segment.resample(time=freq_label, label=\"right\").mean()\n return xr.concat([start_segment, end_segment], dim=\"time\")\n\n\ndef _inner_join_time(\n prognostic: xr.Dataset, verification: xr.Dataset\n) -> Tuple[xr.Dataset, xr.Dataset]:\n \"\"\" Subset times within the prognostic data to be within the verification data,\n as necessary and vice versa, and return the subset datasets\n \"\"\"\n\n inner_join_time = xr.merge(\n [\n prognostic.time.rename(\"prognostic_time\"),\n verification.time.rename(\"verification_time\"),\n ],\n join=\"inner\",\n )\n\n return (\n prognostic.sel(time=inner_join_time.prognostic_time),\n verification.sel(time=inner_join_time.verification_time),\n )\n\n\ndef _mask_vars_with_horiz_dims(ds, surface_type, latitude, land_sea_mask):\n \"\"\"\n Subset data to variables with specified dimensions before masking\n to prevent odd behavior from variables with non-compliant dims\n (e.g., interfaces)\n \"\"\"\n\n spatial_ds_varnames = [\n var_name\n for var_name in ds.data_vars\n if set(HORIZONTAL_DIMS).issubset(set(ds[var_name].dims))\n ]\n masked = xr.Dataset()\n for var in 
spatial_ds_varnames:\n masked[var] = _mask_array(surface_type, ds[var], latitude, land_sea_mask)\n\n non_spatial_varnames = list(set(ds.data_vars) - set(spatial_ds_varnames))\n\n return masked.update(ds[non_spatial_varnames])\n\n\n@add_to_input_transform_fns\ndef mask_to_sfc_type(surface_type: str, arg: DiagArg) -> DiagArg:\n \"\"\"\n Mask prognostic run and verification data to the specified surface type.\n This function does not mask the grid area- if you are taking an\n area-weighted mean, use mask_area instead!\n\n Args:\n arg: input arguments to transform prior to the diagnostic calculation\n surface_type: Type of grid locations to leave unmasked\n \"\"\"\n prognostic, verification, grid = arg.prediction, arg.verification, arg.grid\n\n masked_prognostic = _mask_vars_with_horiz_dims(\n prognostic, surface_type, grid.lat, grid.land_sea_mask\n )\n\n masked_verification = _mask_vars_with_horiz_dims(\n verification, surface_type, grid.lat, grid.land_sea_mask\n )\n\n return DiagArg(masked_prognostic, masked_verification, grid)\n\n\n@add_to_input_transform_fns\ndef mask_area(region: str, arg: DiagArg) -> DiagArg:\n \"\"\"\n Set area variable to zero everywhere outside of specified region.\n\n Args:\n region: name of region to leave unmasked. Valid options are \"global\",\n \"land\", \"sea\", \"seaice\", \"tropics\", \"tropics20\",\n \"positive_net_precipitation\" and \"negative_net_precipitation\"\n arg: input arguments to transform prior to the diagnostic calculation\n \"\"\"\n prognostic, verification, grid, delp = (\n arg.prediction,\n arg.verification,\n arg.grid,\n arg.delp,\n )\n\n net_precipitation = (\n _get_net_precipitation(verification, grid.area, delp)\n if \"precipitation\" in region\n else None\n )\n\n masked_area = _mask_array(\n region, grid.area, grid.lat, grid.land_sea_mask, net_precipitation\n )\n\n grid_copy = grid.copy()\n return DiagArg(prognostic, verification, grid_copy.update({\"area\": masked_area}))\n\n\ndef _get_net_precipitation(\n verification: xr.Dataset, area: xr.DataArray, delp: Optional[xr.DataArray]\n) -> xr.DataArray:\n if delp is not None and \"Q2\" in verification.data_vars:\n return minus_column_integrated_moistening(verification[\"Q2\"], delp)\n else:\n return xr.full_like(area, fill_value=np.nan, dtype=float)\n\n\ndef _mask_array(\n region: str,\n arr: xr.DataArray,\n latitude: xr.DataArray,\n land_sea_mask: xr.DataArray,\n net_precipitation: Optional[xr.DataArray] = None,\n) -> xr.DataArray:\n \"\"\"Mask given DataArray to a specific region.\"\"\"\n if net_precipitation is None:\n net_precipitation = xr.full_like(arr, fill_value=np.nan)\n if region == \"tropics\":\n masked_arr = arr.where(abs(latitude) <= 10.0)\n elif region == \"tropics20\":\n masked_arr = arr.where(abs(latitude) <= 20.0)\n elif region == \"global\":\n masked_arr = arr.copy()\n elif region == \"positive_net_precipitation\":\n masked_arr = arr.where(net_precipitation > 0.0)\n elif region == \"negative_net_precipitation\":\n masked_arr = arr.where(net_precipitation <= 0.0)\n elif region in SURFACE_TYPE_CODES:\n masks = [land_sea_mask == code for code in SURFACE_TYPE_CODES[region]]\n mask_union = masks[0]\n for mask in masks[1:]:\n mask_union = np.logical_or(mask_union, mask)\n masked_arr = arr.where(mask_union)\n else:\n raise ValueError(f\"Masking procedure for region '{region}' is not defined.\")\n return masked_arr\n\n\n@add_to_input_transform_fns\ndef subset_variables(variables: Sequence, arg: DiagArg) -> DiagArg:\n \"\"\"Subset the variables, without failing if a variable doesn't 
exist\"\"\"\n    prognostic, verification, grid = (\n        arg.prediction,\n        arg.verification,\n        arg.grid,\n    )\n    prognostic_vars = [var for var in variables if var in prognostic]\n    verification_vars = [var for var in variables if var in verification]\n    return DiagArg(\n        prognostic[prognostic_vars], verification[verification_vars], grid, arg.delp,\n    )\n\n\ndef _is_3d(da: xr.DataArray):\n    return VERTICAL_DIM in da.dims\n\n\n@add_to_input_transform_fns\ndef select_3d_variables(arg: DiagArg) -> DiagArg:\n    prediction, target, grid, delp = (\n        arg.prediction,\n        arg.verification,\n        arg.grid,\n        arg.delp,\n    )\n    prediction_vars = [var for var in prediction if _is_3d(prediction[var])]\n    return DiagArg(prediction[prediction_vars], target[prediction_vars], grid, delp)\n\n\n@add_to_input_transform_fns\ndef select_2d_variables(arg: DiagArg) -> DiagArg:\n    prediction, target, grid, delp = (\n        arg.prediction,\n        arg.verification,\n        arg.grid,\n        arg.delp,\n    )\n    prediction_vars = [var for var in prediction if not _is_3d(prediction[var])]\n    return DiagArg(prediction[prediction_vars], target[prediction_vars], grid, delp)\n\n\n@add_to_input_transform_fns\ndef regrid_zdim_to_pressure_levels(arg: DiagArg, vertical_dim=VERTICAL_DIM) -> DiagArg:\n    # Regrids to the default pressure grid used in vcm.interpolate_to_pressure_levels,\n    # which match those in the ERA-Interim reanalysis dataset\n    prediction, target, grid, delp = (\n        arg.prediction,\n        arg.verification,\n        arg.grid,\n        arg.delp,\n    )\n    prediction_regridded, target_regridded = xr.Dataset(), xr.Dataset()\n    vertical_prediction_fields = [var for var in prediction if _is_3d(prediction[var])]\n    for var in vertical_prediction_fields:\n        prediction_regridded[var] = interpolate_to_pressure_levels(\n            delp=delp, field=prediction[var], dim=vertical_dim,\n        )\n        target_regridded[var] = interpolate_to_pressure_levels(\n            delp=delp, field=target[var], dim=vertical_dim,\n        )\n    return DiagArg(prediction_regridded, target_regridded, grid, delp)\n", "import numpy as np\nimport xarray as xr\nfrom vcm.cubedsphere.constants import INIT_TIME_DIM, VAR_LON_CENTER\nfrom typing import Sequence, Union\n\n\ngravity = 9.81\nspecific_heat = 1004\n\nHOUR_PER_DEG_LONGITUDE = 1.0 / 15\n\n\ndef timedelta_to_seconds(dt):\n    one_second = np.timedelta64(1000000000, \"ns\")\n    return dt / one_second\n\n\ndef local_time(ds, time=INIT_TIME_DIM, lon_var=VAR_LON_CENTER):\n    fractional_hr = (\n        ds[time].dt.hour + (ds[time].dt.minute / 60.0) + (ds[time].dt.second / 3600.0)\n    )\n    local_time = (fractional_hr + ds[lon_var] * HOUR_PER_DEG_LONGITUDE) % 24\n    return local_time\n\n\ndef _weighted_average(array, weights, axis=None):\n\n    return np.nansum(array * weights, axis=axis) / np.nansum(weights, axis=axis)\n\n\ndef weighted_average(\n    array: Union[xr.Dataset, xr.DataArray],\n    weights: xr.DataArray,\n    dims: Sequence[str] = [\"tile\", \"y\", \"x\"],\n) -> xr.Dataset:\n    \"\"\"Compute a weighted average of an array or dataset\n\n    Args:\n        array: xr dataarray or dataset of variables to be averaged\n        weights: xr dataarray of grid cell weights for averaging\n        dims: dimensions to average over\n\n    Returns:\n        xr dataarray or dataset of weighted averaged variables\n    \"\"\"\n    if dims is not None:\n        kwargs = {\"axis\": tuple(range(-len(dims), 0))}\n    else:\n        kwargs = {}\n    with xr.set_options(keep_attrs=True):\n        return xr.apply_ufunc(\n            _weighted_average,\n            array,\n            weights,\n            input_core_dims=[dims, dims],\n            kwargs=kwargs,\n            dask=\"allowed\",\n        )\n\n\ndef vertical_tapering_scale_factors(n_levels: int, cutoff: int, rate: float):\n    z_arr = np.arange(n_levels)\n    scaled = np.exp((z_arr[slice(None, cutoff)] - cutoff) / rate)\n    unscaled = np.ones(n_levels - cutoff)\n    return np.hstack([scaled, unscaled])\n", "from typing import Any, Hashable, Mapping, Tuple\n\nimport numpy as np\nimport xarray as xr\n\n\ndef histogram(da: xr.DataArray, **kwargs) -> Tuple[xr.DataArray, xr.DataArray]:\n    \"\"\"Compute histogram and return tuple of counts and bin widths.\n\n    Args:\n        da: input data\n        kwargs: optional parameters to pass on to np.histogram\n\n    Return:\n        counts, bin_widths tuple of xr.DataArrays. The coordinate of both arrays is\n        equal to the left side of the histogram bins.\n    \"\"\"\n    coord_name = f\"{da.name}_bins\" if da.name is not None else \"bins\"\n    count, bins = np.histogram(da, **kwargs)\n    coords: Mapping[Hashable, Any] = {coord_name: bins[:-1]}\n    width = bins[1:] - bins[:-1]\n    width_da = xr.DataArray(width, coords=coords, dims=[coord_name])\n    count_da = xr.DataArray(count, coords=coords, dims=[coord_name])\n    if \"units\" in da.attrs:\n        count_da[coord_name].attrs[\"units\"] = da.units\n        width_da[coord_name].attrs[\"units\"] = da.units\n        width_da.attrs[\"units\"] = da.units\n    if \"long_name\" in da.attrs:\n        count_da[coord_name].attrs[\"long_name\"] = da.long_name\n    return count_da, width_da\n\n\ndef histogram2d(\n    x: xr.DataArray, y: xr.DataArray, **kwargs\n) -> Tuple[xr.DataArray, xr.DataArray, xr.DataArray]:\n    \"\"\"Compute 2D histogram and return tuple of counts and bin widths.\n\n    Args:\n        x: input data\n        y: input data\n        kwargs: optional parameters to pass on to np.histogram2d\n\n    Return:\n        counts, x_bin_widths, y_bin_widths tuple of xr.DataArrays. The coordinate of all\n        arrays is equal to the left side of the histogram bins.\n    \"\"\"\n    xcoord_name = f\"{x.name}_bins\" if x.name is not None else \"xbins\"\n    ycoord_name = f\"{y.name}_bins\" if y.name is not None else \"ybins\"\n    count, xedges, yedges = np.histogram2d(\n        x.values.ravel(), y.transpose(*x.dims).values.ravel(), **kwargs\n    )\n    xcoord: Mapping[Hashable, Any] = {xcoord_name: xedges[:-1]}\n    ycoord: Mapping[Hashable, Any] = {ycoord_name: yedges[:-1]}\n    xwidth = xedges[1:] - xedges[:-1]\n    ywidth = yedges[1:] - yedges[:-1]\n    xwidth_da = xr.DataArray(xwidth, coords=xcoord, dims=[xcoord_name])\n    ywidth_da = xr.DataArray(ywidth, coords=ycoord, dims=[ycoord_name])\n    count_da = xr.DataArray(\n        count, coords={**xcoord, **ycoord}, dims=[xcoord_name, ycoord_name]\n    )\n    if \"units\" in x.attrs:\n        xwidth_da[xcoord_name].attrs[\"units\"] = x.units\n        xwidth_da.attrs[\"units\"] = x.units\n    if \"units\" in y.attrs:\n        ywidth_da[ycoord_name].attrs[\"units\"] = y.units\n        ywidth_da.attrs[\"units\"] = y.units\n\n    return count_da, xwidth_da, ywidth_da\n" ]
[ [ "pandas.merge" ], [ "numpy.random.choice" ], [ "tensorflow.convert_to_tensor" ], [ "pandas.to_timedelta", "numpy.logical_or" ], [ "numpy.hstack", "numpy.arange", "numpy.ones", "numpy.timedelta64", "numpy.nansum" ], [ "numpy.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
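A usage sketch for the vcm histogram() helper above; the DataArray name and units are illustrative, and the bins keyword is simply forwarded to np.histogram.
import numpy as np
import xarray as xr

da = xr.DataArray(np.random.rand(100), name="precip", attrs={"units": "mm/day"})
count, width = histogram(da, bins=np.linspace(0.0, 1.0, 11))
print(count.dims)          # ('precip_bins',) -- coordinate is the left bin edge
print(float(width.sum()))  # 1.0 -- ten bins of width 0.1, with units carried over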
karen-pal/scikit-learn
[ "2a67d88258264eb2b6dfad221be8f8d61684dcba" ]
[ "sklearn/cross_decomposition/tests/test_pls.py" ]
[ "import pytest\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose\n\nfrom sklearn.datasets import load_linnerud\nfrom sklearn.cross_decomposition._pls import (\n _center_scale_xy,\n _get_first_singular_vectors_power_method,\n _get_first_singular_vectors_svd,\n _svd_flip_1d,\n)\nfrom sklearn.cross_decomposition import CCA\nfrom sklearn.cross_decomposition import PLSSVD, PLSRegression, PLSCanonical\nfrom sklearn.datasets import make_regression\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.extmath import svd_flip\nfrom sklearn.exceptions import ConvergenceWarning\n\n\ndef assert_matrix_orthogonal(M):\n K = np.dot(M.T, M)\n assert_array_almost_equal(K, np.diag(np.diag(K)))\n\n\ndef test_pls_canonical_basics():\n # Basic checks for PLSCanonical\n d = load_linnerud()\n X = d.data\n Y = d.target\n\n pls = PLSCanonical(n_components=X.shape[1])\n pls.fit(X, Y)\n\n assert_matrix_orthogonal(pls.x_weights_)\n assert_matrix_orthogonal(pls.y_weights_)\n assert_matrix_orthogonal(pls._x_scores)\n assert_matrix_orthogonal(pls._y_scores)\n\n # Check X = TP' and Y = UQ'\n T = pls._x_scores\n P = pls.x_loadings_\n U = pls._y_scores\n Q = pls.y_loadings_\n # Need to scale first\n Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(\n X.copy(), Y.copy(), scale=True\n )\n assert_array_almost_equal(Xc, np.dot(T, P.T))\n assert_array_almost_equal(Yc, np.dot(U, Q.T))\n\n # Check that rotations on training data lead to scores\n Xt = pls.transform(X)\n assert_array_almost_equal(Xt, pls._x_scores)\n Xt, Yt = pls.transform(X, Y)\n assert_array_almost_equal(Xt, pls._x_scores)\n assert_array_almost_equal(Yt, pls._y_scores)\n\n # Check that inverse_transform works\n X_back = pls.inverse_transform(Xt)\n assert_array_almost_equal(X_back, X)\n\n\ndef test_sanity_check_pls_regression():\n # Sanity check for PLSRegression\n # The results were checked against the R-packages plspm, misOmics and pls\n\n d = load_linnerud()\n X = d.data\n Y = d.target\n\n pls = PLSRegression(n_components=X.shape[1])\n pls.fit(X, Y)\n\n expected_x_weights = np.array(\n [\n [-0.61330704, -0.00443647, 0.78983213],\n [-0.74697144, -0.32172099, -0.58183269],\n [-0.25668686, 0.94682413, -0.19399983],\n ]\n )\n\n expected_x_loadings = np.array(\n [\n [-0.61470416, -0.24574278, 0.78983213],\n [-0.65625755, -0.14396183, -0.58183269],\n [-0.51733059, 1.00609417, -0.19399983],\n ]\n )\n\n expected_y_weights = np.array(\n [\n [+0.32456184, 0.29892183, 0.20316322],\n [+0.42439636, 0.61970543, 0.19320542],\n [-0.13143144, -0.26348971, -0.17092916],\n ]\n )\n\n expected_y_loadings = np.array(\n [\n [+0.32456184, 0.29892183, 0.20316322],\n [+0.42439636, 0.61970543, 0.19320542],\n [-0.13143144, -0.26348971, -0.17092916],\n ]\n )\n\n assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))\n assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))\n assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))\n assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))\n\n # The R / Python difference in the signs should be consistent across\n # loadings, weights, etc.\n x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)\n x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)\n y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)\n y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)\n assert_array_almost_equal(x_loadings_sign_flip, 
x_weights_sign_flip)\n assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)\n\n\ndef test_sanity_check_pls_regression_constant_column_Y():\n # Check behavior when the first column of Y is constant\n # The results are checked against a modified version of plsreg2\n # from the R-package plsdepot\n d = load_linnerud()\n X = d.data\n Y = d.target\n Y[:, 0] = 1\n pls = PLSRegression(n_components=X.shape[1])\n pls.fit(X, Y)\n\n expected_x_weights = np.array(\n [\n [-0.6273573, 0.007081799, 0.7786994],\n [-0.7493417, -0.277612681, -0.6011807],\n [-0.2119194, 0.960666981, -0.1794690],\n ]\n )\n\n expected_x_loadings = np.array(\n [\n [-0.6273512, -0.22464538, 0.7786994],\n [-0.6643156, -0.09871193, -0.6011807],\n [-0.5125877, 1.01407380, -0.1794690],\n ]\n )\n\n expected_y_loadings = np.array(\n [\n [0.0000000, 0.0000000, 0.0000000],\n [0.4357300, 0.5828479, 0.2174802],\n [-0.1353739, -0.2486423, -0.1810386],\n ]\n )\n\n assert_array_almost_equal(np.abs(expected_x_weights), np.abs(pls.x_weights_))\n assert_array_almost_equal(np.abs(expected_x_loadings), np.abs(pls.x_loadings_))\n # For the PLSRegression with default parameters, y_loadings == y_weights\n assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))\n assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_loadings))\n\n x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)\n x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)\n # we ignore the first full-zeros row for y\n y_loadings_sign_flip = np.sign(expected_y_loadings[1:] / pls.y_loadings_[1:])\n\n assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)\n assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip)\n\n\ndef test_sanity_check_pls_canonical():\n # Sanity check for PLSCanonical\n # The results were checked against the R-package plspm\n\n d = load_linnerud()\n X = d.data\n Y = d.target\n\n pls = PLSCanonical(n_components=X.shape[1])\n pls.fit(X, Y)\n\n expected_x_weights = np.array(\n [\n [-0.61330704, 0.25616119, -0.74715187],\n [-0.74697144, 0.11930791, 0.65406368],\n [-0.25668686, -0.95924297, -0.11817271],\n ]\n )\n\n expected_x_rotations = np.array(\n [\n [-0.61330704, 0.41591889, -0.62297525],\n [-0.74697144, 0.31388326, 0.77368233],\n [-0.25668686, -0.89237972, -0.24121788],\n ]\n )\n\n expected_y_weights = np.array(\n [\n [+0.58989127, 0.7890047, 0.1717553],\n [+0.77134053, -0.61351791, 0.16920272],\n [-0.23887670, -0.03267062, 0.97050016],\n ]\n )\n\n expected_y_rotations = np.array(\n [\n [+0.58989127, 0.7168115, 0.30665872],\n [+0.77134053, -0.70791757, 0.19786539],\n [-0.23887670, -0.00343595, 0.94162826],\n ]\n )\n\n assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))\n assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))\n assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))\n assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))\n\n x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)\n x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)\n y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)\n y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)\n assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)\n assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)\n\n assert_matrix_orthogonal(pls.x_weights_)\n assert_matrix_orthogonal(pls.y_weights_)\n\n 
assert_matrix_orthogonal(pls._x_scores)\n assert_matrix_orthogonal(pls._y_scores)\n\n\ndef test_sanity_check_pls_canonical_random():\n # Sanity check for PLSCanonical on random data\n # The results were checked against the R-package plspm\n n = 500\n p_noise = 10\n q_noise = 5\n # 2 latents vars:\n rng = check_random_state(11)\n l1 = rng.normal(size=n)\n l2 = rng.normal(size=n)\n latents = np.array([l1, l1, l2, l2]).T\n X = latents + rng.normal(size=4 * n).reshape((n, 4))\n Y = latents + rng.normal(size=4 * n).reshape((n, 4))\n X = np.concatenate((X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)\n Y = np.concatenate((Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)\n\n pls = PLSCanonical(n_components=3)\n pls.fit(X, Y)\n\n expected_x_weights = np.array(\n [\n [0.65803719, 0.19197924, 0.21769083],\n [0.7009113, 0.13303969, -0.15376699],\n [0.13528197, -0.68636408, 0.13856546],\n [0.16854574, -0.66788088, -0.12485304],\n [-0.03232333, -0.04189855, 0.40690153],\n [0.1148816, -0.09643158, 0.1613305],\n [0.04792138, -0.02384992, 0.17175319],\n [-0.06781, -0.01666137, -0.18556747],\n [-0.00266945, -0.00160224, 0.11893098],\n [-0.00849528, -0.07706095, 0.1570547],\n [-0.00949471, -0.02964127, 0.34657036],\n [-0.03572177, 0.0945091, 0.3414855],\n [0.05584937, -0.02028961, -0.57682568],\n [0.05744254, -0.01482333, -0.17431274],\n ]\n )\n\n expected_x_loadings = np.array(\n [\n [0.65649254, 0.1847647, 0.15270699],\n [0.67554234, 0.15237508, -0.09182247],\n [0.19219925, -0.67750975, 0.08673128],\n [0.2133631, -0.67034809, -0.08835483],\n [-0.03178912, -0.06668336, 0.43395268],\n [0.15684588, -0.13350241, 0.20578984],\n [0.03337736, -0.03807306, 0.09871553],\n [-0.06199844, 0.01559854, -0.1881785],\n [0.00406146, -0.00587025, 0.16413253],\n [-0.00374239, -0.05848466, 0.19140336],\n [0.00139214, -0.01033161, 0.32239136],\n [-0.05292828, 0.0953533, 0.31916881],\n [0.04031924, -0.01961045, -0.65174036],\n [0.06172484, -0.06597366, -0.1244497],\n ]\n )\n\n expected_y_weights = np.array(\n [\n [0.66101097, 0.18672553, 0.22826092],\n [0.69347861, 0.18463471, -0.23995597],\n [0.14462724, -0.66504085, 0.17082434],\n [0.22247955, -0.6932605, -0.09832993],\n [0.07035859, 0.00714283, 0.67810124],\n [0.07765351, -0.0105204, -0.44108074],\n [-0.00917056, 0.04322147, 0.10062478],\n [-0.01909512, 0.06182718, 0.28830475],\n [0.01756709, 0.04797666, 0.32225745],\n ]\n )\n\n expected_y_loadings = np.array(\n [\n [0.68568625, 0.1674376, 0.0969508],\n [0.68782064, 0.20375837, -0.1164448],\n [0.11712173, -0.68046903, 0.12001505],\n [0.17860457, -0.6798319, -0.05089681],\n [0.06265739, -0.0277703, 0.74729584],\n [0.0914178, 0.00403751, -0.5135078],\n [-0.02196918, -0.01377169, 0.09564505],\n [-0.03288952, 0.09039729, 0.31858973],\n [0.04287624, 0.05254676, 0.27836841],\n ]\n )\n\n assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))\n assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))\n assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))\n assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))\n\n x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)\n x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)\n y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)\n y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)\n assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)\n assert_array_almost_equal(y_loadings_sign_flip, 
y_weights_sign_flip)\n\n    assert_matrix_orthogonal(pls.x_weights_)\n    assert_matrix_orthogonal(pls.y_weights_)\n\n    assert_matrix_orthogonal(pls._x_scores)\n    assert_matrix_orthogonal(pls._y_scores)\n\n\ndef test_convergence_fail():\n    # Make sure ConvergenceWarning is raised if max_iter is too small\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n    pls_nipals = PLSCanonical(n_components=X.shape[1], max_iter=2)\n    with pytest.warns(ConvergenceWarning):\n        pls_nipals.fit(X, Y)\n\n\[email protected](\"ignore:.*scores_ was deprecated\")  # 1.1\[email protected](\"Est\", (PLSSVD, PLSRegression, PLSCanonical))\ndef test_attibutes_shapes(Est):\n    # Make sure attributes are of the correct shape depending on n_components\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n    n_components = 2\n    pls = Est(n_components=n_components)\n    pls.fit(X, Y)\n    assert all(\n        attr.shape[1] == n_components\n        for attr in (pls.x_scores_, pls.y_scores_, pls.x_weights_, pls.y_weights_)\n    )\n\n\[email protected](\"Est\", (PLSRegression, PLSCanonical, CCA))\ndef test_univariate_equivalence(Est):\n    # Ensure 2D Y with 1 column is equivalent to 1D Y\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n\n    est = Est(n_components=1)\n    one_d_coeff = est.fit(X, Y[:, 0]).coef_\n    two_d_coeff = est.fit(X, Y[:, :1]).coef_\n\n    assert one_d_coeff.shape == two_d_coeff.shape\n    assert_array_almost_equal(one_d_coeff, two_d_coeff)\n\n\[email protected](\"Est\", (PLSRegression, PLSCanonical, CCA, PLSSVD))\ndef test_copy(Est):\n    # check that the \"copy\" keyword works\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n    X_orig = X.copy()\n\n    # copy=True won't modify inplace\n    pls = Est(copy=True).fit(X, Y)\n    assert_array_equal(X, X_orig)\n\n    # copy=False will modify inplace\n    with pytest.raises(AssertionError):\n        Est(copy=False).fit(X, Y)\n        assert_array_almost_equal(X, X_orig)\n\n    if Est is PLSSVD:\n        return  # PLSSVD does not support copy param in predict or transform\n\n    X_orig = X.copy()\n    with pytest.raises(AssertionError):\n        pls.transform(X, Y, copy=False),\n        assert_array_almost_equal(X, X_orig)\n\n    X_orig = X.copy()\n    with pytest.raises(AssertionError):\n        pls.predict(X, copy=False),\n        assert_array_almost_equal(X, X_orig)\n\n    # Make sure copy=True gives same transform and predictions as predict=False\n    assert_array_almost_equal(\n        pls.transform(X, Y, copy=True), pls.transform(X.copy(), Y.copy(), copy=False)\n    )\n    assert_array_almost_equal(\n        pls.predict(X, copy=True), pls.predict(X.copy(), copy=False)\n    )\n\n\ndef _generate_test_scale_and_stability_datasets():\n    \"\"\"Generate dataset for test_scale_and_stability\"\"\"\n    # dataset for non-regression 7818\n    rng = np.random.RandomState(0)\n    n_samples = 1000\n    n_targets = 5\n    n_features = 10\n    Q = rng.randn(n_targets, n_features)\n    Y = rng.randn(n_samples, n_targets)\n    X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1\n    X *= 1000\n    yield X, Y\n\n    # Data set where one of the features is constant\n    X, Y = load_linnerud(return_X_y=True)\n    # causes X[:, -1].std() to be zero\n    X[:, -1] = 1.0\n    yield X, Y\n\n    X = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [3.0, 5.0, 4.0]])\n    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])\n    yield X, Y\n\n    # Seeds that provide a non-regression test for #18746, where CCA fails\n    seeds = [530, 741]\n    for seed in seeds:\n        rng = np.random.RandomState(seed)\n        X = rng.randn(4, 3)\n        Y = rng.randn(4, 2)\n        yield X, Y\n\n\[email protected](\"Est\", (CCA, PLSCanonical, PLSRegression, PLSSVD))\[email protected](\"X, Y\", 
_generate_test_scale_and_stability_datasets())\ndef test_scale_and_stability(Est, X, Y):\n \"\"\"scale=True is equivalent to scale=False on centered/scaled data.\n This allows checking numerical stability across platforms as well\"\"\"\n\n X_s, Y_s, *_ = _center_scale_xy(X, Y)\n\n X_score, Y_score = Est(scale=True).fit_transform(X, Y)\n X_s_score, Y_s_score = Est(scale=False).fit_transform(X_s, Y_s)\n\n assert_allclose(X_s_score, X_score, atol=1e-4)\n assert_allclose(Y_s_score, Y_score, atol=1e-4)\n\n\n@pytest.mark.parametrize(\"Est\", (PLSSVD, PLSCanonical, CCA))\n@pytest.mark.parametrize(\"n_components\", (0, 4))\ndef test_n_components_bounds(Est, n_components):\n # n_components should be in [1, min(n_samples, n_features, n_targets)]\n # TODO: catch error instead of warning in 1.1\n rng = np.random.RandomState(0)\n X = rng.randn(10, 5)\n Y = rng.randn(10, 3)\n est = Est(n_components=n_components)\n with pytest.warns(FutureWarning, match=\"n_components=3 will be used instead\"):\n est.fit(X, Y)\n # make sure upper bound of rank is used as a fallback\n assert est.transform(X).shape[1] == 3\n\n\n@pytest.mark.parametrize(\"n_components\", (0, 6))\ndef test_n_components_bounds_pls_regression(n_components):\n # For PLSRegression, the upper bound for n_components is n_features\n # TODO: catch error instead of warning in 1.1\n rng = np.random.RandomState(0)\n X = rng.randn(10, 5)\n Y = rng.randn(10, 3)\n est = PLSRegression(n_components=n_components)\n with pytest.warns(FutureWarning, match=\"n_components=5 will be used instead\"):\n est.fit(X, Y)\n # make sure upper bound of rank is used as a fallback\n assert est.transform(X).shape[1] == 5\n\n\n@pytest.mark.parametrize(\"Est\", (PLSSVD, CCA, PLSCanonical))\ndef test_scores_deprecations(Est):\n # Make sure x_scores_ and y_scores_ are deprecated.\n # It's not deprecated for PLSRegression because y_scores_ is different from\n # transform(Y_train)\n # TODO: remove attributes and test in 1.1\n rng = np.random.RandomState(0)\n X = rng.randn(10, 5)\n Y = rng.randn(10, 3)\n est = Est().fit(X, Y)\n with pytest.warns(FutureWarning, match=\"x_scores_ was deprecated\"):\n assert_allclose(est.x_scores_, est.transform(X))\n with pytest.warns(FutureWarning, match=\"y_scores_ was deprecated\"):\n assert_allclose(est.y_scores_, est.transform(X, Y)[1])\n\n\n@pytest.mark.parametrize(\"Est\", (PLSRegression, PLSCanonical, CCA))\ndef test_norm_y_weights_deprecation(Est):\n rng = np.random.RandomState(0)\n X = rng.randn(10, 5)\n Y = rng.randn(10, 3)\n est = Est().fit(X, Y)\n with pytest.warns(FutureWarning, match=\"norm_y_weights was deprecated\"):\n est.norm_y_weights\n\n\n# TODO: Remove test in 1.1\n@pytest.mark.parametrize(\"Estimator\", (PLSRegression, PLSCanonical, CCA, PLSSVD))\n@pytest.mark.parametrize(\"attribute\", (\"x_mean_\", \"y_mean_\", \"x_std_\", \"y_std_\"))\ndef test_mean_and_std_deprecation(Estimator, attribute):\n rng = np.random.RandomState(0)\n X = rng.randn(10, 5)\n Y = rng.randn(10, 3)\n estimator = Estimator().fit(X, Y)\n with pytest.warns(FutureWarning, match=f\"{attribute} was deprecated\"):\n getattr(estimator, attribute)\n\n\n@pytest.mark.parametrize(\"n_samples, n_features\", [(100, 10), (100, 200)])\n@pytest.mark.parametrize(\"seed\", range(10))\ndef test_singular_value_helpers(n_samples, n_features, seed):\n # Make sure SVD and power method give approximately the same results\n X, Y = make_regression(n_samples, n_features, n_targets=5, random_state=seed)\n u1, v1, _ = _get_first_singular_vectors_power_method(X, Y, norm_y_weights=True)\n u2, v2 = _get_first_singular_vectors_svd(X, Y)\n\n _svd_flip_1d(u1, 
v1)\n _svd_flip_1d(u2, v2)\n\n rtol = 1e-1\n assert_allclose(u1, u2, rtol=rtol)\n assert_allclose(v1, v2, rtol=rtol)\n\n\ndef test_one_component_equivalence():\n # PLSSVD, PLSRegression and PLSCanonical should all be equivalent when\n # n_components is 1\n X, Y = make_regression(100, 10, n_targets=5, random_state=0)\n svd = PLSSVD(n_components=1).fit(X, Y).transform(X)\n reg = PLSRegression(n_components=1).fit(X, Y).transform(X)\n canonical = PLSCanonical(n_components=1).fit(X, Y).transform(X)\n\n assert_allclose(svd, reg, rtol=1e-2)\n assert_allclose(svd, canonical, rtol=1e-2)\n\n\ndef test_svd_flip_1d():\n # Make sure svd_flip_1d is equivalent to svd_flip\n u = np.array([1, -4, 2])\n v = np.array([1, 2, 3])\n\n u_expected, v_expected = svd_flip(u.reshape(-1, 1), v.reshape(1, -1))\n _svd_flip_1d(u, v) # inplace\n\n assert_allclose(u, u_expected.ravel())\n assert_allclose(u, [-1, 4, -2])\n\n assert_allclose(v, v_expected.ravel())\n assert_allclose(v, [-1, -2, -3])\n\n\ndef test_loadings_converges():\n \"\"\"Test that CCA converges. Non-regression test for #19549.\"\"\"\n X, y = make_regression(n_samples=200, n_features=20, n_targets=20, random_state=20)\n\n cca = CCA(n_components=10, max_iter=500)\n\n with pytest.warns(None) as record:\n cca.fit(X, y)\n # ConvergenceWarning should not be raised\n if len(record) > 0:\n pytest.fail(f\"Unexpected warning: {str(record[0].message)}\")\n\n # Loadings converge to reasonable values\n assert np.all(np.abs(cca.x_loadings_) < 1)\n\n\ndef test_pls_constant_y():\n \"\"\"Checks warning when y is constant. Non-regression test for #19831\"\"\"\n rng = np.random.RandomState(42)\n x = rng.rand(100, 3)\n y = np.zeros(100)\n\n pls = PLSRegression()\n\n msg = \"Y residual is constant at iteration\"\n with pytest.warns(UserWarning, match=msg):\n pls.fit(x, y)\n\n assert_allclose(pls.x_rotations_, 0)\n" ]
[ [ "numpy.diag", "numpy.dot", "sklearn.cross_decomposition.CCA", "sklearn.cross_decomposition._pls._center_scale_xy", "sklearn.cross_decomposition.PLSSVD", "sklearn.cross_decomposition.PLSCanonical", "sklearn.cross_decomposition._pls._get_first_singular_vectors_svd", "numpy.zeros", "numpy.testing.assert_array_almost_equal", "numpy.testing.assert_allclose", "sklearn.cross_decomposition.PLSRegression", "numpy.array", "numpy.random.RandomState", "numpy.abs", "sklearn.datasets.load_linnerud", "sklearn.cross_decomposition._pls._svd_flip_1d", "numpy.sign", "numpy.testing.assert_array_equal", "sklearn.datasets.make_regression", "sklearn.cross_decomposition._pls._get_first_singular_vectors_power_method", "sklearn.utils.check_random_state" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rohithdesikan/evprediction
[ "3ea5a2b3db350397385c9c9835483eb7dfb2773b" ]
[ "app/app.py" ]
[ "# %%\nimport os\nimport numpy as np\nimport pandas as pd \nimport flask\nfrom flask import Flask, jsonify, request, make_response\nimport tensorflow as tf\n\nfrom evprediction import convert_to_array\n\n# %%\n# Load saved model\n# model_path = os.path.abspath(os.path.join(os.getcwd(), 'models'))\nmodel_name = 'evmodel.h5'\nmodel = tf.keras.models.load_model(model_name)\n\n# %%\napp = Flask(__name__)\n\[email protected]('/') \ndef hello(): \n return \"Welcome to EV Prediction\"\n\n\n# Works for any number of test points\[email protected]('/predict', methods = ['POST'])\ndef make_prediction():\n\n # Make the request in json format\n json = request.get_json()\n\n # It comes in as a list of list where the 2nd element is the meter data and convert to np array\n data = json[1]\n arr = np.array(data)\n\n # If there is only 1 point to be tested, reshape it as necessary (1, 2880)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, (-1, arr.shape[0]))\n\n # The House_ID could or could not be included in the data, so make sure to get rid of the 1st point\n if arr.shape[1] == 2881:\n arr = np.array(arr[:, 1:])\n\n \n # Reshape array to the required shape for predictions\n arr_reshaped = np.reshape(arr, (arr.shape[0], 60, -1))\n\n # Use the saved model to make a prediction\n out = model.predict(arr_reshaped)\n\n # Reshape the output into a single dimension, convert to list and then to int (for boolean prediction)\n out_reshaped = np.reshape(out, (out.shape[0], ))\n out_pred = np.round(out_reshaped).tolist()\n out_int = [int(p) for p in out_pred]\n\n # Return predictions as a dictionary, works as both single and multi input prediction\n return make_response({'Predictions': out_int})\n\nif __name__ == \"__main__\": \n app.run(host ='0.0.0.0', port = 5000, debug = True)\n" ]
[ [ "numpy.reshape", "tensorflow.keras.models.load_model", "numpy.array", "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
prateekiiest/interpret
[ "b5530a587251a77516ab443037fc37f71708564c" ]
[ "python/interpret-core/interpret/glassbox/ebm/test/test_internal.py" ]
[ "# Copyright (c) 2019 Microsoft Corporation\n# Distributed under the MIT software license\n\nfrom ..internal import Native, NativeEBMBooster\n\nimport numpy as np\nimport ctypes as ct\nfrom contextlib import closing\n\ndef test_booster_internals():\n with closing(\n NativeEBMBooster(\n model_type=\"classification\",\n n_classes=2,\n features_categorical=np.array([0], dtype=ct.c_int64, order=\"C\"), \n features_bin_count=np.array([2], dtype=ct.c_int64, order=\"C\"),\n feature_groups=[[0]],\n X_train=np.array([[0]], dtype=ct.c_int64, order=\"C\"),\n y_train=np.array([0], dtype=ct.c_int64, order=\"C\"),\n scores_train=None,\n X_val=np.array([[0]], dtype=ct.c_int64, order=\"C\"),\n y_val=np.array([0], dtype=ct.c_int64, order=\"C\"),\n scores_val=None,\n n_inner_bags=0,\n random_state=42,\n optional_temp_params=None,\n )\n ) as native_ebm_booster:\n gain = native_ebm_booster.generate_model_update(\n feature_group_index=0,\n generate_update_options=Native.GenerateUpdateOptions_Default,\n learning_rate=0.01,\n min_samples_leaf=2,\n max_leaves=np.array([2], dtype=ct.c_int64, order=\"C\"),\n )\n assert gain == 0\n\n cuts = native_ebm_booster.get_model_update_cuts()\n assert len(cuts) == 1\n assert len(cuts[0]) == 0\n\n model_update = native_ebm_booster.get_model_update_expanded()\n assert len(model_update.shape) == 1\n assert model_update.shape[0] == 2\n assert model_update[0] < 0\n\n native_ebm_booster.set_model_update_expanded(0, model_update)\n\n metric = native_ebm_booster.apply_model_update()\n assert 0 < metric\n\n model = native_ebm_booster.get_best_model()\n assert len(model) == 1\n assert len(model[0].shape) == 1\n assert model[0].shape[0] == 2\n assert model[0][0] < 0\n\n\ndef test_one_class():\n with closing(\n NativeEBMBooster(\n model_type=\"classification\",\n n_classes=1,\n features_categorical=np.array([0], dtype=ct.c_int64, order=\"C\"), \n features_bin_count=np.array([2], dtype=ct.c_int64, order=\"C\"),\n feature_groups=[[0]],\n X_train=np.array([[0, 1, 0]], dtype=ct.c_int64, order=\"C\"),\n y_train=np.array([0, 0, 0], dtype=ct.c_int64, order=\"C\"),\n scores_train=None,\n X_val=np.array([[1, 0, 1]], dtype=ct.c_int64, order=\"C\"),\n y_val=np.array([0, 0, 0], dtype=ct.c_int64, order=\"C\"),\n scores_val=None,\n n_inner_bags=0,\n random_state=42,\n optional_temp_params=None,\n )\n ) as native_ebm_booster:\n gain = native_ebm_booster.generate_model_update(\n feature_group_index=0,\n generate_update_options=Native.GenerateUpdateOptions_Default,\n learning_rate=0.01,\n min_samples_leaf=2,\n max_leaves=np.array([2], dtype=ct.c_int64, order=\"C\"),\n )\n assert gain == 0\n\n cuts = native_ebm_booster.get_model_update_cuts()\n assert len(cuts) == 1\n assert len(cuts[0]) == 0\n\n model_update = native_ebm_booster.get_model_update_expanded()\n assert model_update is None\n\n native_ebm_booster.set_model_update_expanded(0, model_update)\n\n metric = native_ebm_booster.apply_model_update()\n assert metric == 0\n\n model = native_ebm_booster.get_best_model()\n assert len(model) == 1\n assert model[0] is None\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mohammadpz/Associative_LSTM
[ "5094829ed8432be738c79c6a87396e0edf63b008" ]
[ "holographic_memory.py" ]
[ "import numpy as np\nimport theano\nimport theano.tensor as T\n# import matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nB = 10\nF = 110 * 110 * 3\nC = 20\n\n# shape: C x F/2\npermutations = []\nindices = np.arange(F / 2)\nfor i in range(C):\n np.random.shuffle(indices)\n permutations.append(np.concatenate(\n [indices,\n [ind + F / 2 for ind in indices]]))\n# C x F (numpy)\nPERMUTATIONS = np.vstack(permutations)\n\n\n# input: B x F\n# output: C x B x F\ndef permute(input):\n inputs_permuted = []\n for i in range(PERMUTATIONS.shape[0]):\n inputs_permuted.append(\n input[:, PERMUTATIONS[i]].dimshuffle('x', 0, 1))\n return T.concatenate(inputs_permuted, axis=0)\n\n\n# r: C x B x F\n# u: if mem: C x 1 x F\n# u: if value: 1 x B x F\ndef complex_mult(r, u, inverse_r=False, moduli_1=False):\n _, _, F = u.shape\n r_rl = r[:, :, :F / 2]\n r_im = r[:, :, F / 2:]\n if inverse_r:\n if moduli_1:\n r_im = -r_im\n else:\n tmp = r_rl / (r_rl ** 2 + r_im ** 2)\n r_im = -r_im / (r_rl ** 2 + r_im ** 2)\n r_rl = tmp\n u_rl = u[:, :, :F / 2]\n u_im = u[:, :, F / 2:]\n res_rl = r_rl * u_rl - r_im * u_im\n res_im = r_rl * u_im + r_im * u_rl\n res = T.concatenate([res_rl, res_im], axis=2)\n # C x B x F\n return res\n\n\n# key: C x B x F\n# mem: C x F\ndef read(key, mem):\n value = complex_mult(\n permute(key),\n mem.dimshuffle(0, 'x', 1),\n inverse_r=True, moduli_1=True)\n return value.mean(axis=0)\n\n\n# key: C x B x F\n# value: B x F\n# mem: C x F\ndef write(key, value):\n coded_value = complex_mult(permute(key), value.dimshuffle('x', 0, 1))\n # C x F\n return coded_value.sum(axis=1)\n\nif __name__ == \"__main__\":\n # B x F\n key = T.matrix('key')\n # B x F\n value = T.matrix('value')\n # C x F\n mem = T.matrix('mem')\n\n read_func = theano.function([key, mem], read(key, mem))\n write_func = theano.function([key, value], write(key, value))\n\n # shape: 20 x 110 x 110 x 3\n data = np.load('20_images_from_imagenet.npy')[:B]\n VALUES = data.reshape(B, F) - np.mean(data.reshape(B, F),\n axis=1, keepdims=True)\n\n phis = np.random.random((B, F / 2)) * 2 * np.pi\n KEYS = np.concatenate([np.cos(phis), np.sin(phis)], axis=1)\n\n MEM = write_func(KEYS, VALUES)\n\n all_imgs = read_func(KEYS, MEM)\n\n VALUES = VALUES + np.mean(data.reshape(B, F), axis=1, keepdims=True)\n VALUES = VALUES.reshape(B, 110, 110, 3)\n VALUES = np.swapaxes(VALUES, 0, 1)\n VALUES = np.reshape(VALUES, (110, 110 * B, 3))\n plt.imshow(VALUES[:, :110 * B])\n plt.show()\n\n all_imgs = all_imgs + np.mean(data.reshape(B, F), axis=1, keepdims=True)\n all_imgs = all_imgs.reshape(B, 110, 110, 3)\n all_imgs = np.swapaxes(all_imgs, 0, 1)\n all_imgs = np.reshape(all_imgs, (110, 110 * B, 3))\n plt.imshow(all_imgs[:, :110 * B])\n plt.show()\n" ]
[ [ "numpy.swapaxes", "matplotlib.pyplot.imshow", "numpy.random.random", "numpy.reshape", "numpy.arange", "numpy.cos", "numpy.random.shuffle", "numpy.sin", "numpy.concatenate", "numpy.load", "matplotlib.pyplot.show", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]