repo_name: string (length 8 to 130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
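Each record below pairs a repo_name with four parallel sequences: a commit hexsha, a file_path, the file's full source under code, and the fully qualified API names detected in that file under apis. A minimal sketch of walking records of this shape, assuming the dump is serialized as JSON Lines (the file name records.jsonl is a hypothetical placeholder, not part of the dump):

import json

# Assumed layout: one JSON object per line carrying the five fields listed above.
with open("records.jsonl", encoding="utf-8") as f:  # hypothetical file name
    for line in f:
        record = json.loads(line)
        # hexsha, file_path, code and apis are parallel, one entry per file
        for sha, path, source, apis in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(record["repo_name"], path, sha[:8], len(apis), "APIs")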
xierensong/learnPython
[ "33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455" ]
[ "case/tfP.py" ]
[ "import tensorflow as tf\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=5)\nmodel.evaluate(x_test, y_test)" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Dense" ] ]
adeline-cs/GTR
[ "889b0cda8a3c2b061371c4a63ea871821ddcd3d7" ]
[ "lib/evaluators.py" ]
[ "from __future__ import print_function, absolute_import\nimport time\nfrom time import gmtime, strftime\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport torch\nimport numpy as np\nfrom random import randint\nfrom PIL import Image\nimport sys\nfrom . import metric\nfrom metric import Accuracy, EditDistance, RecPostProcess\nfrom tqdm import tqdm\n\nclass BaseEvaluator(object):\n def __init__(self, model, metric, use_cuda = True):\n super(BaseEvaluator, self).__init__()\n self.model = model\n self.metric = metric\n self.use_cuda = use_cuda\n self.device = torch.device('cuda' if use_cuda else 'cpu')\n self.cos_sim = torch.nn.CosineSimilarity(dim=1, eps=1e-6)\n\n def evaluate(self, data_loader, step = 1, print_freq =1, tfLogger = None, dataset = None, vis_dir = None):\n self.model.eval()\n # batch_time =\n # data_time =\n\n # forward the network\n image, outputs, targets, embeds, losses = [], {}, [], [], []\n file_names = []\n end = time.time()\n for i, input in enumerate(tqdm(data_loader)):\n data_time.update(time.time()-end)\n input_dict = self._parse_data(input)\n output_dict = self._forward(input_dict)\n batch_size = input_dict['images'].size(0)\n total_loss_batch = 0\n for k, loss in output_dict['images'].item():\n loss = loss.mean(dim = 0, keepdim = True)\n total_loss_batch += loss.item() * batch_size\n image.append(input_dict['images'])\n targets.append(input_dict['rec_tragets'])\n embeds.append(input_dict['rec_embeds'])\n losses.append(total_loss_batch)\n ## the normal situation is without lexicon, especially for IIIT5k, IC03, IC13\n if global_args.evaluate_with_lexicon:\n file_names = input_dict['file_name']\n for k, v in output_dict['output'].item():\n if k not in outputs:\n outputs[k] = []\n outputs[k].append(v.cpu())\n batch_time.update(time.time()-end)\n\n if (i+1) % print_freq == 0:\n print('[{}]\\t'\n 'Evaluation : {}/{}\\t'\n 'Time: {:.3f} ({:.3f})\\t'\n 'Data: {:.3f} ({:.3f})\\t'\n .format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n i+1, len(data_loader),\n batch_time.val, batch_time.avg,\n data_time.val, data_time.avg\n\n ))\n if not global_args.keep_ratio():\n image = torch.cat(image)\n num_sample = image.size(0)\n else:\n num_sample = sum([subimage.size(0) for subimage in image])\n targets = torch.cat(targets)\n losses = np.sum(losses)/(1.0*num_sample)\n for k , v in outputs.items():\n outputs[k] = torch.cat(outputs[k])\n\n # save info for recognition\n if 'pred_rec' in outputs:\n # evaluation with metric\n if global_args.evaluate_with_lexicon:\n eval_res = metrics_factory[self.metric + '_with_lexicon'](outputs['pred_rec'], targets, dataset,\n file_names)\n print('lexicon0: {0}, {1:.3f}'.format(self.metric, eval_res[0]))\n print('lexicon50: {0}, {1:.3f}'.format(self.metric, eval_res[1]))\n print('lexicon1k: {0}, {1:.3f}'.format(self.metric, eval_res[2]))\n print('lexiconfull: {0}, {1:.3f}'.format(self.metric, eval_res[3]))\n eval_res = eval_res[0]\n else:\n eval_res = metrics_factory[self.metric](outputs['pred_rec'], targets, dataset)\n print('lexicon0: {0}: {1:.3f}'.format(self.metric, eval_res))\n pred_list, targ_list, score_list = RecPostProcess(outputs['pred_rec'], targets, outputs['pred_rec_score'],\n dataset)\n with open(\"embed_v1_results.txt\", \"w\", encoding=\"utf-8\") as f:\n for pred, targ in zip(pred_list, targ_list):\n f.write(\"{} {}\\n\".format(pred, targ))\n if 'pred_embed' in outputs:\n output_cos_sim = self.cos_sim(outputs['pred_embed'], torch.cat(embeds).cpu())\n output_cos_sim = torch.mean(torch.abs(output_cos_sim))\n 
print(\"Emebedding vector cos similarity: {:3f}\".format(output_cos_sim.item()))\n if tfLogger is not None:\n # (1) Log the scalar values\n info = {\n 'loss': losses,\n self.metric: eval_res,\n }\n for tag, value in info.items():\n tfLogger.scalar_summary(tag, value, step)\n\n # ====== Visualization ======#\n if vis_dir is not None:\n # recognition_vis(images, outputs['pred_rec'], targets, score_list, dataset, vis_dir)\n stn_vis(images, outputs['rectified_images'], outputs['ctrl_points'], outputs['pred_rec'],\n targets, score_list, outputs['pred_score'] if 'pred_score' in outputs else None, dataset, vis_dir)\n return eval_res\n\n # NotImplementedError, ValueError will represent what , the framework of python\n def _parse_data(self, input):\n raise NotImplementedError\n def _forward(self, input):\n raise NotImplementedError\n\nclass Evaluator(BaseEvaluator):\n def _parse_data(self, input):\n input_dict = {}\n if global_args.evaluate_with_lexicon:\n imgs, label_encs, lengths, file_name = inputs\n else:\n imgs, label_encs, lengths, embeds_ = inputs\n with torch.no_grad():\n images = imgs.to(self.device)\n if label_encs is None:\n labels = label_encs.to(self.device)\n if embeds_ is not None:\n embeds_ = embeds_.to(self.device)\n input_dict['images'] = images\n input_dict['rec_tragets'] = labels\n input_dict['rec_lengths'] = lengths\n input_dict['rec_embeds'] = embeds\n if global_args.evaluate_with_lexicon:\n input_dict['file_name'] = file_name\n return input_dict\n def _forward(self, input_dict):\n self.model.eval()\n with torch.no_grad:\n output_dict = self.model(input_dict)\n return output_dict" ]
[ [ "numpy.sum", "torch.no_grad", "torch.abs", "torch.device", "torch.cat", "torch.nn.CosineSimilarity" ] ]
kevint324/tensorboard
[ "cbc5b1f2d74236d89baa9d4810c166e4cee973a9" ]
[ "tensorboard/plugins/core/core_plugin_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the TensorBoard core endpoints.\"\"\"\n\n\nimport collections.abc\nimport contextlib\nimport io\nimport json\nimport os\nfrom unittest import mock\nimport zipfile\n\nimport tensorflow as tf\nfrom werkzeug import test as werkzeug_test\nfrom werkzeug import wrappers\n\nfrom tensorboard.backend import application\nfrom tensorboard.backend.event_processing import data_provider\nfrom tensorboard.backend.event_processing import (\n plugin_event_multiplexer as event_multiplexer,\n)\nfrom tensorboard.data import provider\nfrom tensorboard.plugins import base_plugin\nfrom tensorboard.plugins.core import core_plugin\nfrom tensorboard.util import test_util\n\nFAKE_INDEX_HTML = b\"<!doctype html><title>fake-index</title>\"\nFAKE_INDEX_JS = b\"console.log('hello');\"\nNO_CACHE_CONTROL_VALUE = \"no-cache, must-revalidate\"\nONE_DAY_CACHE_CONTROL_VALUE = \"private, max-age=86400\"\n\n\nclass FakeFlags(object):\n def __init__(\n self,\n bind_all=False,\n host=None,\n inspect=False,\n version_tb=False,\n logdir=\"\",\n logdir_spec=\"\",\n event_file=\"\",\n db=\"\",\n path_prefix=\"\",\n generic_data=\"true\",\n grpc_data_provider=\"\",\n reuse_port=False,\n ):\n self.bind_all = bind_all\n self.host = host\n self.inspect = inspect\n self.version_tb = version_tb\n self.logdir = logdir\n self.logdir_spec = logdir_spec\n self.event_file = event_file\n self.db = db\n self.path_prefix = path_prefix\n self.generic_data = generic_data\n self.grpc_data_provider = grpc_data_provider\n self.reuse_port = reuse_port\n\n\nclass CorePluginFlagsTest(tf.test.TestCase):\n def testFlag(self):\n loader = core_plugin.CorePluginLoader()\n loader.fix_flags(FakeFlags(version_tb=True))\n loader.fix_flags(FakeFlags(inspect=True, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=True, event_file=\"/tmp/event.out\"))\n loader.fix_flags(FakeFlags(inspect=False, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=False, db=\"sqlite:foo\"))\n # User can pass both, although the behavior is not clearly defined.\n loader.fix_flags(\n FakeFlags(inspect=False, logdir=\"/tmp\", db=\"sqlite:foo\")\n )\n\n logdir_or_db_req = r\"A logdir or db must be specified\"\n one_of_event_or_logdir_req = (\n r\"Must specify either --logdir.*but not both.$\"\n )\n event_or_logdir_req = r\"Must specify either --logdir or --event_file.$\"\n\n with self.assertRaisesRegex(ValueError, event_or_logdir_req):\n loader.fix_flags(FakeFlags(inspect=True))\n with self.assertRaisesRegex(ValueError, one_of_event_or_logdir_req):\n loader.fix_flags(\n FakeFlags(\n inspect=True, logdir=\"/tmp\", event_file=\"/tmp/event.out\"\n )\n )\n with self.assertRaisesRegex(ValueError, logdir_or_db_req):\n loader.fix_flags(FakeFlags(inspect=False))\n with self.assertRaisesRegex(ValueError, logdir_or_db_req):\n loader.fix_flags(\n FakeFlags(inspect=False, 
event_file=\"/tmp/event.out\")\n )\n\n def testPathPrefix_stripsTrailingSlashes(self):\n loader = core_plugin.CorePluginLoader()\n for path_prefix in (\"/hello\", \"/hello/\", \"/hello//\", \"/hello///\"):\n flag = FakeFlags(\n inspect=False, logdir=\"/tmp\", path_prefix=path_prefix\n )\n loader.fix_flags(flag)\n self.assertEqual(\n flag.path_prefix,\n \"/hello\",\n \"got %r (input %r)\" % (flag.path_prefix, path_prefix),\n )\n\n def testPathPrefix_mustStartWithSlash(self):\n loader = core_plugin.CorePluginLoader()\n flag = FakeFlags(inspect=False, logdir=\"/tmp\", path_prefix=\"noslash\")\n with self.assertRaises(base_plugin.FlagsError) as cm:\n loader.fix_flags(flag)\n msg = str(cm.exception)\n self.assertIn(\"must start with slash\", msg)\n self.assertIn(repr(\"noslash\"), msg)\n\n\nclass CorePluginTest(tf.test.TestCase):\n def setUp(self):\n super().setUp()\n self.multiplexer = event_multiplexer.EventMultiplexer()\n self.logdir = self.get_temp_dir()\n provider = data_provider.MultiplexerDataProvider(\n self.multiplexer, self.logdir\n )\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=self.logdir,\n data_provider=provider,\n window_title=\"title foo\",\n )\n self.plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n def _add_run(self, run_name):\n run_path = os.path.join(self.logdir, run_name)\n with test_util.FileWriter(run_path) as writer:\n writer.add_test_summary(\"foo\")\n self.multiplexer.AddRunsFromDirectory(self.logdir)\n self.multiplexer.Reload()\n\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testRoutesProvided(self):\n \"\"\"Tests that the plugin offers the correct routes.\"\"\"\n routes = self.plugin.get_plugin_apps()\n self.assertIsInstance(routes[\"/data/logdir\"], collections.abc.Callable)\n self.assertIsInstance(routes[\"/data/runs\"], collections.abc.Callable)\n\n def testIndex_returnsActualHtml(self):\n \"\"\"Test the format of the root / endpoint.\"\"\"\n response = self.server.get(\"/\")\n self.assertEqual(200, response.status_code)\n self.assertStartsWith(response.headers.get(\"Content-Type\"), \"text/html\")\n html = response.get_data()\n self.assertEqual(\n html,\n b'<!doctype html><meta name=\"tb-relative-root\" content=\"./\">'\n + FAKE_INDEX_HTML,\n )\n\n def test_js_no_cache(self):\n response = self.server.get(\"/index.js?foo=bar\")\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n NO_CACHE_CONTROL_VALUE, response.headers.get(\"Cache-Control\")\n )\n\n def test_js_cache(self):\n response = self.server.get(\"/index.js?_file_hash=meow\")\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n ONE_DAY_CACHE_CONTROL_VALUE, response.headers.get(\"Cache-Control\")\n )\n\n def test_html_no_cache(self):\n response = self.server.get(\"/index.html?_file_hash=meow\")\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n NO_CACHE_CONTROL_VALUE, response.headers.get(\"Cache-Control\")\n )\n\n def testDataPaths_disableAllCaching(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n for path in (\"/data/runs\", \"/data/logdir\"):\n response = self.server.get(path)\n self.assertEqual(200, response.status_code, msg=path)\n self.assertEqual(\"0\", 
response.headers.get(\"Expires\"), msg=path)\n\n def testEnvironmentForWindowTitle(self):\n \"\"\"Test that the environment route correctly returns the window\n title.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"window_title\"], \"title foo\")\n\n def testEnvironmentForLogdir(self):\n \"\"\"Test that the environment route correctly returns the logdir.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], self.get_temp_dir())\n\n def testEnvironmentWithExperimentMetadata(self):\n class FakeDataProvider(object):\n def experiment_metadata(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return provider.ExperimentMetadata(\n data_location=\"/tmp/logs\",\n experiment_name=\"Experiment #5 (実験#5)\",\n experiment_description=\"Take five (😊)\",\n creation_time=1234.5,\n )\n\n self.context = base_plugin.TBContext(\n flags=FakeFlags(generic_data=\"true\"),\n data_provider=FakeDataProvider(),\n )\n\n self.plugin = core_plugin.CorePlugin(self.context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], \"/tmp/logs\")\n self.assertEqual(parsed_object[\"window_title\"], None)\n self.assertEqual(\n parsed_object[\"experiment_name\"], \"Experiment #5 (実験#5)\"\n )\n self.assertEqual(\n parsed_object[\"experiment_description\"], \"Take five (😊)\"\n )\n self.assertEqual(parsed_object[\"creation_time\"], 1234.5)\n\n def testEnvironmentDebugOffByDefault(self):\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertNotIn(\"debug\", parsed_object)\n\n def testEnvironmentDebugOnExplicitly(self):\n multiplexer = event_multiplexer.EventMultiplexer()\n logdir = self.get_temp_dir()\n provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=logdir,\n data_provider=provider,\n window_title=\"title foo\",\n )\n plugin = core_plugin.CorePlugin(context, include_debug_info=True)\n app = application.TensorBoardWSGI([plugin])\n server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(server, \"/data/environment\")\n self.assertIn(\"debug\", parsed_object)\n\n def testLogdir(self):\n \"\"\"Test the format of the data/logdir endpoint.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/logdir\")\n self.assertEqual(parsed_object, {\"logdir\": self.get_temp_dir()})\n\n def testRuns(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n self._add_run(\"run1\")\n run_json = self._get_json(self.server, \"/data/runs\")\n self.assertEqual(run_json, [\"run1\"])\n\n def testRunsAppendOnly(self):\n \"\"\"Test that new runs appear after old ones in /data/runs.\"\"\"\n fake_wall_times = {\n \"run1\": 1234.0,\n \"avocado\": 2345.0,\n \"zebra\": 3456.0,\n \"ox\": 4567.0,\n \"mysterious\": None,\n \"enigmatic\": None,\n }\n\n def FirstEventTimestamp_stub(run_name):\n matches = [\n candidate_name\n for candidate_name in fake_wall_times\n if run_name.endswith(candidate_name)\n ]\n self.assertEqual(len(matches), 1, \"%s (%s)\" % (matches, run_name))\n wall_time = fake_wall_times[matches[0]]\n if wall_time is None:\n raise ValueError(\"No event timestamp could be found\")\n else:\n return wall_time\n\n with 
mock.patch.object(\n self.multiplexer, \"FirstEventTimestamp\"\n ) as mock_first_event_timestamp:\n mock_first_event_timestamp.side_effect = FirstEventTimestamp_stub\n # Start with a single run.\n self._add_run(\"run1\")\n\n # Add one run: it should come last.\n self._add_run(\"avocado\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\"],\n )\n\n # Add another run: it should come last, too.\n self._add_run(\"zebra\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\"],\n )\n\n # And maybe there's a run for which we somehow have no timestamp.\n self._add_run(\"mysterious\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"mysterious\"],\n )\n\n # Add another timestamped run: it should come before the timestamp-less one.\n self._add_run(\"ox\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"mysterious\"],\n )\n\n # Add another timestamp-less run, lexicographically before the other one:\n # it should come after all timestamped runs but first among timestamp-less.\n self._add_run(\"enigmatic\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"enigmatic\", \"mysterious\"],\n )\n\n def testNotifications(self):\n \"\"\"Test the format of the /data/notifications endpoint.\"\"\"\n notifications_json = self._get_json(self.server, \"/data/notifications\")\n self.assertEqual(notifications_json, {\"notifications\": []})\n\n\nclass CorePluginPathPrefixTest(tf.test.TestCase):\n def _send_request(self, path_prefix, pathname):\n multiplexer = event_multiplexer.EventMultiplexer()\n logdir = self.get_temp_dir()\n provider = data_provider.MultiplexerDataProvider(multiplexer, logdir)\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=logdir,\n data_provider=provider,\n window_title=\"\",\n flags=FakeFlags(path_prefix=path_prefix),\n )\n plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([plugin], path_prefix=path_prefix)\n server = werkzeug_test.Client(app, wrappers.BaseResponse)\n return server.get(pathname)\n\n def _assert_index(self, response, expected_tb_relative_root):\n self.assertEqual(200, response.status_code)\n self.assertStartsWith(response.headers.get(\"Content-Type\"), \"text/html\")\n html = response.get_data()\n\n expected_meta = (\n '<!doctype html><meta name=\"tb-relative-root\" content=\"%s\">'\n % expected_tb_relative_root\n ).encode()\n self.assertEqual(\n html,\n expected_meta + FAKE_INDEX_HTML,\n )\n\n def testIndex_no_path_prefix(self):\n self._assert_index(self._send_request(\"\", \"/\"), \"./\")\n self._assert_index(self._send_request(\"\", \"/index.html\"), \"./\")\n\n def testIndex_path_prefix_foo(self):\n self._assert_index(self._send_request(\"/foo\", \"/foo/\"), \"./\")\n self._assert_index(self._send_request(\"/foo\", \"/foo/index.html\"), \"./\")\n\n def testIndex_path_prefix_foo_exp_route(self):\n self._assert_index(\n self._send_request(\"/foo\", \"/foo/experiment/123/\"), \"../../\"\n )\n\n def testIndex_path_prefix_foo_incorrect_route(self):\n self.assertEqual(\n 404, (self._send_request(\"/foo\", \"/foo/meow/\").status_code)\n )\n self.assertEqual(404, (self._send_request(\"/foo\", \"/\").status_code))\n self.assertEqual(\n 404, (self._send_request(\"/foo\", \"/index.html\").status_code)\n )\n\n # Missing trailing \"/\" 
causes redirection.\n self.assertEqual(301, (self._send_request(\"/foo\", \"/foo\").status_code))\n self.assertEqual(\n 301, (self._send_request(\"/foo\", \"/foo/experiment/123\").status_code)\n )\n\n def testIndex_path_prefix_foo_bar(self):\n self._assert_index(self._send_request(\"/foo/bar\", \"/foo/bar/\"), \"./\")\n self._assert_index(\n self._send_request(\"/foo/bar\", \"/foo/bar/index.html\"), \"./\"\n )\n\n def testIndex_path_prefix_foo_bar_exp_route(self):\n self._assert_index(\n self._send_request(\"/foo/bar\", \"/foo/bar/experiment/123/\"), \"../../\"\n )\n\n\ndef get_test_assets_zip_provider():\n memfile = io.BytesIO()\n with zipfile.ZipFile(\n memfile, mode=\"w\", compression=zipfile.ZIP_DEFLATED\n ) as zf:\n zf.writestr(\"index.html\", FAKE_INDEX_HTML)\n zf.writestr(\"index.js\", FAKE_INDEX_JS)\n return lambda: contextlib.closing(io.BytesIO(memfile.getvalue()))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
veya2ztn/mltool
[ "4ed151152845ebe3de128e1f53c478581c1492e4" ]
[ "ModelArchi/GANModel/SNdcgan.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import autograd\nimport time as t\nimport os\nfrom itertools import chain\nfrom torchvision import utils\nfrom .spectral_normalization import SpectralNorm\n\nclass WassersteinLoss(torch.nn.Module):\n def forward(self, x , target):\n loss = -target.mean()*x.mean()\n return loss\n\nclass Generator(torch.nn.Module):\n def __init__(self, channels):\n super().__init__()\n self.main_module = nn.Sequential(\n\n nn.ConvTranspose2d(in_channels=100, out_channels=1024, kernel_size=4, stride=1, padding=0),\n nn.BatchNorm2d(num_features=1024),\n nn.ReLU(True),\n\n\n nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(num_features=512),\n nn.ReLU(True),\n\n\n nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(num_features=256),\n nn.ReLU(True),\n\n\n nn.ConvTranspose2d(in_channels=256, out_channels=channels, kernel_size=4, stride=2, padding=1))\n\n\n self.output = nn.Tanh()\n\n def forward(self, x):\n x = self.main_module(x)\n return self.output(x)\n\nclass Discriminator(torch.nn.Module):\n def __init__(self, channels,version=\"DCGAN_M\"):\n super().__init__()\n self.version = version\n self.main_module = nn.Sequential(\n\n SpectralNorm(nn.Conv2d(in_channels=1, out_channels=256, kernel_size=3, stride=1, padding=1)),\n nn.LeakyReLU(0.2, inplace=True),\n\n\n SpectralNorm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1)),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2, inplace=True),\n\n\n SpectralNorm(nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=4, stride=2, padding=1)),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.2, inplace=True),\n\n\n )\n\n if version == \"DCGAN_L\":\n self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)))\n self.metric = torch.nn.BCEWithLogitsLoss()\n elif version == \"WGAN_GP\":\n self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)))\n self.metric = WassersteinLoss()\n else:\n self.output = nn.Sequential(SpectralNorm(nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)),\n nn.Sigmoid())\n if version == \"DCGAN\":self.metric = torch.nn.BCELoss()\n elif version == \"DCGAN_M\":self.metric = torch.nn.MSELoss()\n else:\n raise NotImplementedError\n def forward(self, x, target=None):\n x = self.main_module(x)\n x = self.output(x)\n return x.reshape(x.size(0),x.size(1)) #(b,1)\n\n def calculate_gradient_penalty(self, real_images, fake_images,GP_lambda= 10):\n batch_size = len(real_images)\n device = next(self.parameters()).device\n\n eta = torch.FloatTensor(batch_size,1,1,1).uniform_(0,1)\n eta = eta.expand(batch_size, real_images.size(1), real_images.size(2), real_images.size(3))\n eta = eta.to(device)\n\n interpolated = eta * real_images + ((1 - eta) * fake_images)\n interpolated = interpolated.to(device)\n interpolated = eta * real_images + ((1 - eta) * fake_images)\n\n interpolated = Variable(interpolated, requires_grad=True)\n prob_interpolated = self(interpolated)\n gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,\n grad_outputs=torch.ones(prob_interpolated.size()).to(device),\n create_graph=True, retain_graph=True)[0]\n grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * GP_lambda\n return grad_penalty\n\nclass Binary_Checker(nn.Module):\n def 
__init__(self):\n super().__init__()\n self.weight = nn.Parameter(torch.Tensor(1))\n def forward(self,x):\n\n shape=tuple(range(1,len(x.shape)))\n return (x**2).mean(shape).unsqueeze(1)\n\nclass DCGAN_MODEL(object):\n def __init__(self, args):\n print(\"DCGAN model initalization.\")\n self.G = Generator(args.channels)\n if args.GAN_TYPE == \"ForceBINARY\":\n self.D = Binary_Checker()\n else:\n self.D = Discriminator(args.channels,args.GAN_TYPE)\n self.D.version = args.GAN_TYPE\n self.C = args.channels\n self.check_cuda(True)\n\n def check_cuda(self, cuda_flag=False):\n print(cuda_flag)\n if cuda_flag:\n self.cuda_index = 0\n self.cuda = True\n self.D.cuda(self.cuda_index)\n self.G.cuda(self.cuda_index)\n print(\"Cuda enabled flag: {}\".format(self.cuda))\n else:\n self.cuda = False\n\n def save_to(self,path,mode=\"full\"):\n checkpoint = self.all_state_dict(mode=mode)\n torch.save(checkpoint,path)\n\n def all_state_dict(self,epoch=None,mode=\"full\"):\n checkpoint={}\n checkpoint['epoch'] = epoch\n checkpoint['D_state_dict'] = self.D.state_dict()\n checkpoint['G_state_dict'] = self.G.state_dict()\n if mode != \"light\":\n if hasattr(self,\"I2C\"):checkpoint['C_state_dict'] = self.I2C.state_dict()\n if hasattr(self,\"D_optimizer\"):checkpoint['D_optimizer'] = self.d_optimizer.state_dict()\n if hasattr(self,\"G_optimizer\"):checkpoint['G_optimizer'] = self.g_optimizer.state_dict()\n if hasattr(self,\"C_optimizer\"):checkpoint['C_optimizer'] = self.c_optimizer.state_dict()\n return checkpoint\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.FloatTensor", "torch.nn.LeakyReLU", "torch.nn.MSELoss", "torch.save", "torch.autograd.Variable", "torch.nn.Tanh", "torch.nn.Conv2d", "torch.nn.BCEWithLogitsLoss", "torch.nn.BCELoss", "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.Tensor", "torch.nn.ConvTranspose2d" ] ]
augeas/NeverMindTheMolluscs
[ "829185ad1d9239368d5b6f3572fc07b7825deb49" ]
[ "oliva.py" ]
[ "import numpy as np\n\n\nclass Oliva(object):\n def __init__(self,width=640, skip=12, act_diff=0.015, act_decay=0.1,\n act_prod=0.1, sat=0.25, in_diff=0.0, in_decay=0.014, in_mm=0.1,\n h_decay=0.1, hormone=0.5):\n \n self.width = width\n self.cells = np.zeros((2,2,self.width))\n \n self.skip = skip\n \n self.act_diff = act_diff\n self.act_decay = act_decay\n self.act_prod = act_prod\n self.sat = sat\n \n self.in_diff = in_diff\n self.in_decay = in_decay\n self.in_mm = in_mm\n self.h_decay = h_decay\n self.h_fac = 1-self.h_decay\n self.hormone = hormone\n \n self.tick = False\n \n self.cells[0,1,:] = 0.1\n \n self.fluct = self.act_decay * (0.96 +\n 0.08 *np.random.random(self.width))\n \n seeds = np.random.choice(np.arange(self.width),30,replace=False)\n self.cells[0,0,seeds] = 1.0\n \n self.act_diff_const = 1.0 - self.act_decay -2*self.act_diff\n self.in_diff_const = 1.0 - self.in_decay -2*self.in_diff\n\n\n def step(self):\n if self.tick:\n old = 1\n new = 0\n else:\n old = 0\n new = 1\n \n l_bound = np.copy(self.cells[old,:,0])\n r_bound = np.copy(self.cells[old,:,-1])\n \n act_sq = np.square(self.cells[old,0,:])\n auto_cat = self.fluct * act_sq / (1 + self.sat * act_sq)\n \n left_cells = np.roll(self.cells[old,:,:],-1,axis=1)\n right_cells = np.roll(self.cells[old,:,:],1,axis=1)\n\n left_cells[:,0] = l_bound\n right_cells[:,-1] = r_bound\n \n self.cells[new,0,:] = self.cells[old,0,:] * self.act_diff_const + self.act_diff * (left_cells[0,:] + right_cells[0,:]) + auto_cat / (self.in_mm + self.cells[old,1,:])\n \n self.cells[new,1,:] = self.cells[old,1,:] * self.in_diff_const + self.in_diff * (left_cells[1,:] + right_cells[1,:]) + auto_cat\n \n hormone_prod = (self.cells[old,0,:] * self.h_decay).sum()\n \n self.hormone = self.hormone * self.h_fac + hormone_prod / self.width\n \n self.in_diff_const = 1.0 - 2 * self.in_diff - self.in_decay / self.hormone\n \n self.tick = not self.tick\n \n \n def __iter__(self):\n return self\n \n \n def __next__(self):\n self.step()\n if self.tick:\n out = np.copy(self.cells[0,:,:])\n else:\n out = np.copy(self.cells[1,:,:])\n for i in range(self.skip):\n self.step()\n return out\n \n " ]
[ [ "numpy.roll", "numpy.zeros", "numpy.copy", "numpy.arange", "numpy.random.random", "numpy.square" ] ]
MElody9120/UESTC-FinalRepo
[ "f7271e76090d92866a4c9346da19e4b7464f5f0b" ]
[ "BikeShare/bikeShareData/parseCSV.py" ]
[ "# Author Melody\n# Data 2021-06-03 16:27:40\n\nimport pandas as pd\nimport geopy.distance\n\n\n# Pytest is an automated testing module on Python,Use Pytest to test the legitimacy on Bikeshare Data\n# import pytest as pt\n\n# Coords is a data structures to save How Bikeshare Date,Coord just like a List\n\n\ndef getStartEndCoords():\n # load CSV File by using pandas API\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", index_col='Ending Station ID', low_memory=False)\n # Created a list, here in the interest of saving time,as the csv will not change\n # Numbers represents the Bikeshare stations ID\n allStations = [3005, 3006, 3007, 3008, 3009, 3010, 3011, 3014, 3016, 3018, 3019, 3020, 3021, 3022, 3023, 3024,\n 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3042,\n 3045, 3046, 3047, 3048, 3049, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3062, 3063,\n 3064, 3065, 3066, 3067, 3068, 3069, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 4108]\n # geoCodeEnd meaning geography ID just like 3005 -> 7th & Flower\n geoCodeEnd = []\n for station in allStations:\n temp = []\n # freq -> frequency,frequency must be an INT Type\n freq = df.at[float(station), \"Ending Station Latitude\"].size\n assert type(freq) == int\n lat = (df.at[float(station), \"Ending Station Latitude\"]).values[0]\n lng = (df.at[float(station), \"Ending Station Longitude\"]).values[0]\n # Determining data legitimacy, NaN -> Not a Number\n if str(lat) != 'NaN' and str(lng) != 'NaN':\n temp.append([lat, lng])\n temp.append(freq)\n geoCodeEnd.append(temp)\n dfS = pd.read_csv(filename, sep=\",\", index_col='Starting Station ID', low_memory=False)\n geoCodeStart = []\n for station in allStations:\n tempS = []\n freqS = dfS.at[float(station), \"Starting Station Latitude\"].size\n assert type(freqS) == int\n lat = (dfS.at[float(station), \"Starting Station Latitude\"]).values[0]\n lng = (dfS.at[float(station), \"Starting Station Longitude\"]).values[0]\n if str(lat) != 'NaN' and str(lng) != 'NaN':\n tempS.append([lat, lng])\n tempS.append(freqS)\n geoCodeStart.append(tempS)\n return geoCodeEnd, allStations, geoCodeStart\n\n\n# This will calculate the usage of the users\ndef getRegularRiders():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n a = df.loc[df['Passholder Type'] != \"Walk-up\"]\n return len(a.index) / 182 # 182 is the number of days in the given period minus weekends and holidays\n\n\n# This will count the Average distance of the users\ndef avgDistance():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n df = df[[\"Duration\", \"Starting Station Latitude\", \"Starting Station Longitude\", \"Ending Station Latitude\",\n \"Ending Station Longitude\"]]\n # Distance calculation modified method from StackOverflow\n sum = 0\n count = 0\n time = 0\n for index, row in df.iterrows():\n lat1 = row[\"Starting Station Latitude\"]\n lat2 = row[\"Ending Station Latitude\"]\n lon1 = row[\"Starting Station Longitude\"]\n lon2 = row[\"Ending Station Longitude\"]\n if str(lat1) != 'nan' and str(lat2) != 'nan' and str(lon1) != 'nan' and str(lon2) != 'nan':\n coords_1 = (lat1, lon1)\n coords_2 = (lat2, lon2)\n dist = geopy.distance.geodesic(coords_1, coords_2).miles\n if dist > 0:\n time = time + row[\"Duration\"]\n sum = sum + dist\n count = count + 1\n return (sum / count), (time / 
count)\n\n\n# This will calculate User`s passes type\ndef pieChartPassHolder():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n df = df[[\"Passholder Type\"]]\n w = len((df.loc[df['Passholder Type'] == \"Walk-up\"]).index)\n f = len((df.loc[df['Passholder Type'] == \"Flex Pass\"]).index)\n m = len((df.loc[df['Passholder Type'] == \"Monthly Pass\"]).index)\n s = len((df.loc[df['Passholder Type'] == \"Staff Annual\"]).index)\n return [w, f, m, s]\n\n\n# This will calculate whether the user is a one-way or round trip\ndef pieChartTripRoute():\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", low_memory=False)\n df = df[[\"Trip Route Category\"]]\n o = len((df.loc[df[\"Trip Route Category\"] == \"One Way\"]).index)\n r = len((df.loc[df[\"Trip Route Category\"] == \"Round Trip\"]).index)\n return [o, r]\n\n\n# this will show a line to represent the Relationship between temperature and number of bicycles lent\ndef lineByMonth():\n totals = {7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n filename = \"2019.7-2020.3-Bikeshare-Los_Angeles.csv\"\n df = pd.read_csv(filename, sep=\",\", converters={'Start Time': pd.to_datetime}, low_memory=False)\n for index, row in df.iterrows():\n totals[((row[\"Start Time\"]).month)] += 1\n return totals\n" ]
[ [ "pandas.read_csv" ] ]
CFM-MSG/Code_LEORN
[ "fabea1e1ded973a4db692e51e2df442bde55f626" ]
[ "lib/models/frame_modules/frame_pool.py" ]
[ "import torch\nfrom torch import nn\n\n\nclass FrameAvgPool(nn.Module):\n\n def __init__(self, cfg):\n super(FrameAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n vis_h = torch.relu(self.vis_conv(visual_input))\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n return vis_h # batchsize * 512 * 16\n\n\nclass MultiFeatureAvgPool_C(nn.Module):\n def __init__(self, cfg):\n super(MultiFeatureAvgPool_C, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n # self.global_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_conv = nn.Conv1d(hidden_size + input_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n # self.norm1 = nn.BatchNorm1d(hidden_size)\n self.norm = nn.BatchNorm1d(hidden_size)\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n assert isinstance(visual_input, list)\n rcnn_feature = visual_input[0].transpose(1, 2)\n global_feature = visual_input[1].transpose(1, 2)\n # global_feature = self.global_conv(global_feature)\n # global_feature = self.norm1(global_feature)\n # global_feature = torch.relu(global_feature)\n\n vis_h = torch.cat([rcnn_feature, global_feature], dim=1)\n vis_h = self.vis_conv(vis_h)\n vis_h = torch.relu(vis_h)\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n vis_h = self.norm(vis_h)\n\n return vis_h # batchsize * 512 * 16\n\n\nclass MultiFeatureAvgPool(nn.Module):\n def __init__(self, cfg):\n super(MultiFeatureAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n self.global_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_conv = nn.Conv1d(hidden_size + hidden_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n # self.norm1 = nn.BatchNorm1d(hidden_size)\n # self.norm2 = nn.BatchNorm1d(hidden_size)\n self.norm = nn.BatchNorm1d(hidden_size)\n # self.__init_fuse_conv__(hidden_size)\n\n def __init_fuse_conv__(self, hidden_size):\n weight1 = torch.eye(hidden_size, hidden_size)\n weight2 = torch.zeros(hidden_size, hidden_size)\n weight = torch.cat([weight1, weight2], dim=1).unsqueeze(2)\n weight = nn.Parameter(weight)\n bias = nn.Parameter(torch.zeros(hidden_size))\n self.vis_conv.weight = weight\n self.vis_conv.bias = bias\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n assert isinstance(visual_input, list)\n rcnn_feature = visual_input[0].transpose(1, 2)\n global_feature = visual_input[1].transpose(1, 2)\n global_feature = self.global_conv(global_feature)\n global_feature = torch.relu(global_feature)\n\n vis_h = torch.cat([rcnn_feature, global_feature], dim=1)\n vis_h = self.vis_conv(vis_h)\n vis_h = self.norm(vis_h)\n vis_h = torch.relu(vis_h)\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n return vis_h # batchsize * 512 * 16\n\nclass MultiFeaturePoolAvg(nn.Module):\n def __init__(self, cfg):\n super(MultiFeatureAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE\n self.global_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_conv = 
nn.Conv1d(hidden_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n self.fuse_conv = nn.Conv1d(hidden_size + hidden_size, hidden_size, 1, 1)\n self.__init_fuse_conv__(hidden_size)\n\n def __init_fuse_conv__(self, hidden_size):\n weight1 = torch.eye(hidden_size, hidden_size)\n weight2 = torch.zeros(hidden_size, hidden_size)\n weight = torch.cat([weight1, weight2], dim=1).unsqueeze(2)\n weight = nn.Parameter(weight)\n bias = nn.Parameter(torch.zeros(hidden_size))\n self.fuse_conv.weight = weight\n self.fuse_conv.bias = bias\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n assert isinstance(visual_input, list)\n rcnn_feature = visual_input[0].transpose(1, 2)\n global_feature = visual_input[1].transpose(1, 2)\n\n global_feature = self.global_conv(global_feature)\n global_feature = torch.relu(global_feature)\n global_feature = self.avg_pool(global_feature)\n\n vis_h = self.vis_conv(rcnn_feature)\n vis_h = torch.relu(vis_h)\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n vis_h = torch.cat([vis_h, global_feature], dim=1)\n vis_h = torch.relu(self.fuse_conv(vis_h))\n return vis_h # batchsize * 512 * 16\n\nclass FrameMaxPool(nn.Module):\n\n def __init__(self, input_size, hidden_size, stride):\n super(FrameMaxPool, self).__init__()\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.max_pool = nn.MaxPool1d(stride)\n\n def forward(self, visual_input):\n vis_h = torch.relu(self.vis_conv(visual_input))\n vis_h = self.max_pool(vis_h)\n return vis_h\n\n\nclass SequentialFrameAttentionPool(nn.Module):\n\n def __init__(self, cfg):\n super(SequentialFrameAttentionPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n self.hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n self.stride = cfg.STRIDE # 16\n self.sqn = cfg.SQN_NUM\n # self.sqn = 2\n att_hidden_size = 256\n\n self.vis_conv = nn.Conv1d(input_size, self.hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, self.stride)\n\n self.global_emb_fn = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size) for i in range(self.sqn)])\n self.guide_emb_fn = nn.Sequential(*[\n nn.Linear(2 * self.hidden_size, self.hidden_size),\n nn.ReLU()\n ])\n\n self.att_fn1 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn2 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn3 = nn.Linear(att_hidden_size, 1)\n self.softmax = nn.Softmax(dim=1)\n # self.drop = nn.Dropout()\n\n self.vis_out_conv = nn.Conv1d(self.hidden_size * self.sqn, self.hidden_size, 1, 1)\n\n def forward(self, visual_input):\n B, _, v_len = visual_input.shape\n vis_h = torch.relu(self.vis_conv(visual_input))\n\n avg_vis = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n seg_list = []\n att_seg_list = []\n for i in range(v_len // self.stride):\n vis_seg = vis_h[:, :, self.stride * i: self.stride * (i + 1)].transpose(1, 2) # batchsize * 16 * 512\n avg_seg = avg_vis[:, :, i]\n prev_se = avg_seg.new_zeros(B, self.hidden_size)\n\n sqn_list = []\n att_list = []\n for m in range(self.sqn):\n v_n = self.global_emb_fn[m](avg_seg)\n g_n = torch.relu(self.guide_emb_fn(torch.cat([v_n, prev_se], dim=1))) # batchsize * 512\n\n att = torch.tanh(self.att_fn1(g_n).unsqueeze(1).expand(-1, self.stride, -1) + self.att_fn2(vis_seg))\n att = self.att_fn3(att)\n\n att = self.softmax(att) # batchsize * 16 * 1\n # TODO 使用sigmoid还是softmax\n # att = torch.sigmoid(att) * 2 - 1\n\n prev_se = torch.sum(vis_seg * att, dim=1) # batchsize * 512\n sqn_list.append(prev_se)\n att_list.append(att)\n\n vis_new = 
torch.cat(sqn_list, dim=1)\n seg_list.append(vis_new)\n att_seg_list.append(torch.cat(att_list, dim=2)) # batchsize * 16 * sqn\n\n vis_out = torch.relu(self.vis_out_conv(torch.stack(seg_list, dim=2)))\n att_out = torch.stack(att_seg_list, dim=1) # batchsize * 16 * 16 * sqn\n\n return vis_out, att_out\n\n\nclass SequentialFrameWordAttentionPool(nn.Module):\n\n def __init__(self, cfg):\n super(SequentialFrameWordAttentionPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n self.hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n self.stride = cfg.STRIDE # 16\n # self.sqn = cfg.SQN_NUM\n self.sqn = 3\n att_hidden_size = 256\n\n self.vis_conv = nn.Conv1d(input_size, self.hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, self.stride)\n\n self.global_emb_fn = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size) for i in range(self.sqn)])\n self.guide_emb_fn = nn.Sequential(*[\n nn.Linear(2 * self.hidden_size, self.hidden_size),\n nn.ReLU()\n ])\n\n self.att_fn1 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn2 = nn.Linear(self.hidden_size, att_hidden_size)\n self.att_fn3 = nn.Linear(att_hidden_size, 1)\n self.softmax = nn.Softmax(dim=1)\n # self.drop = nn.Dropout()\n\n self.vis_out_conv = nn.Conv1d(self.hidden_size, self.hidden_size, 1, 1)\n\n self.text_linear = nn.Linear(self.hidden_size, self.hidden_size)\n\n def forward(self, visual_input, text_feature):\n B, _, v_len = visual_input.shape\n vis_h = torch.relu(self.vis_conv(visual_input))\n\n avg_vis = self.avg_pool(vis_h) # batchsize * 512 * 16\n\n text_att = self.text_linear(text_feature) # batchsize * 512\n\n seg_list = []\n att_seg_list = []\n for i in range(v_len // self.stride):\n vis_seg = vis_h[:, :, self.stride * i: self.stride * (i + 1)].transpose(1, 2) # batchsize * 16 * 512\n avg_seg = avg_vis[:, :, i].squeeze()\n prev_se = avg_seg.new_zeros(B, self.hidden_size)\n\n sqn_list = []\n att_list = []\n for m in range(self.sqn):\n v_n = self.global_emb_fn[m](avg_seg)\n g_n = torch.relu(self.guide_emb_fn(torch.cat([v_n, prev_se], dim=1))) # batchsize * 512\n\n att = torch.tanh(self.att_fn1(g_n).unsqueeze(1).expand(-1, 16, -1) + self.att_fn2(vis_seg))\n att = self.att_fn3(att)\n att = self.softmax(att) # batchsize * 16 * 1\n\n prev_se = torch.sum(vis_seg * att, dim=1) # batchsize * 512\n sqn_list.append(prev_se)\n att_list.append(att)\n\n vis_for_att = torch.stack(sqn_list, dim=1) # batch * sqn * hidden_size\n fuse_att = torch.softmax(torch.matmul(vis_for_att, text_att.unsqueeze(2)), dim=1) # batch * sqn * 1\n\n vis_new = torch.sum(vis_for_att * fuse_att, dim=1)\n seg_list.append(vis_new)\n att_seg_list.append(torch.cat(att_list, dim=2)) # batchsize * 16 * sqn\n # TODO 使用加权后的attention还是原始的attention\n\n vis_out = torch.relu(self.vis_out_conv(torch.stack(seg_list, dim=2)))\n att_out = torch.stack(att_seg_list, dim=1) # batchsize * 16 * 16 * sqn\n\n return vis_out, att_out\n\n\nclass WordAttentionPool(nn.Module):\n\n def __init__(self, cfg):\n super(WordAttentionPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n self.stride = cfg.STRIDE # 16\n\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.text_linear = nn.Linear(hidden_size, hidden_size)\n\n def forward(self, visual_input, text_feature):\n _, _, v_len = visual_input.shape # batchsize * 4096 * 256\n\n vis_att = torch.relu(self.vis_conv(visual_input)) # batchsize * 512 * 256\n text_att = torch.relu(self.text_linear(text_feature)) # batch * 512\n\n att = 
torch.matmul(text_att.unsqueeze(1), vis_att).transpose(1, 2) # batchsize * 256 * 1\n\n seg_list = []\n for i in range(v_len // self.stride):\n vis_seg = visual_input[:, :, self.stride * i: self.stride * (i + 1)].transpose(1,\n 2) # batchsize * 16 * 4096\n att_seg = torch.softmax(att[:, self.stride * i: self.stride * (i + 1), :], dim=1) # batchsize * 16 * 1\n vis_new = torch.sum(vis_seg * att_seg, dim=1) # batchsize * 4096\n seg_list.append(vis_new)\n\n vis_out = torch.relu(self.vis_conv(torch.stack(seg_list, dim=2))) # batchsize * 512 * 16\n\n return vis_out\n\n\nclass MovementFlowAvgPool(nn.Module):\n def __init__(self, cfg):\n super(MovementFlowAvgPool, self).__init__()\n input_size = cfg.INPUT_SIZE # 4096\n hidden_size = cfg.HIDDEN_SIZE # 512\n kernel_size = cfg.KERNEL_SIZE # 16\n stride = cfg.STRIDE # 16\n self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.vis_flow_conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.avg_pool = nn.AvgPool1d(kernel_size, stride)\n\n self.fusion_conv = nn.Conv1d(hidden_size * 2, hidden_size, 1, 1)\n\n def forward(self, visual_input): # batchsize * 4096 * 256\n B, H, l = visual_input.size()\n vis_flow = torch.zeros(B, H, l).type_as(visual_input)\n for i in range(l - 1):\n vis_flow[:, :, i] = visual_input[:, :, i + 1] - visual_input[:, :, i]\n vis_flow[:, :, l - 1] = vis_flow[:, :, l - 2]\n vis_h = torch.relu(self.vis_conv(visual_input))\n vis_flow_h = torch.relu(self.vis_conv(vis_flow))\n vis_h = self.avg_pool(vis_h) # batchsize * 512 * 16\n vis_flow_h = self.avg_pool(vis_flow_h)\n\n vis_h = torch.relu(self.fusion_conv(torch.cat([vis_h, vis_flow_h], dim=1)))\n\n return vis_h # batchsize * 512 * 16\n" ]
[ [ "torch.sum", "torch.nn.AvgPool1d", "torch.nn.MaxPool1d", "torch.nn.Linear", "torch.stack", "torch.nn.BatchNorm1d", "torch.nn.Softmax", "torch.nn.Parameter", "torch.relu", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.zeros", "torch.eye", "torch.cat", "torch.softmax" ] ]
crtrentz/MONAI
[ "355db48e46047a18e3bb9dbd83f424a8ad0a2622" ]
[ "tests/test_png_rw.py" ]
[ "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\nfrom skimage import io\n\nfrom monai.data import write_png\n\n\nclass TestPngWrite(unittest.TestCase):\n def test_write_gray(self):\n out_dir = tempfile.mkdtemp()\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 3, 1)\n img_save_val = 255 * img\n # saving with io.imsave (h, w, 1) will only give us (h,w) while reading it back.\n img_save_val = img_save_val[:, :, 0].astype(np.uint8)\n write_png(img, image_name, scale=True)\n out = io.imread(image_name)\n np.testing.assert_allclose(out, img_save_val)\n shutil.rmtree(out_dir)\n\n def test_write_rgb(self):\n out_dir = tempfile.mkdtemp()\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 3, 3)\n img_save_val = (255 * img).astype(np.uint8)\n write_png(img, image_name, scale=True)\n out = io.imread(image_name)\n np.testing.assert_allclose(out, img_save_val)\n shutil.rmtree(out_dir)\n\n def test_write_output_shape(self):\n out_dir = tempfile.mkdtemp()\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 2, 3)\n write_png(img, image_name, (4, 4), scale=True)\n out = io.imread(image_name)\n np.testing.assert_allclose(out.shape, (4, 4, 3))\n shutil.rmtree(out_dir)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.rand", "numpy.testing.assert_allclose" ] ]
Ckst123/KoBERT-events
[ "68eb22845b179bcaf13771fea776be3d9772306f" ]
[ "run.py" ]
[ "from data_loader import load_data, tokenizer\nfrom models import BertForMultipleSequenceClassification\n\nfrom transformers import AutoConfig\nimport torch\nfrom tqdm.auto import tqdm\nfrom transformers import get_scheduler\nfrom transformers import AdamW\nfrom sklearn.metrics import accuracy_score, f1_score\n\nlabel_list = ['확진자수','완치자수','사망여부','집단감염','백신관련','방역지침','경제지원','마스크','국제기구','병원관련']\n\ndef train(model, optimizer, lr_scheduler, train_dataloader, num_epochs, num_training_steps, device):\n \n progress_bar = tqdm(range(num_training_steps))\n\n model.train()\n for epoch in range(num_epochs):\n for batch in train_dataloader:\n batch = {k: v.to(device) for k, v in batch.items()}\n outputs = model(**batch)\n loss = outputs.loss\n loss.backward()\n \n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n\n\ndef eval(model, eval_dataloader, metric, device):\n model.eval()\n preds = []\n targets = []\n probs = []\n for batch in eval_dataloader:\n batch = {k: v.to(device) for k, v in batch.items()}\n with torch.no_grad():\n outputs = model(**batch)\n \n logits = outputs.logits\n predictions = torch.stack([torch.argmax(logit, dim=-1) for logit in logits], dim=1)\n preds.append(predictions)\n targets.append(batch[\"labels\"])\n\n\n preds = torch.cat(preds, dim=0).cpu().numpy()\n targets = torch.cat(targets, dim=0).cpu().numpy()\n N, M = preds.shape\n for i in range(M):\n print(\"%s results\" % label_list[i])\n acc = accuracy_score(targets[:,i], preds[:,i])\n f1 = f1_score(targets[:,i], preds[:,i], average='binary')\n\n print('accuracy', acc * 100)\n print('f1 score', f1 * 100)\n\n \n \n\n\ndef main():\n checkpoint = \"klue/bert-base\"\n train_dataloader, eval_dataloader = load_data()\n config = AutoConfig.from_pretrained(checkpoint)\n config.num_classes=[2] * 10\n model = BertForMultipleSequenceClassification.from_pretrained(checkpoint, config=config)\n \n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n model.to(device)\n\n\n optimizer = AdamW(model.parameters(), lr=5e-5)\n num_epochs = 3\n num_training_steps = num_epochs * len(train_dataloader)\n lr_scheduler = get_scheduler(\n \"linear\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=num_training_steps\n )\n\n train(model, optimizer, lr_scheduler, train_dataloader, num_epochs, num_training_steps, device)\n print()\n\n eval(model, eval_dataloader, 'metric', device)\n \n\nif __name__ == '__main__':\n main()" ]
[ [ "torch.argmax", "torch.no_grad", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score", "torch.cuda.is_available", "torch.device", "torch.cat" ] ]
liytt85/gail-tf-pro
[ "ad92f41c26c34e8fabc536664fb11b44f25956cf" ]
[ "gailtf/baselines/ppo1/pposgd_simple.py" ]
[ "from gailtf.baselines.common import Dataset, explained_variance, fmt_row, zipsame\nfrom gailtf.baselines import logger\nimport gailtf.baselines.common.tf_util as U\nimport tensorflow as tf, numpy as np\nimport time, os, sys\nfrom gailtf.baselines.common.mpi_adam import MpiAdam\nfrom gailtf.baselines.common.mpi_moments import mpi_moments\nfrom mpi4py import MPI\nfrom collections import deque\nimport pickle as pkl\n\n# Sample one trajectory (until trajectory end)\ndef traj_episode_generator(pi, env, horizon, stochastic):\n t = 0\n ac = env.action_space.sample() # not used, just so we have the datatype\n new = True # marks if we're on first timestep of an episode\n\n ob = env.reset()\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n\n # Initialize history arrays\n obs = []; rews = []; news = []; acs = []\n\n while True:\n prevac = ac\n ac, vpred = pi.act(stochastic, ob)\n obs.append(ob)\n news.append(new)\n acs.append(ac)\n ob, rew, new, _ = env.step(ac)\n rews.append(rew)\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if t > 0 and (new or t % horizon == 0):\n # convert list into numpy array\n obs = np.array(obs)\n rews = np.array(rews)\n news = np.array(news)\n acs = np.array(acs)\n yield {\"ob\":obs, \"rew\":rews, \"new\":news, \"ac\":acs,\n \"ep_ret\":cur_ep_ret, \"ep_len\":cur_ep_len}\n ob = env.reset()\n cur_ep_ret = 0; cur_ep_len = 0; t = 0\n\n # Initialize history arrays\n obs = []; rews = []; news = []; acs = []\n t += 1\n\ndef traj_segment_generator(pi, env, horizon, stochastic):\n t = 0\n ac = env.action_space.sample() # not used, just so we have the datatype\n new = True # marks if we're on first timestep of an episode\n ob = env.reset()\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n obs = np.array([ob for _ in range(horizon)])\n rews = np.zeros(horizon, 'float32')\n vpreds = np.zeros(horizon, 'float32')\n news = np.zeros(horizon, 'int32')\n acs = np.array([ac for _ in range(horizon)])\n prevacs = acs.copy()\n\n while True:\n prevac = ac\n ac, vpred = pi.act(stochastic, ob)\n # Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct\n # terminal value\n if t > 0 and t % horizon == 0:\n yield {\"ob\" : obs, \"rew\" : rews, \"vpred\" : vpreds, \"new\" : news,\n \"ac\" : acs, \"prevac\" : prevacs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\" : ep_rets, \"ep_lens\" : ep_lens}\n # Be careful!!! 
if you change the downstream algorithm to aggregate\n # several of these batches, then be sure to do a deepcopy\n ep_rets = []\n ep_lens = []\n i = t % horizon\n obs[i] = ob\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = env.step(ac)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = env.reset()\n t += 1\n\ndef add_vtarg_and_adv(seg, gamma, lam):\n \"\"\"\n Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)\n \"\"\"\n new = np.append(seg[\"new\"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1\n vpred = np.append(seg[\"vpred\"], seg[\"nextvpred\"])\n T = len(seg[\"rew\"])\n seg[\"adv\"] = gaelam = np.empty(T, 'float32')\n rew = seg[\"rew\"]\n lastgaelam = 0\n for t in reversed(range(T)):\n nonterminal = 1-new[t+1]\n delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n seg[\"tdlamret\"] = seg[\"adv\"] + seg[\"vpred\"]\n\ndef learn(env, policy_func, *,\n timesteps_per_batch, # timesteps per actor per update\n clip_param, entcoeff, # clipping parameter epsilon, entropy coeff\n optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers\n gamma, lam, # advantage estimation\n max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint\n callback=None, # you can do anything in the callback, since it takes locals(), globals()\n adam_epsilon=1e-5,\n schedule='constant', # annealing for stepsize parameters (epsilon and adam)\n save_per_iter=100,\n ckpt_dir=None, task=\"train\",\n sample_stochastic=True,\n load_model_path=None, task_name=None, max_sample_traj=1500\n ):\n # Setup losses and stuff\n # ----------------------------------------\n ob_space = env.observation_space\n ac_space = env.action_space\n pi = policy_func(\"pi\", ob_space, ac_space) # Construct network for new policy\n oldpi = policy_func(\"oldpi\", ob_space, ac_space) # Network for old policy\n atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)\n ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return\n\n lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule\n clip_param = clip_param * lrmult # Annealed cliping parameter epislon\n\n ob = U.get_placeholder_cached(name=\"ob\")\n ac = pi.pdtype.sample_placeholder([None])\n\n kloldnew = oldpi.pd.kl(pi.pd)\n ent = pi.pd.entropy()\n meankl = U.mean(kloldnew)\n meanent = U.mean(ent)\n pol_entpen = (-entcoeff) * meanent\n\n ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold\n surr1 = ratio * atarg # surrogate from conservative policy iteration\n surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg #\n pol_surr = - U.mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)\n vf_loss = U.mean(tf.square(pi.vpred - ret))\n total_loss = pol_surr + pol_entpen + vf_loss\n losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]\n loss_names = [\"pol_surr\", \"pol_entpen\", \"vf_loss\", \"kl\", \"ent\"]\n\n var_list = pi.get_trainable_variables()\n lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])\n adam = MpiAdam(var_list, epsilon=adam_epsilon)\n\n assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)\n for (oldv, newv) in 
zipsame(oldpi.get_variables(), pi.get_variables())])\n compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)\n\n U.initialize()\n adam.sync()\n\n # Prepare for rollouts\n # ----------------------------------------\n seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)\n traj_gen = traj_episode_generator(pi, env, timesteps_per_batch, stochastic=sample_stochastic)\n\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n tstart = time.time()\n lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths\n rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards\n\n assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, \"Only one time constraint permitted\"\n\n if task == 'sample_trajectory':\n # not elegant, i know :(\n sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic)\n sys.exit()\n\n while True:\n if callback: callback(locals(), globals())\n if max_timesteps and timesteps_so_far >= max_timesteps:\n break\n elif max_episodes and episodes_so_far >= max_episodes:\n break\n elif max_iters and iters_so_far >= max_iters:\n break\n elif max_seconds and time.time() - tstart >= max_seconds:\n break\n\n if schedule == 'constant':\n cur_lrmult = 1.0\n elif schedule == 'linear':\n cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)\n else:\n raise NotImplementedError\n\n # Save model\n if iters_so_far % save_per_iter == 0 and ckpt_dir is not None:\n U.save_state(os.path.join(ckpt_dir, task_name), counter=iters_so_far)\n\n logger.log(\"********** Iteration %i ************\"%iters_so_far)\n\n seg = seg_gen.__next__()\n add_vtarg_and_adv(seg, gamma, lam)\n\n # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))\n ob, ac, atarg, tdlamret = seg[\"ob\"], seg[\"ac\"], seg[\"adv\"], seg[\"tdlamret\"]\n vpredbefore = seg[\"vpred\"] # predicted value function before udpate\n atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate\n d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)\n optim_batchsize = optim_batchsize or ob.shape[0]\n\n if hasattr(pi, \"ob_rms\"): pi.ob_rms.update(ob) # update running mean/std for policy\n\n assign_old_eq_new() # set old parameter values to new parameter values\n logger.log(\"Optimizing...\")\n logger.log(fmt_row(13, loss_names))\n # Here we do a bunch of optimization epochs over the data\n for _ in range(optim_epochs):\n losses = [] # list of tuples, each of which gives the loss for a minibatch\n for batch in d.iterate_once(optim_batchsize):\n *newlosses, g = lossandgrad(batch[\"ob\"], batch[\"ac\"], batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult)\n adam.update(g, optim_stepsize * cur_lrmult) \n losses.append(newlosses)\n logger.log(fmt_row(13, np.mean(losses, axis=0)))\n\n logger.log(\"Evaluating losses...\")\n losses = []\n for batch in d.iterate_once(optim_batchsize):\n newlosses = compute_losses(batch[\"ob\"], batch[\"ac\"], batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult)\n losses.append(newlosses) \n meanlosses,_,_ = mpi_moments(losses, axis=0)\n logger.log(fmt_row(13, meanlosses))\n for (lossval, name) in zipsame(meanlosses, loss_names):\n logger.record_tabular(\"loss_\"+name, lossval)\n logger.record_tabular(\"ev_tdlam_before\", explained_variance(vpredbefore, tdlamret))\n lrlocal = (seg[\"ep_lens\"], seg[\"ep_rets\"]) # local values\n listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples\n lens, rews = 
map(flatten_lists, zip(*listoflrpairs))\n lenbuffer.extend(lens)\n rewbuffer.extend(rews)\n logger.record_tabular(\"EpLenMean\", np.mean(lenbuffer))\n logger.record_tabular(\"EpRewMean\", np.mean(rewbuffer))\n logger.record_tabular(\"EpThisIter\", len(lens))\n episodes_so_far += len(lens)\n timesteps_so_far += sum(lens)\n iters_so_far += 1\n logger.record_tabular(\"EpisodesSoFar\", episodes_so_far)\n logger.record_tabular(\"TimestepsSoFar\", timesteps_so_far)\n logger.record_tabular(\"TimeElapsed\", time.time() - tstart)\n if MPI.COMM_WORLD.Get_rank()==0:\n logger.dump_tabular()\n\ndef sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic):\n\n assert load_model_path is not None\n U.load_state(load_model_path)\n sample_trajs = []\n for iters_so_far in range(max_sample_traj):\n logger.log(\"********** Iteration %i ************\"%iters_so_far)\n traj = traj_gen.__next__()\n ob, new, ep_ret, ac, rew, ep_len = traj['ob'], traj['new'], traj['ep_ret'], traj['ac'], traj['rew'], traj['ep_len']\n logger.record_tabular(\"ep_ret\", ep_ret)\n logger.record_tabular(\"ep_len\", ep_len)\n logger.record_tabular(\"immediate reward\", np.mean(rew))\n if MPI.COMM_WORLD.Get_rank()==0:\n logger.dump_tabular()\n traj_data = {\"ob\":ob, \"ac\":ac, \"rew\": rew, \"ep_ret\":ep_ret}\n sample_trajs.append(traj_data)\n\n sample_ep_rets = [traj[\"ep_ret\"] for traj in sample_trajs]\n logger.log(\"Average total return: %f\"%(sum(sample_ep_rets)/len(sample_ep_rets)))\n if sample_stochastic:\n task_name = 'stochastic.' + task_name\n else:\n task_name = 'deterministic.' + task_name\n pkl.dump(sample_trajs, open(task_name+\".pkl\", \"wb\"))\n\ndef flatten_lists(listoflists):\n return [el for list_ in listoflists for el in list_]\n" ]
[ [ "tensorflow.placeholder", "tensorflow.minimum", "numpy.empty", "numpy.append", "numpy.zeros", "tensorflow.assign", "tensorflow.square", "numpy.array", "numpy.mean" ] ]
tianjianjiang/allennlp
[ "35b285585e0677b1025eac1c19b5eefe7e2a70db", "35b285585e0677b1025eac1c19b5eefe7e2a70db", "35b285585e0677b1025eac1c19b5eefe7e2a70db" ]
[ "allennlp/training/metrics/entropy.py", "allennlp/modules/similarity_functions/linear.py", "allennlp/tests/data/dataset_readers/multiprocess_dataset_reader_test.py" ]
[ "from typing import Optional\n\nfrom overrides import overrides\nimport torch\n\nfrom allennlp.training.metrics.metric import Metric\n\n\[email protected](\"entropy\")\nclass Entropy(Metric):\n def __init__(self) -> None:\n self._entropy = 0.0\n self._count = 0\n\n @overrides\n def __call__(\n self, # type: ignore\n logits: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n ):\n \"\"\"\n Parameters\n ----------\n logits : ``torch.Tensor``, required.\n A tensor of unnormalized log probabilities of shape (batch_size, ..., num_classes).\n mask: ``torch.Tensor``, optional (default = None).\n A masking tensor of shape (batch_size, ...).\n \"\"\"\n logits, mask = self.unwrap_to_tensors(logits, mask)\n\n if mask is None:\n mask = torch.ones(logits.size()[:-1])\n\n log_probs = torch.nn.functional.log_softmax(logits, dim=-1)\n probabilities = torch.exp(log_probs) * mask.unsqueeze(-1)\n weighted_negative_likelihood = -log_probs * probabilities\n entropy = weighted_negative_likelihood.sum(-1)\n\n self._entropy += entropy.sum() / mask.sum()\n self._count += 1\n\n @overrides\n def get_metric(self, reset: bool = False):\n \"\"\"\n Returns\n -------\n The scalar average entropy.\n \"\"\"\n average_value = self._entropy / self._count if self._count > 0 else 0\n if reset:\n self.reset()\n return average_value\n\n @overrides\n def reset(self):\n self._entropy = 0.0\n self._count = 0\n", "import math\n\nfrom overrides import overrides\nimport torch\nfrom torch.nn.parameter import Parameter\n\nfrom allennlp.modules.similarity_functions.similarity_function import SimilarityFunction\nfrom allennlp.nn import Activation, util\n\n\[email protected](\"linear\")\nclass LinearSimilarity(SimilarityFunction):\n \"\"\"\n This similarity function performs a dot product between a vector of weights and some\n combination of the two input vectors, followed by an (optional) activation function. The\n combination used is configurable.\n\n If the two vectors are ``x`` and ``y``, we allow the following kinds of combinations: ``x``,\n ``y``, ``x*y``, ``x+y``, ``x-y``, ``x/y``, where each of those binary operations is performed\n elementwise. You can list as many combinations as you want, comma separated. For example, you\n might give ``x,y,x*y`` as the ``combination`` parameter to this class. The computed similarity\n function would then be ``w^T [x; y; x*y] + b``, where ``w`` is a vector of weights, ``b`` is a\n bias parameter, and ``[;]`` is vector concatenation.\n\n Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the\n similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can\n accomplish that with this class by using \"x*y\" for `combination`.\n\n Parameters\n ----------\n tensor_1_dim : ``int``\n The dimension of the first tensor, ``x``, described above. This is ``x.size()[-1]`` - the\n length of the vector that will go into the similarity computation. We need this so we can\n build weight vectors correctly.\n tensor_2_dim : ``int``\n The dimension of the second tensor, ``y``, described above. This is ``y.size()[-1]`` - the\n length of the vector that will go into the similarity computation. We need this so we can\n build weight vectors correctly.\n combination : ``str``, optional (default=\"x,y\")\n Described above.\n activation : ``Activation``, optional (default=linear (i.e. no activation))\n An activation function applied after the ``w^T * [x;y] + b`` calculation. 
Default is no\n activation.\n \"\"\"\n\n def __init__(\n self,\n tensor_1_dim: int,\n tensor_2_dim: int,\n combination: str = \"x,y\",\n activation: Activation = None,\n ) -> None:\n super().__init__()\n self._combination = combination\n combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])\n self._weight_vector = Parameter(torch.Tensor(combined_dim))\n self._bias = Parameter(torch.Tensor(1))\n self._activation = activation or Activation.by_name(\"linear\")()\n self.reset_parameters()\n\n def reset_parameters(self):\n std = math.sqrt(6 / (self._weight_vector.size(0) + 1))\n self._weight_vector.data.uniform_(-std, std)\n self._bias.data.fill_(0)\n\n @overrides\n def forward(self, tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:\n combined_tensors = util.combine_tensors(self._combination, [tensor_1, tensor_2])\n dot_product = torch.matmul(combined_tensors, self._weight_vector)\n return self._activation(dot_product + self._bias)\n", "from collections import Counter\nfrom multiprocessing import Queue, Process\nfrom queue import Empty\nfrom typing import Tuple\n\nimport numpy as np\n\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.data.dataset_readers import MultiprocessDatasetReader, SequenceTaggingDatasetReader\nfrom allennlp.data.dataset_readers.multiprocess_dataset_reader import QIterable\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.iterators import BasicIterator\nfrom allennlp.data.vocabulary import Vocabulary\n\n\ndef fingerprint(instance: Instance) -> Tuple[str, ...]:\n \"\"\"\n Get a hashable representation of a sequence tagging instance\n that can be put in a Counter.\n \"\"\"\n text_tuple = tuple(t.text for t in instance.fields[\"tokens\"].tokens) # type: ignore\n labels_tuple = tuple(instance.fields[\"tags\"].labels) # type: ignore\n return text_tuple + labels_tuple\n\n\nclass TestMultiprocessDatasetReader(AllenNlpTestCase):\n def setUp(self) -> None:\n super().setUp()\n\n # use SequenceTaggingDatasetReader as the base reader\n self.base_reader = SequenceTaggingDatasetReader(lazy=True)\n base_file_path = AllenNlpTestCase.FIXTURES_ROOT / \"data\" / \"sequence_tagging.tsv\"\n\n # Make 100 copies of the data\n raw_data = open(base_file_path).read()\n for i in range(100):\n file_path = self.TEST_DIR / f\"identical_{i}.tsv\"\n with open(file_path, \"w\") as f:\n f.write(raw_data)\n\n self.all_distinct_path = str(self.TEST_DIR / \"all_distinct.tsv\")\n with open(self.all_distinct_path, \"w\") as all_distinct:\n for i in range(100):\n file_path = self.TEST_DIR / f\"distinct_{i}.tsv\"\n line = f\"This###DT\\tis###VBZ\\tsentence###NN\\t{i}###CD\\t.###.\\n\"\n with open(file_path, \"w\") as f:\n f.write(line)\n all_distinct.write(line)\n\n self.identical_files_glob = str(self.TEST_DIR / \"identical_*.tsv\")\n self.distinct_files_glob = str(self.TEST_DIR / \"distinct_*.tsv\")\n\n # For some of the tests we need a vocab, we'll just use the base_reader for that.\n self.vocab = Vocabulary.from_instances(self.base_reader.read(str(base_file_path)))\n\n def test_multiprocess_read(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=4)\n\n all_instances = []\n\n for instance in reader.read(self.identical_files_glob):\n all_instances.append(instance)\n\n # 100 files * 4 sentences / file\n assert len(all_instances) == 100 * 4\n\n counts = Counter(fingerprint(instance) for instance in all_instances)\n\n # should have the exact same data 100 times\n assert len(counts) == 4\n assert 
counts[(\"cats\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"dogs\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"snakes\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"birds\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n\n def test_multiprocess_read_partial_does_not_hang(self):\n # Use a small queue size such that the processes generating the data will block.\n reader = MultiprocessDatasetReader(\n base_reader=self.base_reader, num_workers=4, output_queue_size=10\n )\n\n all_instances = []\n\n # Half of 100 files * 4 sentences / file\n i = 0\n for instance in reader.read(self.identical_files_glob):\n # Stop early such that the processes generating the data remain\n # active (given the small queue size).\n if i == 200:\n break\n i += 1\n all_instances.append(instance)\n\n # This should be trivially true. The real test here is that we exit\n # normally and don't hang due to the still active processes.\n assert len(all_instances) == 200\n\n def test_multiprocess_read_with_qiterable(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=4)\n\n all_instances = []\n qiterable = reader.read(self.identical_files_glob)\n assert isinstance(qiterable, QIterable)\n\n # Essentially QIterable.__iter__. Broken out here as we intend it to be\n # a public interface.\n qiterable.start()\n while qiterable.num_active_workers.value > 0 or qiterable.num_inflight_items.value > 0:\n while True:\n try:\n all_instances.append(qiterable.output_queue.get(block=False, timeout=1.0))\n with qiterable.num_inflight_items.get_lock():\n qiterable.num_inflight_items.value -= 1\n except Empty:\n break\n qiterable.join()\n\n # 100 files * 4 sentences / file\n assert len(all_instances) == 100 * 4\n\n counts = Counter(fingerprint(instance) for instance in all_instances)\n\n # should have the exact same data 100 times\n assert len(counts) == 4\n assert counts[(\"cats\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"dogs\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"snakes\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n assert counts[(\"birds\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 100\n\n def test_multiprocess_read_in_subprocess_is_deterministic(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=1)\n q = Queue()\n\n def read():\n for instance in reader.read(self.distinct_files_glob):\n q.put(fingerprint(instance))\n\n # Ensure deterministic shuffling.\n np.random.seed(0)\n p = Process(target=read)\n p.start()\n p.join()\n\n # Convert queue to list.\n actual_fingerprints = []\n while not q.empty():\n actual_fingerprints.append(q.get(block=False))\n\n assert len(actual_fingerprints) == 100\n\n expected_fingerprints = []\n for instance in self.base_reader.read(self.all_distinct_path):\n expected_fingerprints.append(fingerprint(instance))\n\n np.random.seed(0)\n expected_fingerprints.sort()\n # This should be shuffled into exactly the same order as actual_fingerprints.\n np.random.shuffle(expected_fingerprints)\n\n assert actual_fingerprints == expected_fingerprints\n\n def test_multiple_epochs(self):\n reader = MultiprocessDatasetReader(\n base_reader=self.base_reader, num_workers=2, epochs_per_read=3\n )\n\n all_instances = []\n\n for instance in reader.read(self.identical_files_glob):\n 
all_instances.append(instance)\n\n # 100 files * 4 sentences per file * 3 epochs\n assert len(all_instances) == 100 * 4 * 3\n\n counts = Counter(fingerprint(instance) for instance in all_instances)\n\n # should have the exact same data 100 * 3 times\n assert len(counts) == 4\n assert counts[(\"cats\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n assert counts[(\"dogs\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n assert counts[(\"snakes\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n assert counts[(\"birds\", \"are\", \"animals\", \".\", \"N\", \"V\", \"N\", \"N\")] == 300\n\n def test_with_iterator(self):\n reader = MultiprocessDatasetReader(base_reader=self.base_reader, num_workers=2)\n instances = reader.read(self.identical_files_glob)\n\n iterator = BasicIterator(batch_size=32)\n iterator.index_with(self.vocab)\n\n batches = [batch for batch in iterator(instances, num_epochs=1)]\n\n # 400 instances / batch_size 32 = 12 full batches + 1 batch of 16\n sizes = sorted([len(batch[\"tags\"]) for batch in batches])\n assert sizes == [16] + 12 * [32]\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.exp" ], [ "torch.Tensor", "torch.matmul" ], [ "numpy.random.shuffle", "numpy.random.seed" ] ]
y-veys/iGibson
[ "1442c50187f8fcef118b097c195fef707eef04cb" ]
[ "gibson2/core/physics/drivers/minitaur.py" ]
[ "\"\"\"This file implements the functionalities of a minitaur using pybullet.\n\n\"\"\"\nimport copy\nimport math\nimport numpy as np\nfrom gibson2.core.physics.drivers import motor\nfrom gibson2.core.physics.robot_locomotors import LocomotorRobot\nfrom gibson2.core.physics.robot_bases import Joint, BodyPart\nimport os, sys\nimport pybullet as p\nimport gym\nfrom transforms3d.euler import euler2quat\n\ntracking_camera = {'yaw': 20, 'z_offset': 0.3, 'distance': 2, 'pitch': -20}\n\n\nclass MinitaurBase(LocomotorRobot):\n model_type = \"URDF\"\n default_scale = 1\n\n KNEE_CONSTRAINT_POINT_RIGHT = [0, 0.005, 0.2]\n KNEE_CONSTRAINT_POINT_LEFT = [0, 0.01, 0.2]\n OVERHEAT_SHUTDOWN_TORQUE = 2.45\n OVERHEAT_SHUTDOWN_TIME = 1.0\n LEG_POSITION = [\"front_left\", \"back_left\", \"front_right\", \"back_right\"]\n MOTOR_NAMES = [\n \"motor_front_leftL_joint\", \"motor_front_leftR_joint\", \"motor_back_leftL_joint\",\n \"motor_back_leftR_joint\", \"motor_front_rightL_joint\", \"motor_front_rightR_joint\",\n \"motor_back_rightL_joint\", \"motor_back_rightR_joint\"\n ]\n LEG_LINK_ID = [2, 3, 5, 6, 8, 9, 11, 12, 15, 16, 18, 19, 21, 22, 24, 25]\n MOTOR_LINK_ID = [1, 4, 7, 10, 14, 17, 20, 23]\n FOOT_LINK_ID = [3, 6, 9, 12, 16, 19, 22, 25]\n BASE_LINK_ID = -1\n OBSERVATION_DIM = 3 * len(MOTOR_NAMES) + 4 # VELOCITY, ANGLE, TORQUES\n\n self_collision_enabled = True\n motor_velocity_limit = np.inf\n\n #accurate_motor_model_enabled=False ## (hzyjerry): affect speed?\n motor_kp = 1.00\n motor_kd = 0.2\n torque_control_enabled = False\n motor_overheat_protection = True\n on_rack = False\n kd_for_pd_controllers = 0.3\n mjcf_scaling = 1\n num_motors = 8\n num_legs = int(num_motors / 2)\n motor_direction = [-1, -1, -1, -1, 1, 1, 1, 1]\n observed_motor_torques = np.zeros(num_motors)\n applied_motor_torques = np.zeros(num_motors)\n max_force = 5.5\n joint_name_to_id = None\n \"\"\"The minitaur class that simulates a quadruped robot from Ghost Robotics.\n \"\"\"\n\n def __init__(self, config, env=None, pd_control_enabled=True,\n accurate_motor_model_enabled=True):\n \"\"\"Constructs a minitaur and reset it to the initial states.\n\n Properties:\n self_collision_enabled: Whether to enable self collision.\n motor_velocity_limit: The upper limit of the motor velocity.\n pd_control_enabled: Whether to use PD control for the motors. If true, need smaller time step to stablize (1/500.0 timestep)\n accurate_motor_model_enabled: Whether to use the accurate DC motor model.\n motor_kp: proportional gain for the accurate motor model\n motor_kd: derivative gain for the acurate motor model\n torque_control_enabled: Whether to use the torque control, if set to\n False, pose control will be used.\n motor_overheat_protection: Whether to shutdown the motor that has exerted\n large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time\n (OVERHEAT_SHUTDOWN_TIME). See apply_action() in minitaur.py for more\n details.\n on_rack: Whether to place the minitaur on rack. This is only used to debug\n the walking gait. 
In this mode, the minitaur's base is hanged midair so\n that its walking gait is clearer to visualize.\n kd_for_pd_controllers: kd value for the pd controllers of the motors.\n \"\"\"\n self.config = config\n self.model_type = \"URDF\"\n #self.robot_name = \"quadruped\"\n self.robot_name = \"base_chassis_link\"\n scale = config[\"robot_scale\"] if \"robot_scale\" in config.keys() else self.default_scale\n\n LocomotorRobot.__init__(self,\n \"quadruped/minitaur.urdf\",\n self.robot_name,\n action_dim=8,\n sensor_dim=self.OBSERVATION_DIM,\n power=5,\n scale=scale,\n initial_pos=config['initial_pos'],\n target_pos=config[\"target_pos\"],\n resolution=config[\"resolution\"],\n env=env)\n\n self.r_f = 0.1\n self.time_step = config[\"speed\"][\"timestep\"]\n self.pd_control_enabled = pd_control_enabled\n self.minitaur = None ## TODO: fix this\n self.accurate_motor_model_enabled = accurate_motor_model_enabled\n if self.accurate_motor_model_enabled:\n self._kp = self.motor_kp\n self._kd = self.motor_kd\n self._motor_model = motor.MotorModel(torque_control_enabled=self.torque_control_enabled,\n kp=self._kp,\n kd=self._kd)\n elif self.pd_control_enabled:\n self._kp = 8\n self._kd = self.kd_for_pd_controllers\n else:\n self._kp = 1\n self._kd = 1\n\n if config[\"is_discrete\"]:\n self.action_space = gym.spaces.Discrete(17)\n self.torque = 10\n ## Hip_1, Ankle_1, Hip_2, Ankle_2, Hip_3, Ankle_3, Hip_4, Ankle_4\n self.action_list = [[self.r_f * self.torque, 0, 0, 0, 0, 0, 0, 0],\n [0, self.r_f * self.torque, 0, 0, 0, 0, 0, 0],\n [0, 0, self.r_f * self.torque, 0, 0, 0, 0, 0],\n [0, 0, 0, self.r_f * self.torque, 0, 0, 0, 0],\n [0, 0, 0, 0, self.r_f * self.torque, 0, 0, 0],\n [0, 0, 0, 0, 0, self.r_f * self.torque, 0, 0],\n [0, 0, 0, 0, 0, 0, self.r_f * self.torque, 0],\n [0, 0, 0, 0, 0, 0, 0, self.r_f * self.torque],\n [-self.r_f * self.torque, 0, 0, 0, 0, 0, 0, 0],\n [0, -self.r_f * self.torque, 0, 0, 0, 0, 0, 0],\n [0, 0, -self.r_f * self.torque, 0, 0, 0, 0, 0],\n [0, 0, 0, -self.r_f * self.torque, 0, 0, 0, 0],\n [0, 0, 0, 0, -self.r_f * self.torque, 0, 0, 0],\n [0, 0, 0, 0, 0, -self.r_f * self.torque, 0, 0],\n [0, 0, 0, 0, 0, 0, -self.r_f * self.torque, 0],\n [0, 0, 0, 0, 0, 0, 0, -self.r_f * self.torque],\n [0, 0, 0, 0, 0, 0, 0, 0]]\n self.setup_keys_to_action()\n self.debug_count = 0\n self.qmax = [0] * 8\n self.fmax = [0] * 8\n\n def _RecordMassInfoFromURDF(self):\n self._base_mass_urdf = p.getDynamicsInfo(self.minitaur, self.BASE_LINK_ID)[0]\n self._leg_masses_urdf = []\n self._leg_masses_urdf.append(p.getDynamicsInfo(self.minitaur, self.LEG_LINK_ID[0])[0])\n self._leg_masses_urdf.append(p.getDynamicsInfo(self.minitaur, self.MOTOR_LINK_ID[0])[0])\n\n def _BuildJointNameToIdDict(self):\n num_joints = p.getNumJoints(self.minitaur)\n self.joint_name_to_id = {}\n for i in range(num_joints):\n joint_info = p.getJointInfo(self.minitaur, i)\n self.joint_name_to_id[joint_info[1].decode(\"UTF-8\")] = joint_info[0]\n\n def _BuildMotorIdList(self):\n self._motor_id_list = [self.joint_name_to_id[motor_name] for motor_name in self.MOTOR_NAMES]\n\n def robot_specific_reset(self, reload_urdf=True):\n \"\"\"Reset the minitaur to its initial states.\n\n Args:\n reload_urdf: Whether to reload the urdf file. 
If not, Reset() just place\n the minitaur back to its starting position.\n \"\"\"\n if self.minitaur is None:\n self.minitaur = self.robot_ids[0]\n\n if self.joint_name_to_id is None:\n self._BuildJointNameToIdDict()\n self._BuildMotorIdList()\n self._RecordMassInfoFromURDF()\n self.ResetPose(add_constraint=True)\n self._overheat_counter = np.zeros(self.num_motors)\n self._motor_enabled_list = [True] * self.num_motors\n if self.on_rack:\n p.createConstraint(self.minitaur, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0],\n [0, 0, 1])\n self.ResetPose(add_constraint=True)\n\n def _SetMotorTorqueById(self, motor_id, torque):\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=motor_id,\n controlMode=p.TORQUE_CONTROL,\n force=torque)\n\n def _SetDesiredMotorAngleById(self, motor_id, desired_angle):\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=motor_id,\n controlMode=p.POSITION_CONTROL,\n targetPosition=desired_angle,\n positionGain=self._kp,\n velocityGain=self._kd,\n force=self.max_force)\n\n def _SetDesiredMotorAngleByName(self, motor_name, desired_angle):\n self._SetDesiredMotorAngleById(self.joint_name_to_id[motor_name], desired_angle)\n\n def calc_potential(self):\n return 0\n\n def setup_keys_to_action(self):\n self.keys_to_action = {\n (ord('s'), ): 0, ## backward\n (ord('w'), ): 1, ## forward\n (ord('d'), ): 2, ## turn right\n (ord('a'), ): 3, ## turn left\n (): 4\n }\n\n def ResetPose(self, add_constraint):\n \"\"\"Reset the pose of the minitaur.\n\n Args:\n add_constraint: Whether to add a constraint at the joints of two feet.\n \"\"\"\n for i in range(self.num_legs):\n self._ResetPoseForLeg(i, add_constraint)\n\n def _ResetPoseForLeg(self, leg_id, add_constraint):\n \"\"\"Reset the initial pose for the leg.\n\n Args:\n leg_id: It should be 0, 1, 2, or 3, which represents the leg at\n front_left, back_left, front_right and back_right.\n add_constraint: Whether to add a constraint at the joints of two feet.\n \"\"\"\n knee_friction_force = 0\n half_pi = math.pi / 2.0\n knee_angle = -2.1834\n\n leg_position = self.LEG_POSITION[leg_id]\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"motor_\" + leg_position + \"L_joint\"],\n self.motor_direction[2 * leg_id] * half_pi,\n targetVelocity=0)\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"L_link\"],\n self.motor_direction[2 * leg_id] * knee_angle,\n targetVelocity=0)\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"motor_\" + leg_position + \"R_joint\"],\n self.motor_direction[2 * leg_id + 1] * half_pi,\n targetVelocity=0)\n p.resetJointState(self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"R_link\"],\n self.motor_direction[2 * leg_id + 1] * knee_angle,\n targetVelocity=0)\n if add_constraint:\n p.createConstraint(self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"R_link\"],\n self.minitaur,\n self.joint_name_to_id[\"knee_\" + leg_position + \"L_link\"],\n p.JOINT_POINT2POINT, [0, 0, 0], self.KNEE_CONSTRAINT_POINT_RIGHT,\n self.KNEE_CONSTRAINT_POINT_LEFT)\n\n if self.accurate_motor_model_enabled or self.pd_control_enabled:\n # Disable the default motor in pybullet.\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"motor_\" + leg_position +\n \"L_joint\"]),\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"motor_\" + leg_position +\n \"R_joint\"]),\n 
controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n\n else:\n self._SetDesiredMotorAngleByName(\"motor_\" + leg_position + \"L_joint\",\n self.motor_direction[2 * leg_id] * half_pi)\n self._SetDesiredMotorAngleByName(\"motor_\" + leg_position + \"R_joint\",\n self.motor_direction[2 * leg_id + 1] * half_pi)\n\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"knee_\" + leg_position +\n \"L_link\"]),\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n p.setJointMotorControl2(bodyIndex=self.minitaur,\n jointIndex=(self.joint_name_to_id[\"knee_\" + leg_position +\n \"R_link\"]),\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=knee_friction_force)\n\n def GetBasePosition(self):\n \"\"\"Get the position of minitaur's base.\n\n Returns:\n The position of minitaur's base.\n \"\"\"\n position, _ = (p.getBasePositionAndOrientation(self.minitaur))\n return position\n\n def GetBaseOrientation(self):\n \"\"\"Get the orientation of minitaur's base, represented as quaternion.\n\n Returns:\n The orientation of minitaur's base.\n \"\"\"\n _, orientation = (p.getBasePositionAndOrientation(self.minitaur))\n return orientation\n\n def GetActionDimension(self):\n \"\"\"Get the length of the action list.\n\n Returns:\n The length of the action list.\n \"\"\"\n return self.num_motors\n\n def GetObservationUpperBound(self):\n \"\"\"Get the upper bound of the observation.\n\n Returns:\n The upper bound of an observation. See GetObservation() for the details\n of each element of an observation.\n \"\"\"\n upper_bound = np.array([0.0] * self.GetObservationDimension())\n upper_bound[0:self.num_motors] = math.pi # Joint angle.\n upper_bound[self.num_motors:2 * self.num_motors] = (motor.MOTOR_SPEED_LIMIT\n ) # Joint velocity.\n upper_bound[2 * self.num_motors:3 * self.num_motors] = (motor.OBSERVED_TORQUE_LIMIT\n ) # Joint torque.\n upper_bound[3 * self.num_motors:] = 1.0 # Quaternion of base orientation.\n return upper_bound\n\n def GetObservationLowerBound(self):\n \"\"\"Get the lower bound of the observation.\"\"\"\n return -self.GetObservationUpperBound()\n\n def GetObservationDimension(self):\n \"\"\"Get the length of the observation list.\n\n Returns:\n The length of the observation list.\n \"\"\"\n return len(self.GetObservation())\n\n def calc_state(self):\n return self.GetObservation()\n\n def GetObservation(self):\n \"\"\"Get the observations of minitaur.\n\n It includes the angles, velocities, torques and the orientation of the base.\n\n Returns:\n The observation list. observation[0:8] are motor angles. observation[8:16]\n are motor velocities, observation[16:24] are motor torques.\n observation[24:28] is the orientation of the base, in quaternion form.\n \"\"\"\n observation = []\n observation.extend(self.GetMotorAngles().tolist())\n observation.extend(self.GetMotorVelocities().tolist())\n observation.extend(self.GetMotorTorques().tolist())\n observation.extend(list(self.GetBaseOrientation()))\n return observation\n\n def apply_action(self, motor_commands):\n \"\"\"Set the desired motor angles to the motors of the minitaur.\n\n Note (hzyjerry): motor commands are set based on desired angles, not torques\n\n The desired motor angles are clipped based on the maximum allowed velocity.\n If the pd_control_enabled is True, a torque is calculated according to\n the difference between current and desired joint angle, as well as the joint\n velocity. This torque is exerted to the motor. 
For more information about\n PD control, please refer to: https://en.wikipedia.org/wiki/PID_controller.\n\n Args:\n motor_commands: The eight desired motor angles.\n \"\"\"\n #print(\"motor commands 1\", motor_commands)\n if self.motor_velocity_limit < np.inf:\n current_motor_angle = self.GetMotorAngles()\n motor_commands_max = (current_motor_angle + self.time_step * self.motor_velocity_limit)\n motor_commands_min = (current_motor_angle - self.time_step * self.motor_velocity_limit)\n #motor_commands = np.clip(motor_commands, motor_commands_min, motor_commands_max)\n #print(\"motor commands 2\", motor_commands)\n if self.accurate_motor_model_enabled or self.pd_control_enabled:\n q = self.GetMotorAngles()\n qdot = self.GetMotorVelocities()\n if self.accurate_motor_model_enabled:\n actual_torque, observed_torque = self._motor_model.convert_to_torque(\n motor_commands, q, qdot)\n #print(\"q\", q, \"qdot\", qdot)\n #print(\"motor commands\", motor_commands)\n #print(\"actual torque\", actual_torque, \"observed torque\", observed_torque)\n self.debug_count += 1\n #if self.debug_count == 30:\n # sys.exit()\n for i in range(len(self.qmax)):\n if q[i] > self.qmax[i]:\n self.qmax[i] = q[i]\n #print(\"Q max\", self.qmax)\n\n if self.motor_overheat_protection:\n for i in range(self.num_motors):\n if abs(actual_torque[i]) > self.OVERHEAT_SHUTDOWN_TORQUE:\n self._overheat_counter[i] += 1\n else:\n self._overheat_counter[i] = 0\n if (self._overheat_counter[i] >\n self.OVERHEAT_SHUTDOWN_TIME / self.time_step):\n self._motor_enabled_list[i] = False\n # The torque is already in the observation space because we use\n # GetMotorAngles and GetMotorVelocities.\n self.observed_motor_torques = observed_torque\n #actual_torque.fill(0.0)\n\n # Transform into the motor space when applying the torque.\n self.applied_motor_torques = np.multiply(actual_torque, self.motor_direction)\n for motor_id, motor_torque, motor_enabled in zip(self._motor_id_list,\n self.applied_motor_torques,\n self._motor_enabled_list):\n if motor_enabled:\n self._SetMotorTorqueById(motor_id, motor_torque)\n else:\n self._SetMotorTorqueById(motor_id, 0)\n #print(\"Apply motor\", self.applied_motor_torques)\n for i in range(len(self.fmax)):\n if motor_commands[i] > self.fmax[i]:\n self.fmax[i] = motor_commands[i]\n #print(\"F max\", self.fmax)\n\n else:\n torque_commands = -self._kp * (q - motor_commands) - self._kd * qdot\n\n # The torque is already in the observation space because we use\n # GetMotorAngles and GetMotorVelocities.\n self.observed_motor_torques = torque_commands\n\n # Transform into the motor space when applying the torque.\n self.applied_motor_torques = np.multiply(self.observed_motor_torques,\n self.motor_direction)\n\n for motor_id, motor_torque in zip(self._motor_id_list, self.applied_motor_torques):\n self._SetMotorTorqueById(motor_id, motor_torque)\n print(\"Apply motor\", self.applied_motor_torques)\n else:\n motor_commands_with_direction = np.multiply(motor_commands, self.motor_direction)\n for motor_id, motor_command_with_direction in zip(self._motor_id_list,\n motor_commands_with_direction):\n print(\"command\", motor_command_with_direction)\n self._SetDesiredMotorAngleById(motor_id, motor_command_with_direction)\n\n def GetMotorAngles(self):\n \"\"\"Get the eight motor angles at the current moment.\n\n Returns:\n Motor angles.\n \"\"\"\n motor_angles = [\n p.getJointState(self.minitaur, motor_id)[0] for motor_id in self._motor_id_list\n ]\n motor_angles = np.multiply(motor_angles, self.motor_direction)\n return 
motor_angles\n\n def GetMotorVelocities(self):\n \"\"\"Get the velocity of all eight motors.\n\n Returns:\n Velocities of all eight motors.\n \"\"\"\n motor_velocities = [\n p.getJointState(self.minitaur, motor_id)[1] for motor_id in self._motor_id_list\n ]\n motor_velocities = np.multiply(motor_velocities, self.motor_direction)\n return motor_velocities\n\n def GetMotorTorques(self):\n \"\"\"Get the amount of torques the motors are exerting.\n\n Returns:\n Motor torques of all eight motors.\n \"\"\"\n if self.accurate_motor_model_enabled or self.pd_control_enabled:\n return self.observed_motor_torques\n else:\n motor_torques = [\n p.getJointState(self.minitaur, motor_id)[3] for motor_id in self._motor_id_list\n ]\n motor_torques = np.multiply(motor_torques, self.motor_direction)\n return motor_torques\n\n def ConvertFromLegModel(self, actions):\n \"\"\"Convert the actions that use leg model to the real motor actions.\n\n Args:\n actions: The theta, phi of the leg model.\n Returns:\n The eight desired motor angles that can be used in ApplyAction().\n \"\"\"\n motor_angle = copy.deepcopy(actions)\n scale_for_singularity = 1\n offset_for_singularity = 1.5\n half_num_motors = int(self.num_motors / 2)\n quater_pi = math.pi / 4\n for i in range(self.num_motors):\n action_idx = i // 2\n forward_backward_component = (\n -scale_for_singularity * quater_pi *\n (actions[action_idx + half_num_motors] + offset_for_singularity))\n extension_component = (-1)**i * quater_pi * actions[action_idx]\n if i >= half_num_motors:\n extension_component = -extension_component\n motor_angle[i] = (math.pi + forward_backward_component + extension_component)\n return motor_angle\n\n def GetBaseMassFromURDF(self):\n \"\"\"Get the mass of the base from the URDF file.\"\"\"\n return self._base_mass_urdf\n\n def GetLegMassesFromURDF(self):\n \"\"\"Get the mass of the legs from the URDF file.\"\"\"\n return self._leg_masses_urdf\n\n def SetBaseMass(self, base_mass):\n p.changeDynamics(self.minitaur, self.BASE_LINK_ID, mass=base_mass)\n\n def SetLegMasses(self, leg_masses):\n \"\"\"Set the mass of the legs.\n\n A leg includes leg_link and motor. All four leg_links have the same mass,\n which is leg_masses[0]. All four motors have the same mass, which is\n leg_mass[1].\n\n Args:\n leg_masses: The leg masses. leg_masses[0] is the mass of the leg link.\n leg_masses[1] is the mass of the motor.\n \"\"\"\n for link_id in self.LEG_LINK_ID:\n p.changeDynamics(self.minitaur, link_id, mass=leg_masses[0])\n for link_id in self.MOTOR_LINK_ID:\n p.changeDynamics(self.minitaur, link_id, mass=leg_masses[1])\n\n def SetFootFriction(self, foot_friction):\n \"\"\"Set the lateral friction of the feet.\n\n Args:\n foot_friction: The lateral friction coefficient of the foot. 
This value is\n        shared by all four feet.\n        \"\"\"\n        for link_id in self.FOOT_LINK_ID:\n            p.changeDynamics(self.minitaur, link_id, lateralFriction=foot_friction)\n\n    def SetBatteryVoltage(self, voltage):\n        if self.accurate_motor_model_enabled:\n            self._motor_model.set_voltage(voltage)\n\n    def SetMotorViscousDamping(self, viscous_damping):\n        if self.accurate_motor_model_enabled:\n            self._motor_model.set_viscous_damping(viscous_damping)\n\n\nclass Minitaur(MinitaurBase):\n    '''Wrapper class for gibson interface\n    \n    Attributes:\n        self.eyes\n        self.resolution\n        self.walk_target_x, self.walk_target_y\n        self.mjcf_scaling\n        self.observation_space\n        self.action_space\n        self.sensor_space\n\n    Interface:\n        self.apply_action()\n        self.calc_state()\n        self.addToScene()\n    '''\n\n    def __init__(self, config, env, pd_control_enabled=True, accurate_motor_model_enabled=True):\n        MinitaurBase.__init__(self, config, env, pd_control_enabled, accurate_motor_model_enabled)\n\n    def calc_state(self):\n        MinitaurBase.GetObservation(self)\n" ]
[ [ "numpy.multiply", "numpy.zeros" ] ]
YuJungHeo/kbvqa-public
[ "c04bed5c60085ac3a551a8c196e6269befce1e5b" ]
[ "main.py" ]
[ "import os\nimport time\nimport torch\nimport argparse\nfrom tqdm import tqdm\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils import (\n load_files,\n save_pickle,\n fix_seed,\n print_model,\n CosineAnnealingWarmUpRestarts,\n)\nfrom model import (\n BAN,\n HAN,\n GGNN,\n GCN,\n MemNet,\n HypergraphTransformer,\n HypergraphTransformer_wohe,\n HypergraphTransformer_qsetkhe,\n HypergraphTransformer_qhekset,\n)\nfrom modules.logger import setup_logger, get_rank\nfrom dataloader import KVQA, PQnPQL, load_PQnPQL_data, FVQA, load_FVQA_data\n\n\ndef eval_epoch(model, loader, args):\n model.eval()\n total_right = 0\n total_right_aset = 0\n total_num = 0\n\n for b_idx, batch in enumerate(tqdm(loader)):\n batch = [b.cuda() for b in batch]\n labels = batch[-1]\n\n pred = model(batch)\n pred_score, pred_ans = pred.max(1)\n\n nz_idxs = labels.nonzero()\n right = labels[nz_idxs] == pred_ans[nz_idxs]\n total_right += right.sum().item()\n total_num += len(labels)\n\n if \"fvqa\" in args.data_name:\n _, top3_indices = torch.topk(pred, 3)\n for idx, indices in enumerate(top3_indices):\n if labels[idx] in indices:\n total_right_aset += 1\n\n if \"pq\" in args.data_name:\n aset = batch[-2]\n for idx, pred in enumerate(pred_ans):\n if pred in aset[idx]:\n total_right_aset += 1\n\n return total_right, total_right_aset, total_num\n\n\ndef inference(model, test_loader, ckpt_path, args, task_idx=-1, res=False):\n last_ckpt = os.path.join(ckpt_path, \"ckpt_best.pth.tar\")\n checkpoint = torch.load(last_ckpt)\n\n if list(checkpoint[\"state_dict\"].keys())[0].startswith(\"module.\"):\n checkpoint[\"state_dict\"] = {\n k[7:]: v for k, v in checkpoint[\"state_dict\"].items()\n }\n\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\"load: %s\" % (last_ckpt))\n\n total_right, total_right_aset, total_num = eval_epoch(model, test_loader, args)\n accuracy = total_right / total_num\n\n if \"pq\" in args.data_name:\n accuracy = total_right_aset / total_num\n\n return accuracy\n\n\ndef main():\n \"\"\"parse config file\"\"\"\n parser = argparse.ArgumentParser(description=\"experiments\")\n parser.add_argument(\"--model_name\", default=\"ht\")\n parser.add_argument(\"--data_name\", default=\"kvqa\")\n parser.add_argument(\"--cfg\", default=\"ht\")\n parser.add_argument(\"--exp_name\", default=\"dev\")\n parser.add_argument(\"--inference\", action=\"store_true\")\n parser.add_argument(\"--per_cate\", action=\"store_true\")\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--schedule\", action=\"store_true\")\n parser.add_argument(\"--selected\", action=\"store_true\")\n parser.add_argument(\"--abl_only_ga\", action=\"store_true\")\n parser.add_argument(\"--abl_only_sa\", action=\"store_true\")\n parser.add_argument(\"--abl_ans_fc\", action=\"store_true\")\n parser.add_argument(\"--split_seed\", type=int, default=1234)\n parser.add_argument(\"--wd\", type=float, default=0.0)\n parser.add_argument(\"--num_workers\", type=int, default=4)\n parser.add_argument(\"--max_epoch\", type=int, default=1000)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n parser.add_argument(\"--q_opt\", type=str, default=\"org\")\n parser.add_argument(\"--n_hop\", type=int, default=1)\n args = parser.parse_args()\n\n config_file = \"configs/%s.yaml\" % (args.cfg)\n model_cfg = load_files(config_file)\n\n fix_seed(model_cfg[\"MODEL\"][\"SEED\"])\n\n if args.debug == False:\n 
summary_path = model_cfg[\"RES\"][\"TB\"] + args.exp_name\n summary = SummaryWriter(summary_path)\n\n log_path = model_cfg[\"RES\"][\"LOG\"] + args.exp_name\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n\n ckpt_path = model_cfg[\"RES\"][\"CKPT\"] + args.exp_name\n if not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n\n logger = setup_logger(args.exp_name, log_path, get_rank())\n logger.info(model_cfg[\"MODEL\"])\n logger.info(args)\n\n # ------------ Construct Dataset Class ------------------------------------\n datasets = {}\n if args.data_name == \"kvqa\":\n modes = [\"train\", \"val\", \"test\"]\n n_node_lists = []\n for mode in modes:\n fname = ckpt_path + \"/%s_cache.pkl\" % (mode)\n if os.path.isfile(fname):\n datasets[mode] = load_files(fname)\n else:\n data = KVQA(model_cfg, args, mode)\n datasets[mode] = data\n save_pickle(data, fname)\n n_node_lists.append(max(datasets[mode].n_node))\n max_n_node = max(n_node_lists)\n\n for mode in modes:\n datasets[mode].max_n_node = max_n_node\n\n elif \"fvqa\" in args.data_name:\n train, test = load_FVQA_data(model_cfg, args)\n datasets[\"train\"] = FVQA(model_cfg, args, train)\n datasets[\"test\"] = FVQA(model_cfg, args, test)\n\n elif \"pq\" in args.data_name:\n train, val, test = load_PQnPQL_data(model_cfg, args)\n datasets[\"train\"] = PQnPQL(model_cfg, args, train)\n datasets[\"val\"] = PQnPQL(model_cfg, args, val)\n datasets[\"test\"] = PQnPQL(model_cfg, args, test)\n\n train_loader = DataLoader(\n datasets[\"train\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=True,\n )\n if \"fvqa\" in args.data_name:\n val_loader = DataLoader(\n datasets[\"test\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=True,\n )\n else:\n val_loader = DataLoader(\n datasets[\"val\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=True,\n )\n test_loader = DataLoader(\n datasets[\"test\"],\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=False,\n )\n\n # ------------ Model -----------------------\n if args.model_name == \"ht\":\n model = HypergraphTransformer(model_cfg, args).cuda()\n elif args.model_name == \"ht_abl_wohe\":\n model = HypergraphTransformer_wohe(model_cfg, args).cuda()\n elif args.model_name == \"ht_abl_qset_khe\":\n model = HypergraphTransformer_qsetkhe(model_cfg, args).cuda()\n elif args.model_name == \"ht_abl_qhe_kset\":\n model = HypergraphTransformer_qhekset(model_cfg, args).cuda()\n elif args.model_name == \"ggnn\":\n model = GGNN(model_cfg, args, max_n_node).cuda()\n elif args.model_name == \"han\":\n model = HAN(model_cfg, args).cuda()\n elif args.model_name == \"ban\":\n model = BAN(model_cfg, args).cuda()\n elif args.model_name == \"memnet\":\n model = MemNet(model_cfg, args).cuda()\n elif args.model_name == \"gcn\":\n model = GCN(model_cfg, args).cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n lr_scheduler = CosineAnnealingWarmUpRestarts(\n optimizer, T_0=150, T_mult=1, eta_max=0.001, T_up=10, gamma=0.5\n )\n model.cuda()\n\n # ------------ Evaluate -----------------------\n if args.inference == True:\n if args.per_cate == False:\n test_acc_final = inference(model, test_loader, ckpt_path, args, res=False)\n logger.info(\"test accuracy (final) : %f\" % (test_acc_final))\n\n else: # analysis on question types (KVQA only)\n if args.data_name == \"kvqa\":\n cate_accu_test = []\n qtypes = 
load_files(model_cfg[\"DATASET\"][\"IDX2QTYPE\"])\n for task_idx in range(10):\n test = KVQA(model_cfg, args, \"test\", task_idx)\n test.max_n_node = max_n_node\n test_loader = DataLoader(\n test,\n batch_size=model_cfg[\"MODEL\"][\"BATCH_SIZE\"],\n num_workers=args.num_workers,\n shuffle=False,\n )\n accu = inference(\n model, test_loader, ckpt_path, args, task_idx=task_idx, res=True\n )\n cate_accu_test.append(accu)\n print(qtypes[:10])\n print(cate_accu_test)\n else:\n raise NotImplementedError(\n \"Datasets except KVQA do not have categories for questions. Set per_cate as False.\"\n )\n return 0\n\n # ------------ Training -----------------------\n train_loss = []\n best_acc = 0.0\n\n for e_idx in range(0, args.max_epoch):\n model.train()\n total_right = 0\n total_num = 0\n total_right_aset = 0\n for b_idx, batch in enumerate(tqdm(train_loader)):\n batch = [b.cuda() for b in batch]\n labels = batch[-1]\n pred = model(batch)\n pred_score, pred_ans = pred.max(1)\n loss = F.nll_loss(pred, labels)\n train_loss.append(loss.item())\n\n nz_idxs = labels.nonzero()\n right = labels[nz_idxs] == pred_ans[nz_idxs]\n total_right += right.sum().item()\n total_num += len(labels)\n\n if \"fvqa\" in args.data_name:\n _, top3_indices = torch.topk(pred, 3)\n for idx, indices in enumerate(top3_indices):\n if labels[idx] in indices:\n if labels[idx] != 0:\n total_right_aset += 1 # top-3 accuracy\n\n if \"pq\" in args.data_name:\n aset = batch[-2]\n for idx, pred in enumerate(pred_ans):\n if pred in aset[idx]:\n total_right_aset += 1\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if args.debug == False:\n summary.add_scalar(\n \"loss/train\", loss.item(), e_idx * len(train_loader) + b_idx\n )\n\n if args.schedule:\n lr_scheduler.step()\n\n if args.debug == False:\n tr_accu = total_right / total_num\n tr_accu_aset = total_right_aset / total_num\n summary.add_scalar(\"accu/train\", tr_accu, e_idx)\n\n if \"pq\" in args.data_name:\n summary.add_scalar(\"accu_aset/train\", tr_accu_aset, e_idx)\n logger.info(\n \"epoch %i train accuracy : %f, %i/%i / %f, %i/%i \"\n % (\n e_idx,\n tr_accu,\n total_right,\n total_num,\n tr_accu_aset,\n total_right_aset,\n total_num,\n )\n )\n else:\n logger.info(\n \"epoch %i train accuracy : %f, %i/%i\"\n % (e_idx, tr_accu, total_right, total_num)\n )\n\n with torch.no_grad():\n total_right_val, total_right_aset_val, total_num_val = eval_epoch(\n model, val_loader, args\n )\n\n if args.debug == False:\n val_acc = total_right_val / total_num_val\n val_acc_aset = total_right_aset_val / total_num_val\n summary.add_scalar(\"accu/val\", val_acc, e_idx)\n\n if \"pq\" in args.data_name:\n summary.add_scalar(\"accu_aset/val\", val_acc_aset, e_idx)\n logger.info(\n \"epoch %i val accuracy : %f, %i/%i / %f, %i/%i\"\n % (\n e_idx,\n val_acc,\n total_right_val,\n total_num_val,\n val_acc_aset,\n total_right_aset_val,\n total_num_val,\n )\n )\n val_acc = val_acc_aset\n else:\n logger.info(\n \"epoch %i val accuracy : %f, %i/%i\"\n % (e_idx, val_acc, total_right_val, total_num_val)\n )\n\n if val_acc >= best_acc:\n best_acc = val_acc\n torch.save(\n {\n \"epoch_idx\": e_idx,\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n },\n os.path.join(ckpt_path, \"ckpt_best.pth.tar\"),\n )\n logger.info(\"## Current VAL Best : %f\" % (best_acc))\n\n test_acc_final = inference(model, test_loader, ckpt_path, args)\n logger.info(\"## Test accuracy : %f\" % (test_acc_final))\n if \"pq\" in args.data_name:\n summary.add_scalar(\"accu_aset/test\", 
test_acc_final, 0)\n else:\n summary.add_scalar(\"accu/test\", test_acc_final, 0)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "torch.nn.functional.nll_loss", "torch.no_grad", "torch.topk", "torch.utils.tensorboard.SummaryWriter" ] ]
zfar-/BaselineWithNoise
[ "ca18d1f2aed36d571c50ed8e630eb38a87c79265" ]
[ "baselines/run.py" ]
[ "import sys\nimport multiprocessing\nimport os.path as osp\nimport gym\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\n\nfrom baselines.common.vec_env.vec_video_recorder import VecVideoRecorder\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nfrom baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\nfrom baselines.common.tf_util import get_session\nfrom baselines import logger\nfrom importlib import import_module\n\nfrom baselines.common.vec_env.vec_normalize import VecNormalize\n\ntry:\n from mpi4py import MPI\nexcept ImportError:\n print(\"can't import MPI \")\n MPI = None\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\n\ntry:\n import roboschool\nexcept ImportError:\n roboschool = None\n\n_game_envs = defaultdict(set)\nfor env in gym.envs.registry.all():\n # TODO: solve this with regexes\n env_type = env._entry_point.split(':')[0].split('.')[-1]\n _game_envs[env_type].add(env.id)\n\n# reading benchmark names directly from retro requires\n# importing retro here, and for some reason that crashes tensorflow\n# in ubuntu\n_game_envs['retro'] = {\n 'BubbleBobble-Nes',\n 'SuperMarioBros-Nes',\n 'TwinBee3PokoPokoDaimaou-Nes',\n 'SpaceHarrier-Nes',\n 'SonicTheHedgehog-Genesis',\n 'Vectorman-Genesis',\n 'FinalFight-Snes',\n 'SpaceInvaders-Snes',\n}\n\n\ndef train(args, extra_args):\n\n env_type, env_id = get_env_type(args.env)\n print(\"In the train function with env_type {} env_id {}\".format(env_type , env_id))\n # print('env_type: {}'.format(env_type))\n\n total_timesteps = int(args.num_timesteps)\n seed = args.seed\n\n learn = get_learn_function(args.alg)\n alg_kwargs = get_learn_function_defaults(args.alg, env_type)\n alg_kwargs.update(extra_args)\n\n\n\n print(\"Now called build_env env function with arg :: \",args)\n env = build_env(args)\n if args.save_video_interval != 0:\n env = VecVideoRecorder(env, osp.join(logger.Logger.CURRENT.dir, \"videos\"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)\n\n if args.network:\n alg_kwargs['network'] = args.network\n else:\n if alg_kwargs.get('network') is None:\n alg_kwargs['network'] = get_default_network(env_type)\n\n print('Training {} on {}:{} with arguments \\n{}'.format(args.alg, env_type, env_id, alg_kwargs))\n\n model = learn(\n env=env,\n seed=seed,\n total_timesteps=total_timesteps,\n **alg_kwargs\n )\n\n return model, env\n\n\ndef random_agent_ob_mean_std(env, nsteps=10000):\n ob = np.asarray(env.reset())\n if MPI.COMM_WORLD.Get_rank() == 0:\n obs = [ob]\n print(\"::: Entered the random action for 1000 steps ::: \")\n for _ in range(nsteps):\n ac = env.action_space.sample() # random action \n ob, _, done, _ = env.step(ac)\n if done:\n ob = env.reset()\n obs.append(np.asarray(ob))\n mean = np.mean(obs, 0).astype(np.float32)\n std = np.std(obs, 0).mean().astype(np.float32)\n else:\n mean = np.empty(shape=ob.shape, dtype=np.float32)\n std = np.empty(shape=(), dtype=np.float32)\n MPI.COMM_WORLD.Bcast(mean, root=0)\n MPI.COMM_WORLD.Bcast(std, root=0)\n return mean, std\n\ndef build_env(args):\n\n ncpu = multiprocessing.cpu_count()\n if sys.platform == 'darwin': ncpu //= 2\n nenv = args.num_env or ncpu\n alg = args.alg\n seed = args.seed\n\n env_type, env_id = get_env_type(args.env)\n print(\"In the build_env function with alg :: \",alg)\n if env_type in {'atari', 'retro'}:\n if alg == 'deepq':\n env = make_env(env_id, env_type, seed=seed, 
wrapper_kwargs={'frame_stack': True})\n elif alg == 'trpo_mpi':\n env = make_env(env_id, env_type, seed=seed)\n else:\n frame_stack_size = 4\n print(\"make_vec_env arguments env_id {} , env_type {} , nenv {} ,seed {} , gamestate {} reward_scale {}\".format(\n env_id , env_type , nenv , seed , args.gamestate , args.reward_scale))\n \n #>\n # print(\"Called environment for mean and std\")\n # env = make_vec_env(env_id, env_type, 1, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)\n # # env = VecFrameStack(env, frame_stack_size) ## No need for frame stacking while calculation of mean and std\n # ob_mean, ob_std = random_agent_ob_mean_std(env)\n # print(\" environment complete with mean {} and std {}\".format(ob_mean , ob_std))\n # del env \n #>\n\n env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)\n \n # print(\"Received env from make_vec_env type env {} and env \".format(\n # type(env) , env))\n print(\"ob_space {} and ac_space {} \".format(env.observation_space, env.action_space))\n env = VecFrameStack(env, frame_stack_size)\n\n\n print(\"After Frame stacking env would become \" )\n\n else:\n config = tf.ConfigProto(allow_soft_placement=True,\n intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n config.gpu_options.allow_growth = True\n get_session(config=config)\n\n env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale)\n\n if env_type == 'mujoco':\n env = VecNormalize(env)\n\n return env #, ob_mean, ob_std\n\n\ndef get_env_type(env_id):\n if env_id in _game_envs.keys():\n env_type = env_id\n env_id = [g for g in _game_envs[env_type]][0]\n else:\n env_type = None\n for g, e in _game_envs.items():\n if env_id in e:\n env_type = g\n break\n assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys())\n\n return env_type, env_id\n\n\ndef get_default_network(env_type):\n if env_type in {'atari', 'retro'}:\n return 'cnn'\n else:\n return 'mlp'\n\ndef get_alg_module(alg, submodule=None):\n submodule = submodule or alg\n try:\n # first try to import the alg module from baselines\n alg_module = import_module('.'.join(['baselines', alg, submodule]))\n except ImportError:\n # then from rl_algs\n alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))\n\n return alg_module\n\n\ndef get_learn_function(alg):\n return get_alg_module(alg).learn\n\n\ndef get_learn_function_defaults(alg, env_type):\n try:\n alg_defaults = get_alg_module(alg, 'defaults')\n kwargs = getattr(alg_defaults, env_type)()\n except (ImportError, AttributeError):\n kwargs = {}\n return kwargs\n\n\ndef parse_cmdline_kwargs(args):\n '''\n convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible\n '''\n def parse(v):\n\n assert isinstance(v, str)\n try:\n return eval(v)\n except (NameError, SyntaxError):\n return v\n\n return {k: parse(v) for k,v in parse_unknown_args(args).items()}\n\n\n\ndef main(args):\n # configure logger, disable logging in child MPI processes (with rank > 0)\n\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args(args)\n extra_args = parse_cmdline_kwargs(unknown_args)\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n rank = 0\n logger.configure()\n else:\n logger.configure(format_strs=[])\n rank = MPI.COMM_WORLD.Get_rank()\n\n print(\"Called the trained function\")\n model, env = train(args, extra_args)\n\n env.close()\n\n if args.save_path 
is not None and rank == 0:\n save_path = osp.expanduser(args.save_path)\n model.save(save_path)\n\n if args.play:\n logger.log(\"Running trained model\")\n env ,ob_mean , ob_std = build_env(args)\n obs = env.reset()\n def initialize_placeholders(nlstm=128,**kwargs):\n return np.zeros((args.num_env or 1, 2*nlstm)), np.zeros((1))\n state, dones = initialize_placeholders(**extra_args)\n while True:\n actions, _, state, _ = model.step(obs,S=state, M=dones)\n obs, _, done, _ = env.step(actions)\n env.render()\n done = done.any() if isinstance(done, np.ndarray) else done\n\n if done:\n obs = env.reset()\n\n env.close()\n\n return model\n\nif __name__ == '__main__':\n main(sys.argv)\n" ]
[ [ "numpy.empty", "numpy.zeros", "numpy.asarray", "numpy.std", "tensorflow.ConfigProto", "numpy.mean" ] ]
99starman/fairseq
[ "a098a52f5c961dffd06fd9a14c4cf6b657f2f52d" ]
[ "fairseq/tasks/translation.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom dataclasses import dataclass, field\nimport itertools\nimport json\nimport logging\nimport os\nfrom typing import Optional\nfrom argparse import Namespace\nfrom omegaconf import II\n\nimport numpy as np\nfrom fairseq import metrics, utils\nfrom fairseq.data import (\n AppendTokenDataset,\n ConcatDataset,\n LanguagePairDataset,\n PrependTokenDataset,\n StripTokenDataset,\n TruncateDataset,\n data_utils,\n encoders,\n indexed_dataset,\n)\nfrom fairseq.data.indexed_dataset import get_available_dataset_impl\nfrom fairseq.dataclass import ChoiceEnum, FairseqDataclass\nfrom fairseq.tasks import FairseqTask, register_task\n\n\nEVAL_BLEU_ORDER = 4\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_langpair_dataset(\n data_path,\n split,\n src,\n src_dict,\n tgt,\n tgt_dict,\n combine,\n dataset_impl,\n upsample_primary,\n left_pad_source,\n left_pad_target,\n max_source_positions,\n max_target_positions,\n prepend_bos=False,\n load_alignments=False,\n truncate_source=False,\n append_source_id=False,\n num_buckets=0,\n shuffle=True,\n pad_to_multiple=1,\n prepend_bos_src=None,\n):\n def split_exists(split, src, tgt, lang, data_path):\n filename = os.path.join(data_path, \"{}.{}-{}.{}\".format(split, src, tgt, lang))\n return indexed_dataset.dataset_exists(filename, impl=dataset_impl)\n\n src_datasets = []\n tgt_datasets = []\n\n for k in itertools.count():\n split_k = split + (str(k) if k > 0 else \"\")\n\n # infer langcode\n if split_exists(split_k, src, tgt, src, data_path):\n prefix = os.path.join(data_path, \"{}.{}-{}.\".format(split_k, src, tgt))\n elif split_exists(split_k, tgt, src, src, data_path):\n prefix = os.path.join(data_path, \"{}.{}-{}.\".format(split_k, tgt, src))\n else:\n if k > 0:\n break\n else:\n raise FileNotFoundError(\n \"Dataset not found: {} ({})\".format(split, data_path)\n )\n\n src_dataset = data_utils.load_indexed_dataset(\n prefix + src, src_dict, dataset_impl\n )\n if truncate_source:\n src_dataset = AppendTokenDataset(\n TruncateDataset(\n StripTokenDataset(src_dataset, src_dict.eos()),\n max_source_positions - 1,\n ),\n src_dict.eos(),\n )\n src_datasets.append(src_dataset)\n\n tgt_dataset = data_utils.load_indexed_dataset(\n prefix + tgt, tgt_dict, dataset_impl\n )\n if tgt_dataset is not None:\n tgt_datasets.append(tgt_dataset)\n\n logger.info(\n \"{} {} {}-{} {} examples\".format(\n data_path, split_k, src, tgt, len(src_datasets[-1])\n )\n )\n\n if not combine:\n break\n\n assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0\n\n if len(src_datasets) == 1:\n src_dataset = src_datasets[0]\n tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None\n else:\n sample_ratios = [1] * len(src_datasets)\n sample_ratios[0] = upsample_primary\n src_dataset = ConcatDataset(src_datasets, sample_ratios)\n if len(tgt_datasets) > 0:\n tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)\n else:\n tgt_dataset = None\n\n if prepend_bos:\n assert hasattr(src_dict, \"bos_index\") and hasattr(tgt_dict, \"bos_index\")\n src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())\n if tgt_dataset is not None:\n tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())\n elif prepend_bos_src is not None:\n logger.info(f\"prepending src bos: {prepend_bos_src}\")\n src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)\n\n eos = None\n if 
append_source_id:\n src_dataset = AppendTokenDataset(\n src_dataset, src_dict.index(\"[{}]\".format(src))\n )\n if tgt_dataset is not None:\n tgt_dataset = AppendTokenDataset(\n tgt_dataset, tgt_dict.index(\"[{}]\".format(tgt))\n )\n eos = tgt_dict.index(\"[{}]\".format(tgt))\n\n align_dataset = None\n if load_alignments:\n align_path = os.path.join(data_path, \"{}.align.{}-{}\".format(split, src, tgt))\n if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):\n align_dataset = data_utils.load_indexed_dataset(\n align_path, None, dataset_impl\n )\n\n tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None\n return LanguagePairDataset(\n src_dataset,\n src_dataset.sizes,\n src_dict,\n tgt_dataset,\n tgt_dataset_sizes,\n tgt_dict,\n left_pad_source=left_pad_source,\n left_pad_target=left_pad_target,\n align_dataset=align_dataset,\n eos=eos,\n num_buckets=num_buckets,\n shuffle=shuffle,\n pad_to_multiple=pad_to_multiple,\n )\n\n\n@dataclass\nclass TranslationConfig(FairseqDataclass):\n data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"colon separated path to data directories list, will be iterated upon during epochs \"\n \"in round-robin manner; however, valid and test data are always in the first directory \"\n \"to avoid the need for repeating them in all directories\"\n },\n )\n source_lang: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"source language\",\n \"argparse_alias\": \"-s\",\n },\n )\n target_lang: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"target language\",\n \"argparse_alias\": \"-t\",\n },\n )\n lang: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"customized argument: language\",\n \"argparse_alias\": \"-lan\",\n },\n )\n load_alignments: bool = field(\n default=False, metadata={\"help\": \"load the binarized alignments\"}\n )\n left_pad_source: bool = field(\n default=True, metadata={\"help\": \"pad the source on the left\"}\n )\n left_pad_target: bool = field(\n default=False, metadata={\"help\": \"pad the target on the left\"}\n )\n max_source_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the source sequence\"}\n )\n max_target_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the target sequence\"}\n )\n upsample_primary: int = field(\n default=-1, metadata={\"help\": \"the amount to upsample the primary dataset\"}\n )\n truncate_source: bool = field(\n default=False, metadata={\"help\": \"truncate source to max-source-positions\"}\n )\n num_batch_buckets: int = field(\n default=0,\n metadata={\n \"help\": \"if >0, then bucket source and target lengths into \"\n \"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations\"\n },\n )\n train_subset: str = II(\"dataset.train_subset\")\n dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(\n \"dataset.dataset_impl\"\n )\n required_seq_len_multiple: int = II(\"dataset.required_seq_len_multiple\")\n\n # options for reporting BLEU during validation\n eval_bleu: bool = field(\n default=False, metadata={\"help\": \"evaluation with BLEU scores\"}\n )\n eval_bleu_args: Optional[str] = field(\n default=\"{}\",\n metadata={\n \"help\": 'generation args for BLEU scoring, e.g., \\'{\"beam\": 4, \"lenpen\": 0.6}\\', as JSON string'\n },\n )\n eval_bleu_detok: str = field(\n default=\"space\",\n metadata={\n \"help\": \"detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; \"\n 
\"use 'space' to disable detokenization; see fairseq.data.encoders for other options\"\n },\n )\n eval_bleu_detok_args: Optional[str] = field(\n default=\"{}\",\n metadata={\"help\": \"args for building the tokenizer, if needed, as JSON string\"},\n )\n eval_tokenized_bleu: bool = field(\n default=False, metadata={\"help\": \"compute tokenized BLEU instead of sacrebleu\"}\n )\n eval_bleu_remove_bpe: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"remove BPE before computing BLEU\",\n \"argparse_const\": \"@@ \",\n },\n )\n eval_bleu_print_samples: bool = field(\n default=False, metadata={\"help\": \"print sample generations during validation\"}\n )\n\n\n@register_task(\"translation\", dataclass=TranslationConfig)\nclass TranslationTask(FairseqTask):\n \"\"\"\n Translate from one (source) language to another (target) language.\n\n Args:\n src_dict (~fairseq.data.Dictionary): dictionary for the source language\n tgt_dict (~fairseq.data.Dictionary): dictionary for the target language\n\n .. note::\n\n The translation task is compatible with :mod:`fairseq-train`,\n :mod:`fairseq-generate` and :mod:`fairseq-interactive`.\n \"\"\"\n\n cfg: TranslationConfig\n\n def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):\n super().__init__(cfg)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, cfg: TranslationConfig, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n paths = utils.split_paths(cfg.data)\n assert len(paths) > 0\n # find language pair automatically\n if cfg.source_lang is None or cfg.target_lang is None:\n cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])\n if cfg.source_lang is None or cfg.target_lang is None:\n raise Exception(\n \"Could not infer language pair, please provide it explicitly\"\n )\n\n # load dictionaries\n src_dict = cls.load_dictionary(\n os.path.join(paths[0], \"dict.{}.txt\".format(cfg.source_lang))\n )\n tgt_dict = cls.load_dictionary(\n os.path.join(paths[0], \"dict.{}.txt\".format(cfg.target_lang))\n )\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n logger.info(\"[{}] dictionary: {} types\".format(cfg.source_lang, len(src_dict)))\n logger.info(\"[{}] dictionary: {} types\".format(cfg.target_lang, len(tgt_dict)))\n\n return cls(cfg, src_dict, tgt_dict)\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n paths = utils.split_paths(self.cfg.data)\n assert len(paths) > 0\n if split != self.cfg.train_subset:\n # if not training data set, use the first shard for valid and test\n paths = paths[:1]\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n src, tgt = self.cfg.source_lang, self.cfg.target_lang\n\n self.datasets[split] = load_langpair_dataset(\n data_path,\n split,\n src,\n self.src_dict,\n tgt,\n self.tgt_dict,\n combine=combine,\n dataset_impl=self.cfg.dataset_impl,\n upsample_primary=self.cfg.upsample_primary,\n left_pad_source=self.cfg.left_pad_source,\n left_pad_target=self.cfg.left_pad_target,\n max_source_positions=self.cfg.max_source_positions,\n max_target_positions=self.cfg.max_target_positions,\n load_alignments=self.cfg.load_alignments,\n truncate_source=self.cfg.truncate_source,\n num_buckets=self.cfg.num_batch_buckets,\n shuffle=(split != 
\"test\"),\n pad_to_multiple=self.cfg.required_seq_len_multiple,\n )\n\n def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):\n return LanguagePairDataset(\n src_tokens,\n src_lengths,\n self.source_dictionary,\n tgt_dict=self.target_dictionary,\n constraints=constraints,\n )\n\n def build_model(self, cfg, from_checkpoint=False):\n model = super().build_model(cfg, from_checkpoint)\n if self.cfg.eval_bleu:\n detok_args = json.loads(self.cfg.eval_bleu_detok_args)\n self.tokenizer = encoders.build_tokenizer(\n Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)\n )\n\n gen_args = json.loads(self.cfg.eval_bleu_args)\n self.sequence_generator = self.build_generator(\n [model], Namespace(**gen_args)\n )\n return model\n\n def valid_step(self, sample, model, criterion):\n loss, sample_size, logging_output = super().valid_step(sample, model, criterion)\n if self.cfg.eval_bleu:\n bleu = self._inference_with_bleu(self.sequence_generator, sample, model)\n logging_output[\"_bleu_sys_len\"] = bleu.sys_len\n logging_output[\"_bleu_ref_len\"] = bleu.ref_len\n # we split counts into separate entries so that they can be\n # summed efficiently across workers using fast-stat-sync\n assert len(bleu.counts) == EVAL_BLEU_ORDER\n for i in range(EVAL_BLEU_ORDER):\n logging_output[\"_bleu_counts_\" + str(i)] = bleu.counts[i]\n logging_output[\"_bleu_totals_\" + str(i)] = bleu.totals[i]\n return loss, sample_size, logging_output\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n if self.cfg.eval_bleu:\n\n def sum_logs(key):\n import torch\n\n result = sum(log.get(key, 0) for log in logging_outputs)\n if torch.is_tensor(result):\n result = result.cpu()\n return result\n\n counts, totals = [], []\n for i in range(EVAL_BLEU_ORDER):\n counts.append(sum_logs(\"_bleu_counts_\" + str(i)))\n totals.append(sum_logs(\"_bleu_totals_\" + str(i)))\n\n if max(totals) > 0:\n # log counts as numpy arrays -- log_scalar will sum them correctly\n metrics.log_scalar(\"_bleu_counts\", np.array(counts))\n metrics.log_scalar(\"_bleu_totals\", np.array(totals))\n metrics.log_scalar(\"_bleu_sys_len\", sum_logs(\"_bleu_sys_len\"))\n metrics.log_scalar(\"_bleu_ref_len\", sum_logs(\"_bleu_ref_len\"))\n\n def compute_bleu(meters):\n import inspect\n\n try:\n from sacrebleu.metrics import BLEU\n\n comp_bleu = BLEU.compute_bleu\n except ImportError:\n # compatibility API for sacrebleu 1.x\n import sacrebleu\n\n comp_bleu = sacrebleu.compute_bleu\n\n fn_sig = inspect.getfullargspec(comp_bleu)[0]\n if \"smooth_method\" in fn_sig:\n smooth = {\"smooth_method\": \"exp\"}\n else:\n smooth = {\"smooth\": \"exp\"}\n bleu = comp_bleu(\n correct=meters[\"_bleu_counts\"].sum,\n total=meters[\"_bleu_totals\"].sum,\n sys_len=meters[\"_bleu_sys_len\"].sum,\n ref_len=meters[\"_bleu_ref_len\"].sum,\n **smooth,\n )\n return round(bleu.score, 2)\n\n metrics.log_derived(\"bleu\", compute_bleu)\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.cfg.max_source_positions, self.cfg.max_target_positions)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict\n\n def _inference_with_bleu(self, generator, sample, model):\n import sacrebleu\n\n def decode(toks, escape_unk=False):\n s = 
self.tgt_dict.string(\n toks.int().cpu(),\n self.cfg.eval_bleu_remove_bpe,\n # The default unknown string in fairseq is `<unk>`, but\n # this is tokenized by sacrebleu as `< unk >`, inflating\n # BLEU scores. Instead, we use a somewhat more verbose\n # alternative that is unlikely to appear in the real\n # reference, but doesn't get split into multiple tokens.\n unk_string=(\"UNKNOWNTOKENINREF\" if escape_unk else \"UNKNOWNTOKENINHYP\"),\n )\n if self.tokenizer:\n s = self.tokenizer.decode(s)\n return s\n\n gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)\n hyps, refs = [], []\n for i in range(len(gen_out)):\n hyps.append(decode(gen_out[i][0][\"tokens\"]))\n refs.append(\n decode(\n utils.strip_pad(sample[\"target\"][i], self.tgt_dict.pad()),\n escape_unk=True, # don't count <unk> as matches to the hypo\n )\n )\n if self.cfg.eval_bleu_print_samples:\n logger.info(\"example hypothesis: \" + hyps[0])\n logger.info(\"example reference: \" + refs[0])\n if self.cfg.eval_tokenized_bleu:\n return sacrebleu.corpus_bleu(hyps, [refs], tokenize=\"none\")\n else:\n return sacrebleu.corpus_bleu(hyps, [refs])\n" ]
[ [ "numpy.array", "torch.is_tensor" ] ]
fietensen/FlappyAI
[ "f8bff24e2ee62edf97a9b061183e28bf4924db09" ]
[ "game/game.py" ]
[ "from game.pole import PolesObject\nfrom game.agent import Agent\nfrom pygame import Rect\nimport pygame, struct\nimport numpy as np\n\nclass Game:\n def __init__(self, resolution):\n self.resolution = resolution\n self.screen = pygame.display.set_mode(resolution) # init window\n self.playerpos = (0, resolution[1]) # initial player position\n self.poles = []\n self.poles.append(PolesObject(resolution))\n self.agents = []\n self.birdimg = pygame.image.load(\"graphics/flappybird.png\")\n self.birdimg = pygame.transform.scale(self.birdimg, (resolution[0]//20, resolution[0]//25))\n\n def step(self):\n self.screen.fill((51,255,255))\n remove_poles = []\n for index, pole in enumerate(self.poles):\n if pole.x+pole.width < 0:\n remove_poles.append(index)\n else:\n pole.move()\n pole.display(self.screen)\n\n for remove_pole in remove_poles:\n self.poles.pop(remove_pole)\n\n if self.poles[-1].x+self.poles[-1].width < self.resolution[0]-np.random.uniform(\n self.resolution[0]//3,\n self.resolution[0]//2):\n self.poles.append(PolesObject(self.resolution))\n\n #view = pygame.surfarray.array2d(self.screen)&0xFF\n for agent in self.agents:\n agent.move()\n for pole in self.poles:\n pole_upper = Rect((pole.x, 0), (pole.width, pole.height))\n pole_lower = Rect((pole.x, pole.height+pole.gapsize),\n (pole.width, pole.resolution[1] - pole.height+pole.gapsize))\n\n if Rect(agent.rect).colliderect(pole_upper) or Rect(agent.rect).colliderect(pole_lower):\n agent.dead = True\n elif agent.y < 0 or agent.y > self.resolution[1]:\n agent.dead = True\n elif not agent.dead:\n agent.fitness += .001\n self.screen.blit(self.birdimg, agent.rect)\n\n pygame.display.flip()\n" ]
[ [ "numpy.random.uniform" ] ]
soraros/nutils
[ "91119b12bdebf12a85eecb6a2247be2415f60e6f" ]
[ "nutils/evaluable.py" ]
[ "# Copyright (c) 2014 Evalf\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nThe function module defines the :class:`Evaluable` class and derived objects,\ncommonly referred to as nutils functions. They represent mappings from a\n:mod:`nutils.topology` onto Python space. The notabe class of :class:`Array`\nobjects map onto the space of Numpy arrays of predefined dimension and shape.\nMost functions used in nutils applicatons are of this latter type, including the\ngeometry and function bases for analysis.\n\nNutils functions are essentially postponed python functions, stored in a tree\nstructure of input/output dependencies. Many :class:`Array` objects have\ndirectly recognizable numpy equivalents, such as :class:`Sin` or\n:class:`Inverse`. By not evaluating directly but merely stacking operations,\ncomplex operations can be defined prior to entering a quadrature loop, allowing\nfor a higher level style programming. It also allows for automatic\ndifferentiation and code optimization.\n\nIt is important to realize that nutils functions do not map for a physical\nxy-domain but from a topology, where a point is characterized by the combination\nof an element and its local coordinate. This is a natural fit for typical finite\nelement operations such as quadrature. Evaluation from physical coordinates is\npossible only via inverting of the geometry function, which is a fundamentally\nexpensive and currently unsupported operation.\n\"\"\"\n\nimport typing\nif typing.TYPE_CHECKING:\n from typing_extensions import Protocol\nelse:\n Protocol = object\n\nfrom . 
import debug_flags, util, types, numeric, cache, warnings, parallel, sparse\nfrom ._graph import Node, RegularNode, DuplicatedLeafNode, InvisibleNode, Subgraph\nimport numpy, sys, itertools, functools, operator, inspect, numbers, builtins, re, types as builtin_types, abc, collections.abc, math, treelog as log, weakref, time, contextlib, subprocess, os\n\ngraphviz = os.environ.get('NUTILS_GRAPHVIZ')\n\nisevaluable = lambda arg: isinstance(arg, Evaluable)\n\ndef strictevaluable(value):\n if not isinstance(value, Evaluable):\n raise ValueError('expected an object of type {!r} but got {!r} with type {!r}'.format(Evaluable.__qualname__, value, type(value).__qualname__))\n return value\n\ndef simplified(value):\n return strictevaluable(value).simplified\n\nasdtype = lambda arg: arg if any(arg is dtype for dtype in (bool, int, float, complex)) else {'f': float, 'i': int, 'b': bool, 'c': complex}[numpy.dtype(arg).kind]\n\ndef asarray(arg):\n if hasattr(type(arg), 'as_evaluable_array'):\n return arg.as_evaluable_array\n if _containsarray(arg):\n return stack(arg, axis=0)\n else:\n return Constant(arg)\n\nasarrays = types.tuple[asarray]\n\ndef asindex(arg):\n arg = asarray(arg)\n if arg.ndim or arg.dtype != int:\n raise ValueError('argument is not an index: {}'.format(arg))\n if arg._intbounds[0] < 0:\n raise ValueError('index must be non-negative')\n return arg\n\n@types.apply_annotations\ndef equalindex(n:asindex, m:asindex):\n '''Compare two array indices.\n\n Returns `True` if the two indices are certainly equal, `False` if they are\n certainly not equal, or `None` if equality cannot be determined at compile\n time.\n '''\n\n if n is m:\n return True\n n = n.simplified\n m = m.simplified\n if n is m:\n return True\n if n.arguments != m.arguments:\n return False\n if n.isconstant: # implies m.isconstant\n return int(n) == int(m)\n\nasshape = types.tuple[asindex]\n\n@types.apply_annotations\ndef equalshape(N:asshape, M:asshape):\n '''Compare two array shapes.\n\n Returns `True` if all indices are certainly equal, `False` if any indices are\n certainly not equal, or `None` if equality cannot be determined at compile\n time.\n '''\n\n if N == M:\n return True\n if len(N) != len(M):\n return False\n retval = True\n for eq in map(equalindex, N, M):\n if eq == False:\n return False\n if eq == None:\n retval = None\n return retval\n\nclass ExpensiveEvaluationWarning(warnings.NutilsInefficiencyWarning): pass\n\ndef replace(func=None, depthfirst=False, recursive=False, lru=4):\n '''decorator for deep object replacement\n\n Generates a deep replacement method for general objects based on a callable\n that is applied (recursively) on individual constructor arguments.\n\n Args\n ----\n func\n Callable which maps an object onto a new object, or `None` if no\n replacement is made. It must have one positional argument for the object,\n and may have any number of additional positional and/or keyword\n arguments.\n depthfirst : :class:`bool`\n If `True`, decompose each object as far as possible, then apply `func` to\n all arguments as the objects are reconstructed. Otherwise apply `func`\n directly on each new object that is encountered in the decomposition,\n proceeding only if the return value is `None`.\n recursive : :class:`bool`\n If `True`, repeat replacement for any object returned by `func` until it\n returns `None`. Otherwise perform a single, non-recursive sweep.\n lru : :class:`int`\n Maximum size of the least-recently-used cache. 
A persistent weak-key\n dictionary is maintained for every unique set of function arguments. When\n the size of `lru` is reached, the least recently used cache is dropped.\n\n Returns\n -------\n :any:`callable`\n The method that searches the object to perform the replacements.\n '''\n\n if func is None:\n return functools.partial(replace, depthfirst=depthfirst, recursive=recursive, lru=lru)\n\n signature = inspect.signature(func)\n arguments = [] # list of past function arguments, least recently used last\n caches = [] # list of weak-key dictionaries matching arguments (above)\n\n remember = object() # token to signal that rstack[-1] can be cached as the replacement of fstack[-1]\n recreate = object() # token to signal that all arguments for object recreation are ready on rstack\n pending = object() # token to hold the place of a cachable object pending creation\n identity = object() # token to hold the place of the cache value in case it matches key, to avoid circular references\n\n @functools.wraps(func)\n def wrapped(target, *funcargs, **funckwargs):\n\n # retrieve or create a weak-key dictionary\n bound = signature.bind(None, *funcargs, **funckwargs)\n bound.apply_defaults()\n try:\n index = arguments.index(bound.arguments) # by using index, arguments need not be hashable\n except ValueError:\n index = -1\n cache = weakref.WeakKeyDictionary()\n else:\n cache = caches[index]\n if index != 0: # function arguments are not the most recent (possibly new)\n if index > 0 or len(arguments) >= lru:\n caches.pop(index) # pop matching (or oldest) item\n arguments.pop(index)\n caches.insert(0, cache) # insert popped (or new) item to front\n arguments.insert(0, bound.arguments)\n\n fstack = [target] # stack of unprocessed objects and command tokens\n rstack = [] # stack of processed objects\n _stack = fstack if recursive else rstack\n\n try:\n while fstack:\n obj = fstack.pop()\n\n if obj is recreate:\n args = [rstack.pop() for obj in range(fstack.pop())]\n f = fstack.pop()\n r = f(*args)\n if depthfirst:\n newr = func(r, *funcargs, **funckwargs)\n if newr is not None:\n _stack.append(newr)\n continue\n rstack.append(r)\n continue\n\n if obj is remember:\n obj = fstack.pop()\n cache[obj] = rstack[-1] if rstack[-1] is not obj else identity\n continue\n\n if isinstance(obj, (tuple, list, dict, set, frozenset)):\n if not obj:\n rstack.append(obj) # shortcut to avoid recreation of empty container\n else:\n fstack.append(lambda *x, T=type(obj): T(x))\n fstack.append(len(obj))\n fstack.append(recreate)\n fstack.extend(obj if not isinstance(obj, dict) else obj.items())\n continue\n\n try:\n r = cache[obj]\n except KeyError: # object can be weakly cached, but isn't\n cache[obj] = pending\n fstack.append(obj)\n fstack.append(remember)\n except TypeError: # object cannot be referenced or is not hashable\n pass\n else: # object is in cache\n if r is pending:\n pending_objs = [k for k, v in cache.items() if v is pending]\n index = pending_objs.index(obj)\n raise Exception('{}@replace caught in a circular dependence\\n'.format(func.__name__) + Tuple(pending_objs[index:]).asciitree().split('\\n', 1)[1])\n rstack.append(r if r is not identity else obj)\n continue\n\n if not depthfirst:\n newr = func(obj, *funcargs, **funckwargs)\n if newr is not None:\n _stack.append(newr)\n continue\n\n try:\n f, args = obj.__reduce__()\n except: # obj cannot be reduced into a constructor and its arguments\n rstack.append(obj)\n else:\n fstack.append(f)\n fstack.append(len(args))\n fstack.append(recreate)\n 
fstack.extend(args)\n\n assert len(rstack) == 1\n\n finally:\n while fstack:\n if fstack.pop() is remember:\n assert cache.pop(fstack.pop()) is pending\n\n return rstack[0]\n\n return wrapped\n\nclass Evaluable(types.Singleton):\n 'Base class'\n\n __slots__ = '__args'\n __cache__ = 'dependencies', 'arguments', 'ordereddeps', 'dependencytree', 'optimized_for_numpy', '_loop_concatenate_deps'\n\n @types.apply_annotations\n def __init__(self, args:types.tuple[strictevaluable]):\n super().__init__()\n self.__args = args\n\n def evalf(self, *args):\n raise NotImplementedError('Evaluable derivatives should implement the evalf method')\n\n def evalf_withtimes(self, times, *args):\n with times[self]:\n return self.evalf(*args)\n\n @property\n def dependencies(self):\n '''collection of all function arguments'''\n deps = {}\n for func in self.__args:\n funcdeps = func.dependencies\n deps.update(funcdeps)\n deps[func] = len(funcdeps)\n return types.frozendict(deps)\n\n @property\n def arguments(self):\n 'a frozenset of all arguments of this evaluable'\n return frozenset().union(*(child.arguments for child in self.__args))\n\n @property\n def isconstant(self):\n return EVALARGS not in self.dependencies\n\n @property\n def ordereddeps(self):\n '''collection of all function arguments such that the arguments to\n dependencies[i] can be found in dependencies[:i]'''\n deps = self.dependencies.copy()\n deps.pop(EVALARGS, None)\n return tuple([EVALARGS] + sorted(deps, key=deps.__getitem__))\n\n @property\n def dependencytree(self):\n '''lookup table of function arguments into ordereddeps, such that\n ordereddeps[i].__args[j] == ordereddeps[dependencytree[i][j]], and\n self.__args[j] == ordereddeps[dependencytree[-1][j]]'''\n args = self.ordereddeps\n return tuple(tuple(map(args.index, func.__args)) for func in args+(self,))\n\n @property\n def serialized(self):\n return zip(self.ordereddeps[1:]+(self,), self.dependencytree[1:])\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n args = tuple(arg._node(cache, subgraph, times) for arg in self.__args)\n label = '\\n'.join(filter(None, (type(self).__name__, self._node_details)))\n cache[self] = node = RegularNode(label, args, {}, (type(self).__name__, times[self]), subgraph)\n return node\n\n @property\n def _node_details(self):\n return ''\n\n def asciitree(self, richoutput=False):\n 'string representation'\n\n return self._node({}, None, collections.defaultdict(_Stats)).generate_asciitree(richoutput)\n\n def __str__(self):\n return self.__class__.__name__\n\n def eval(self, **evalargs):\n '''Evaluate function on a specified element, point set.'''\n\n values = [evalargs]\n try:\n values.extend(op.evalf(*[values[i] for i in indices]) for op, indices in self.serialized)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n raise EvaluationError(self, values) from e\n else:\n return values[-1]\n\n def eval_withtimes(self, times, **evalargs):\n '''Evaluate function on a specified element, point set while measure time of each step.'''\n\n values = [evalargs]\n try:\n values.extend(op.evalf_withtimes(times, *[values[i] for i in indices]) for op, indices in self.serialized)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n raise EvaluationError(self, values) from e\n else:\n return values[-1]\n\n @contextlib.contextmanager\n def session(self, graphviz):\n if graphviz is None:\n yield self.eval\n return\n stats = collections.defaultdict(_Stats)\n def eval(**args):\n return self.eval_withtimes(stats, **args)\n 
with log.context('eval'):\n yield eval\n node = self._node({}, None, stats)\n maxtime = builtins.max(n.metadata[1].time for n in node.walk(set()))\n tottime = builtins.sum(n.metadata[1].time for n in node.walk(set()))\n aggstats = tuple((key, builtins.sum(v.time for v in values), builtins.sum(v.ncalls for v in values)) for key, values in util.gather(n.metadata for n in node.walk(set())))\n fill_color = (lambda node: '0,{:.2f},1'.format(node.metadata[1].time/maxtime)) if maxtime else None\n node.export_graphviz(fill_color=fill_color, dot_path=graphviz)\n log.info('total time: {:.0f}ms\\n'.format(tottime/1e6) + '\\n'.join('{:4.0f} {} ({} calls, avg {:.3f} per call)'.format(t / 1e6, k, n, t / (1e6*n))\n for k, t, n in sorted(aggstats, reverse=True, key=lambda item: item[1]) if n))\n\n def _stack(self, values):\n lines = [' %0 = EVALARGS']\n for (op, indices), v in zip(self.serialized, values):\n lines[-1] += ' --> ' + type(v).__name__\n if numeric.isarray(v):\n lines[-1] += '({})'.format(','.join(map(str, v.shape)))\n try:\n code = op.evalf.__code__\n offset = 1 if getattr(op.evalf, '__self__', None) is not None else 0\n names = code.co_varnames[offset:code.co_argcount]\n names += tuple('{}[{}]'.format(code.co_varnames[code.co_argcount], n) for n in range(len(indices) - len(names)))\n args = map(' {}=%{}'.format, names, indices)\n except:\n args = map(' %{}'.format, indices)\n lines.append(' %{} = {}:{}'.format(len(lines), op, ','.join(args)))\n return lines\n\n @property\n @replace(depthfirst=True, recursive=True)\n def simplified(obj):\n if isinstance(obj, Evaluable):\n retval = obj._simplified()\n if retval is not None and isinstance(obj, Array):\n assert isinstance(retval, Array) and equalshape(retval.shape, obj.shape) and retval.dtype == obj.dtype, '{} --simplify--> {}'.format(obj, retval)\n return retval\n\n def _simplified(self):\n return\n\n @property\n def optimized_for_numpy(self):\n retval = self._optimized_for_numpy1() or self\n return retval._combine_loop_concatenates(frozenset())\n\n @types.apply_annotations\n @replace(depthfirst=True, recursive=True)\n def _optimized_for_numpy1(obj: simplified.fget):\n if isinstance(obj, Evaluable):\n retval = obj._simplified() or obj._optimized_for_numpy()\n if retval is not None and isinstance(obj, Array):\n assert isinstance(retval, Array) and equalshape(retval.shape, obj.shape), '{0}._optimized_for_numpy or {0}._simplified resulted in shape change'.format(type(obj).__name__)\n return retval\n\n def _optimized_for_numpy(self):\n return\n\n @property\n def _loop_concatenate_deps(self):\n deps = []\n for arg in self.__args:\n deps += [dep for dep in arg._loop_concatenate_deps if dep not in deps]\n return tuple(deps)\n\n def _combine_loop_concatenates(self, outer_exclude):\n while True:\n exclude = set(outer_exclude)\n combine = {}\n # Collect all top-level `LoopConcatenate` instances in `combine` and all\n # their dependent `LoopConcatenate` instances in `exclude`.\n for lc in self._loop_concatenate_deps:\n lcs = combine.setdefault(lc.index, [])\n if lc not in lcs:\n lcs.append(lc)\n exclude.update(set(lc._loop_concatenate_deps) - {lc})\n # Combine top-level `LoopConcatenate` instances excluding those in\n # `exclude`.\n replacements = {}\n for index, lcs in combine.items():\n lcs = [lc for lc in lcs if lc not in exclude]\n if not lcs:\n continue\n # We're extracting data from `LoopConcatenate` in favor of using\n # `loop_concatenate_combined(lcs, ...)` because the later requires\n # reapplying simplifications that are already applied in the 
former.\n # For example, in `loop_concatenate_combined` the offsets (used by\n # start, stop and the concatenation length) are formed by\n # `loop_concatenate`-ing `func.shape[-1]`. If the shape is constant,\n # this can be simplified to a `Range`.\n data = Tuple((Tuple(lc.funcdata) for lc in lcs))\n # Combine `LoopConcatenate` instances in `data` excluding\n # `outer_exclude` and those that will be processed in a subsequent loop\n # (the remainder of `exclude`). The latter consists of loops that are\n # invariant w.r.t. the current loop `index`.\n data = data._combine_loop_concatenates(exclude)\n combined = LoopConcatenateCombined(data, index._name, index.length)\n for i, lc in enumerate(lcs):\n intbounds = dict(zip(('_lower', '_upper'), lc._intbounds)) if lc.dtype == int else {}\n replacements[lc] = ArrayFromTuple(combined, i, lc.shape, lc.dtype, **intbounds)\n if replacements:\n self = replace(lambda key: replacements.get(key) if isinstance(key, LoopConcatenate) else None, recursive=False, depthfirst=False)(self)\n else:\n return self\n\nclass EvaluationError(Exception):\n def __init__(self, f, values):\n super().__init__('evaluation failed in step {}/{}\\n'.format(len(values), len(f.dependencies)) + '\\n'.join(f._stack(values)))\n\nclass EVALARGS(Evaluable):\n def __init__(self):\n super().__init__(args=())\n def _node(self, cache, subgraph, times):\n return InvisibleNode((type(self).__name__, _Stats()))\n\nEVALARGS = EVALARGS()\n\nclass EvaluableConstant(Evaluable):\n '''Evaluate to the given constant value.\n\n Parameters\n ----------\n value\n The return value of ``eval``.\n '''\n\n __slots__ = 'value'\n\n def __init__(self, value):\n self.value = value\n super().__init__(())\n\n def evalf(self):\n return self.value\n\n @property\n def _node_details(self):\n s = repr(self.value)\n if '\\n' in s:\n s = s.split('\\n', 1)[0] + '...'\n if len(s) > 20:\n s = s[:17] + '...'\n return s\n\nclass Tuple(Evaluable):\n\n __slots__ = 'items'\n\n @types.apply_annotations\n def __init__(self, items: types.tuple[strictevaluable]):\n self.items = items\n super().__init__(items)\n\n def evalf(self, *items):\n return items\n\n def __iter__(self):\n 'iterate'\n\n return iter(self.items)\n\n def __len__(self):\n 'length'\n\n return len(self.items)\n\n def __getitem__(self, item):\n 'get item'\n\n return self.items[item]\n\n def __add__(self, other):\n 'add'\n\n return Tuple(self.items + tuple(other))\n\n def __radd__(self, other):\n 'add'\n\n return Tuple(tuple(other) + self.items)\n\nclass SparseArray(Evaluable):\n 'sparse array'\n\n @types.apply_annotations\n def __init__(self, chunks:types.tuple[asarrays], shape:asarrays, dtype:asdtype):\n self._shape = shape\n self._dtype = dtype\n super().__init__(args=[Tuple(shape), *map(Tuple, chunks)])\n\n def evalf(self, shape, *chunks):\n length = builtins.sum(values.size for *indices, values in chunks)\n data = numpy.empty((length,), dtype=sparse.dtype(tuple(map(int, shape)), self._dtype))\n start = 0\n for *indices, values in chunks:\n stop = start + values.size\n d = data[start:stop].reshape(values.shape)\n d['value'] = values\n for idim, ii in enumerate(indices):\n d['index']['i'+str(idim)] = ii\n start = stop\n return data\n\n# ARRAYFUNC\n#\n# The main evaluable. 
Closely mimics a numpy array.\n\ndef add(a, b):\n a, b = _numpy_align(a, b)\n return Add([a, b])\n\ndef multiply(a, b):\n a, b = _numpy_align(a, b)\n return Multiply([a, b])\n\ndef sum(arg, axis=None):\n '''Sum array elements over a given axis.'''\n\n if axis is None:\n return Sum(arg)\n axes = (axis,) if numeric.isint(axis) else axis\n summed = Transpose.to_end(arg, *axes)\n for i in range(len(axes)):\n summed = Sum(summed)\n return summed\n\ndef product(arg, axis):\n return Product(Transpose.to_end(arg, axis))\n\ndef power(arg, n):\n arg, n = _numpy_align(arg, n)\n return Power(arg, n)\n\ndef dot(a, b, axes):\n '''\n Contract ``a`` and ``b`` along ``axes``.\n '''\n\n return multiply(a, b).sum(axes)\n\ndef transpose(arg, trans=None):\n arg = asarray(arg)\n if trans is None:\n normtrans = range(arg.ndim-1, -1, -1)\n else:\n normtrans = _normdims(arg.ndim, trans)\n assert sorted(normtrans) == list(range(arg.ndim))\n return Transpose(arg, normtrans)\n\ndef swapaxes(arg, axis1, axis2):\n arg = asarray(arg)\n trans = numpy.arange(arg.ndim)\n trans[axis1], trans[axis2] = trans[axis2], trans[axis1]\n return transpose(arg, trans)\n\ndef align(arg, where, shape):\n '''Align array to target shape.\n\n The align operation can be considered the opposite of transpose: instead of\n specifying for each axis of the return value the original position in the\n argument, align specifies for each axis of the argument the new position in\n the return value. In addition, the return value may be of higher dimension,\n with new axes being inserted according to the ``shape`` argument.\n\n Args\n ----\n arg : :class:`Array`\n Original array.\n where : :class:`tuple` of integers\n New axis positions.\n shape : :class:`tuple`\n Shape of the aligned array.\n\n Returns\n -------\n :class:`Array`\n The aligned array.\n '''\n\n where = list(where)\n for i, length in enumerate(shape):\n if i not in where:\n arg = InsertAxis(arg, length)\n where.append(i)\n if where != list(range(len(shape))):\n arg = Transpose(arg, numpy.argsort(where))\n assert equalshape(arg.shape, shape)\n return arg\n\ndef unalign(*args):\n '''Remove (joint) inserted axes.\n\n Given one or more equally shaped array arguments, return the shortest common\n axis vector along with function arguments such that the original arrays can\n be recovered by :func:`align`.\n '''\n\n assert args\n if len(args) == 1:\n return args[0]._unaligned\n if any(arg.ndim != args[0].ndim for arg in args[1:]):\n raise ValueError('varying dimensions in unalign')\n nonins = functools.reduce(operator.or_, [set(arg._unaligned[1]) for arg in args])\n if len(nonins) == args[0].ndim:\n return (*args, tuple(range(args[0].ndim)))\n ret = []\n for arg in args:\n unaligned, where = arg._unaligned\n for i in sorted(nonins - set(where)):\n unaligned = InsertAxis(unaligned, args[0].shape[i])\n where += i,\n if not ret: # first argument\n commonwhere = where\n elif where != commonwhere:\n unaligned = Transpose(unaligned, map(where.index, commonwhere))\n ret.append(unaligned)\n return (*ret, commonwhere)\n\n# ARRAYS\n\n_ArrayMeta = type(Evaluable)\n\nif debug_flags.sparse:\n def _chunked_assparse_checker(orig):\n assert isinstance(orig, property)\n @property\n def _assparse(self):\n chunks = orig.fget(self)\n assert isinstance(chunks, tuple)\n assert all(isinstance(chunk, tuple) for chunk in chunks)\n assert all(all(isinstance(item, Array) for item in chunk) for chunk in chunks)\n if self.ndim:\n for *indices, values in chunks:\n assert len(indices) == self.ndim\n assert all(idx.dtype == 
int for idx in indices)\n assert all(equalshape(idx.shape, values.shape) for idx in indices)\n elif chunks:\n assert len(chunks) == 1\n chunk, = chunks\n assert len(chunk) == 1\n values, = chunk\n assert values.shape == ()\n return chunks\n return _assparse\n\n class _ArrayMeta(_ArrayMeta):\n def __new__(mcls, name, bases, namespace):\n if '_assparse' in namespace:\n namespace['_assparse'] = _chunked_assparse_checker(namespace['_assparse'])\n return super().__new__(mcls, name, bases, namespace)\n\nif debug_flags.evalf:\n class _evalf_checker:\n def __init__(self, orig):\n self.evalf_obj = getattr(orig, '__get__', lambda *args: orig)\n def __get__(self, instance, owner):\n evalf = self.evalf_obj(instance, owner)\n @functools.wraps(evalf)\n def evalf_with_check(*args, **kwargs):\n res = evalf(*args, **kwargs)\n assert not hasattr(instance, 'dtype') or asdtype(res.dtype) == instance.dtype, ((instance.dtype, res.dtype), instance, res)\n assert not hasattr(instance, 'ndim') or res.ndim == instance.ndim\n assert not hasattr(instance, 'shape') or all(m == n for m, n in zip(res.shape, instance.shape) if isinstance(n, int)), 'shape mismatch'\n return res\n return evalf_with_check\n\n class _ArrayMeta(_ArrayMeta):\n def __new__(mcls, name, bases, namespace):\n if 'evalf' in namespace:\n namespace['evalf'] = _evalf_checker(namespace['evalf'])\n return super().__new__(mcls, name, bases, namespace)\n\nclass AsEvaluableArray(Protocol):\n 'Protocol for conversion into an :class:`Array`.'\n\n @property\n def as_evaluable_array(self) -> 'Array':\n 'Lower this object to a :class:`nutils.evaluable.Array`.'\n\nclass Array(Evaluable, metaclass=_ArrayMeta):\n '''\n Base class for array valued functions.\n\n Attributes\n ----------\n shape : :class:`tuple` of :class:`int`\\\\s\n The shape of this array function.\n ndim : :class:`int`\n The number of dimensions of this array function. Equal to\n ``len(shape)``.\n dtype : :class:`int`, :class:`float`\n The dtype of the array elements.\n '''\n\n __slots__ = 'shape', 'dtype', '__index'\n __cache__ = 'assparse', '_assparse', '_intbounds'\n\n __array_priority__ = 1. # http://stackoverflow.com/questions/7042496/numpy-coercion-problem-for-left-sided-binary-operator/7057530#7057530\n\n @types.apply_annotations\n def __init__(self, args:types.tuple[strictevaluable], shape:asshape, dtype:asdtype):\n self.shape = shape\n self.dtype = dtype\n super().__init__(args=args)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n def __getitem__(self, item):\n if not isinstance(item, tuple):\n item = item,\n if ... in item:\n iell = item.index(...)\n if ... 
in item[iell+1:]:\n raise IndexError('an index can have only a single ellipsis')\n # replace ellipsis by the appropriate number of slice(None)\n item = item[:iell] + (slice(None),)*(self.ndim-len(item)+1) + item[iell+1:]\n if len(item) > self.ndim:\n raise IndexError('too many indices for array')\n array = self\n for axis, it in reversed(tuple(enumerate(item))):\n array = get(array, axis, item=it) if numeric.isint(it) \\\n else _takeslice(array, it, axis) if isinstance(it, slice) \\\n else take(array, it, axis)\n return array\n\n def __bool__(self):\n return True\n\n def __len__(self):\n if self.ndim == 0:\n raise TypeError('len() of unsized object')\n return self.shape[0]\n\n def __index__(self):\n try:\n index = self.__index\n except AttributeError:\n if self.ndim or self.dtype not in (int, bool) or not self.isconstant:\n raise TypeError('cannot convert {!r} to int'.format(self))\n index = self.__index = int(self.simplified.eval())\n return index\n\n size = property(lambda self: util.product(self.shape) if self.ndim else 1)\n T = property(lambda self: transpose(self))\n\n __add__ = __radd__ = add\n __sub__ = lambda self, other: subtract(self, other)\n __rsub__ = lambda self, other: subtract(other, self)\n __mul__ = __rmul__ = multiply\n __truediv__ = lambda self, other: divide(self, other)\n __rtruediv__ = lambda self, other: divide(other, self)\n __pos__ = lambda self: self\n __neg__ = lambda self: negative(self)\n __pow__ = power\n __abs__ = lambda self: abs(self)\n __mod__ = lambda self, other: mod(self, other)\n __int__ = __index__\n __str__ = __repr__ = lambda self: '{}.{}<{}>'.format(type(self).__module__, type(self).__name__, self._shape_str(form=str))\n _shape_str = lambda self, form: '{}:{}'.format(self.dtype.__name__[0] if hasattr(self, 'dtype') else '?', ','.join(str(int(length)) if length.isconstant else '?' for length in self.shape) if hasattr(self, 'shape') else '?')\n\n sum = sum\n prod = product\n dot = dot\n swapaxes = swapaxes\n transpose = transpose\n choose = lambda self, choices: Choose(self, choices)\n\n @property\n def assparse(self):\n 'Convert to a :class:`SparseArray`.'\n\n return SparseArray(self.simplified._assparse, self.shape, self.dtype)\n\n @property\n def _assparse(self):\n # Convert to a sequence of sparse COO arrays. The returned data is a tuple\n # of `(*indices, values)` tuples, where `values` is an `Array` with the\n # same dtype as `self`, but this is not enforced yet, and each index in\n # `indices` is an `Array` with dtype `int` and the exact same shape as\n # `values`. The length of `indices` equals `self.ndim`. 
In addition, if\n # `self` is 0d the length of `self._assparse` is at most one and the\n # `values` array must be 0d as well.\n #\n # The sparse data can be reassembled after evaluation by\n #\n # dense = numpy.zeros(self.shape)\n # for I0,...,Ik,V in self._assparse:\n # for i0,...,ik,v in zip(I0.eval().ravel(),...,Ik.eval().ravel(),V.eval().ravel()):\n # dense[i0,...,ik] = v\n\n indices = [prependaxes(appendaxes(Range(length), self.shape[i+1:]), self.shape[:i]) for i, length in enumerate(self.shape)]\n return (*indices, self),\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n args = tuple(arg._node(cache, subgraph, times) for arg in self._Evaluable__args)\n bounds = '[{},{}]'.format(*self._intbounds) if self.dtype == int else None\n label = '\\n'.join(filter(None, (type(self).__name__, self._node_details, self._shape_str(form=repr), bounds)))\n cache[self] = node = RegularNode(label, args, {}, (type(self).__name__, times[self]), subgraph)\n return node\n\n # simplifications\n _multiply = lambda self, other: None\n _transpose = lambda self, axes: None\n _insertaxis = lambda self, axis, length: None\n _power = lambda self, n: None\n _add = lambda self, other: None\n _sum = lambda self, axis: None\n _take = lambda self, index, axis: None\n _rtake = lambda self, index, axis: None\n _determinant = lambda self, axis1, axis2: None\n _inverse = lambda self, axis1, axis2: None\n _takediag = lambda self, axis1, axis2: None\n _diagonalize = lambda self, axis: None\n _product = lambda self: None\n _sign = lambda self: None\n _eig = lambda self, symmetric: None\n _inflate = lambda self, dofmap, length, axis: None\n _rinflate = lambda self, func, length, axis: None\n _unravel = lambda self, axis, shape: None\n _ravel = lambda self, axis: None\n _loopsum = lambda self, loop_index: None # NOTE: type of `loop_index` is `_LoopIndex`\n\n @property\n def _unaligned(self):\n return self, tuple(range(self.ndim))\n\n _diagonals = ()\n _inflations = ()\n\n def _derivative(self, var, seen):\n if self.dtype in (bool, int) or var not in self.dependencies:\n return Zeros(self.shape + var.shape, dtype=self.dtype)\n raise NotImplementedError('derivative not defined for {}'.format(self.__class__.__name__))\n\n @property\n def as_evaluable_array(self):\n 'return self'\n\n return self\n\n @property\n def _intbounds(self):\n # inclusive lower and upper bounds\n if self.ndim == 0 and self.dtype == int and self.isconstant:\n value = self.__index__()\n return value, value\n else:\n lower, upper = self._intbounds_impl()\n assert isinstance(lower, int) or lower == float('-inf') or lower == float('inf')\n assert isinstance(upper, int) or upper == float('-inf') or upper == float('inf')\n assert lower <= upper\n return lower, upper\n\n def _intbounds_impl(self):\n return float('-inf'), float('inf')\n\nclass NPoints(Array):\n 'The length of the points axis.'\n\n __slots__ = ()\n\n def __init__(self):\n super().__init__(args=[EVALARGS], shape=(), dtype=int)\n\n def evalf(self, evalargs):\n points = evalargs['_points'].coords\n return types.frozenarray(points.shape[0])\n\n def _intbounds_impl(self):\n return 0, float('inf')\n\nclass Points(Array):\n\n __slots__ = ()\n\n def __init__(self, npoints, ndim):\n super().__init__(args=[EVALARGS], shape=(npoints, ndim), dtype=float)\n\n def evalf(self, evalargs):\n return evalargs['_points'].coords\n\nclass Weights(Array):\n\n __slots__ = ()\n\n def __init__(self, npoints):\n super().__init__(args=[EVALARGS], shape=(npoints,), dtype=float)\n\n def 
evalf(self, evalargs):\n weights = evalargs['_points'].weights\n assert numeric.isarray(weights) and weights.ndim == 1\n return weights\n\nclass Normal(Array):\n 'normal'\n\n __slots__ = 'lgrad',\n\n @types.apply_annotations\n def __init__(self, lgrad:asarray):\n assert lgrad.ndim >= 2 and equalindex(lgrad.shape[-2], lgrad.shape[-1])\n self.lgrad = lgrad\n super().__init__(args=[lgrad], shape=lgrad.shape[:-1], dtype=float)\n\n def _simplified(self):\n if equalindex(self.shape[-1], 1):\n return Sign(Take(self.lgrad, 0))\n unaligned, where = unalign(self.lgrad)\n for axis in self.ndim - 1, self.ndim:\n if axis not in where:\n unaligned = InsertAxis(unaligned, self.lgrad.shape[axis])\n where += axis,\n if len(where) < self.ndim + 1:\n if where[-2:] != (self.ndim - 1, self.ndim):\n unaligned = Transpose(unaligned, numpy.argsort(where))\n where = tuple(sorted(where))\n return align(Normal(unaligned), where[:-1], self.shape)\n\n def evalf(self, lgrad):\n n = lgrad[...,-1]\n # orthonormalize n to G\n G = lgrad[...,:-1]\n GG = numpy.einsum('...ki,...kj->...ij', G, G)\n v1 = numpy.einsum('...ij,...i->...j', G, n)\n v2 = numpy.linalg.solve(GG, v1)\n v3 = numpy.einsum('...ij,...j->...i', G, v2)\n return numeric.normalize(n - v3)\n\n def _derivative(self, var, seen):\n if equalindex(self.shape[-1], 1):\n return zeros(self.shape + var.shape)\n G = self.lgrad[...,:-1]\n invGG = inverse(einsum('Aki,Akj->Aij', G, G))\n return -einsum('Ail,Alj,Ak,AkjB->AiB', G, invGG, self, derivative(G, var, seen))\n\nclass Constant(Array):\n\n __slots__ = 'value',\n __cache__ = '_isunit'\n\n @types.apply_annotations\n def __init__(self, value:types.arraydata):\n self.value = numpy.asarray(value)\n super().__init__(args=[], shape=value.shape, dtype=value.dtype)\n\n def _simplified(self):\n if not self.value.any():\n return zeros_like(self)\n for i, sh in enumerate(self.shape):\n # Find and replace invariant axes with InsertAxis. 
Since `self.value.any()`\n # is False for arrays with a zero-length axis, we can arrive here only if all\n # axes have at least length one, hence the following statement should work.\n first, *others = numpy.rollaxis(self.value, i)\n if all(numpy.equal(first, other).all() for other in others):\n return insertaxis(Constant(first), i, sh)\n\n def evalf(self):\n return self.value\n\n def _node(self, cache, subgraph, times):\n if self.ndim:\n return super()._node(cache, subgraph, times)\n elif self in cache:\n return cache[self]\n else:\n label = '{}'.format(self.value[()])\n if len(label) > 9:\n label = '~{:.2e}'.format(self.value[()])\n cache[self] = node = DuplicatedLeafNode(label, (type(self).__name__, times[self]))\n return node\n\n @property\n def _isunit(self):\n return numpy.equal(self.value, 1).all()\n\n def _transpose(self, axes):\n return Constant(self.value.transpose(axes))\n\n def _sum(self, axis):\n return Constant(numpy.sum(self.value, axis))\n\n def _add(self, other):\n if isinstance(other, Constant):\n return Constant(numpy.add(self.value, other.value))\n\n def _inverse(self, axis1, axis2):\n value = numpy.transpose(self.value, tuple(i for i in range(self.ndim) if i != axis1 and i != axis2) + (axis1, axis2))\n return Constant(numpy.linalg.inv(value))\n\n def _product(self):\n return Constant(self.value.prod(-1))\n\n def _multiply(self, other):\n if self._isunit:\n return other\n if isinstance(other, Constant):\n return Constant(numpy.multiply(self.value, other.value))\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n return Constant(numpy.einsum('...kk->...k', numpy.transpose(self.value,\n list(range(axis1)) + list(range(axis1+1, axis2)) + list(range(axis2+1, self.ndim)) + [axis1, axis2])))\n\n def _take(self, index, axis):\n if index.isconstant:\n index_ = index.eval()\n return Constant(self.value.take(index_, axis))\n\n def _power(self, n):\n if isinstance(n, Constant):\n return Constant(numpy.power(self.value, n.value))\n\n def _eig(self, symmetric):\n eigval, eigvec = (numpy.linalg.eigh if symmetric else numpy.linalg.eig)(self.value)\n return Tuple((Constant(eigval), Constant(eigvec)))\n\n def _sign(self):\n return Constant(numpy.sign(self.value))\n\n def _unravel(self, axis, shape):\n shape = self.value.shape[:axis] + shape + self.value.shape[axis+1:]\n return Constant(self.value.reshape(shape))\n\n def _determinant(self, axis1, axis2):\n value = numpy.transpose(self.value, tuple(i for i in range(self.ndim) if i != axis1 and i != axis2) + (axis1, axis2))\n return Constant(numpy.linalg.det(value))\n\n def _intbounds_impl(self):\n if self.dtype == int and self.value.size:\n return int(self.value.min()), int(self.value.max())\n else:\n return super()._intbounds_impl()\n\nclass InsertAxis(Array):\n\n __slots__ = 'func', 'length'\n __cache__ = '_unaligned', '_inflations'\n\n @types.apply_annotations\n def __init__(self, func:asarray, length:asindex):\n self.func = func\n self.length = length\n super().__init__(args=[func, length], shape=(*func.shape, length), dtype=func.dtype)\n\n @property\n def _diagonals(self):\n return self.func._diagonals\n\n @property\n def _inflations(self):\n return tuple((axis, types.frozendict((dofmap, InsertAxis(func, self.length)) for dofmap, func in parts.items())) for axis, parts in self.func._inflations)\n\n @property\n def _unaligned(self):\n return self.func._unaligned\n\n def _simplified(self):\n return self.func._insertaxis(self.ndim-1, self.length)\n\n def evalf(self, func, length):\n if length == 1:\n return 
func[...,numpy.newaxis]\n try:\n return numpy.ndarray(buffer=func, dtype=func.dtype, shape=(*func.shape, length), strides=(*func.strides, 0))\n except ValueError: # non-contiguous data\n return numpy.repeat(func[...,numpy.newaxis], length, -1)\n\n def _derivative(self, var, seen):\n return insertaxis(derivative(self.func, var, seen), self.ndim-1, self.length)\n\n def _sum(self, i):\n if i == self.ndim - 1:\n return self.func * self.length\n return InsertAxis(sum(self.func, i), self.length)\n\n def _product(self):\n return self.func**self.length\n\n def _power(self, n):\n unaligned1, unaligned2, where = unalign(self, n)\n if len(where) != self.ndim:\n return align(unaligned1 ** unaligned2, where, self.shape)\n\n def _add(self, other):\n unaligned1, unaligned2, where = unalign(self, other)\n if len(where) != self.ndim:\n return align(unaligned1 + unaligned2, where, self.shape)\n\n def _diagonalize(self, axis):\n if axis < self.ndim - 1:\n return insertaxis(diagonalize(self.func, axis, self.ndim - 1), self.ndim - 1, self.length)\n\n def _inflate(self, dofmap, length, axis):\n if axis + dofmap.ndim < self.ndim:\n return InsertAxis(_inflate(self.func, dofmap, length, axis), self.length)\n elif axis == self.ndim:\n return insertaxis(Inflate(self.func, dofmap, length), self.ndim - 1, self.length)\n\n def _insertaxis(self, axis, length):\n if axis == self.ndim - 1:\n return InsertAxis(InsertAxis(self.func, length), self.length)\n\n def _take(self, index, axis):\n if axis == self.ndim - 1:\n return appendaxes(self.func, index.shape)\n return InsertAxis(_take(self.func, index, axis), self.length)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 == self.ndim-1:\n return Transpose.to_end(self.func, axis1)\n else:\n return insertaxis(_takediag(self.func, axis1, axis2), self.ndim-3, self.length)\n\n def _unravel(self, axis, shape):\n if axis == self.ndim - 1:\n return InsertAxis(InsertAxis(self.func, shape[0]), shape[1])\n else:\n return InsertAxis(unravel(self.func, axis, shape), self.length)\n\n def _sign(self):\n return InsertAxis(Sign(self.func), self.length)\n\n def _determinant(self, axis1, axis2):\n if axis1 < self.ndim-1 and axis2 < self.ndim-1:\n return InsertAxis(determinant(self.func, (axis1, axis2)), self.length)\n\n def _inverse(self, axis1, axis2):\n if axis1 < self.ndim-1 and axis2 < self.ndim-1:\n return InsertAxis(inverse(self.func, (axis1, axis2)), self.length)\n\n def _loopsum(self, index):\n return InsertAxis(loop_sum(self.func, index), self.length)\n\n @property\n def _assparse(self):\n return tuple((*(InsertAxis(idx, self.length) for idx in indices), prependaxes(Range(self.length), values.shape), InsertAxis(values, self.length)) for *indices, values in self.func._assparse)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Transpose(Array):\n\n __slots__ = 'func', 'axes'\n __cache__ = '_invaxes', '_unaligned', '_diagonals', '_inflations'\n\n @classmethod\n @types.apply_annotations\n def _end(cls, array:asarray, axes, invert=False):\n axes = [numeric.normdim(array.ndim, axis) for axis in axes]\n if all(a == b for a, b in enumerate(axes, start=array.ndim-len(axes))):\n return array\n trans = [i for i in range(array.ndim) if i not in axes]\n trans.extend(axes)\n if len(trans) != array.ndim:\n raise Exception('duplicate axes')\n return cls(array, numpy.argsort(trans) if invert else trans)\n\n @classmethod\n def from_end(cls, array, *axes):\n return cls._end(array, axes, invert=True)\n\n @classmethod\n def to_end(cls, array, *axes):\n return 
cls._end(array, axes, invert=False)\n\n @types.apply_annotations\n def __init__(self, func:asarray, axes:types.tuple[types.strictint]):\n assert sorted(axes) == list(range(func.ndim))\n self.func = func\n self.axes = axes\n super().__init__(args=[func], shape=[func.shape[n] for n in axes], dtype=func.dtype)\n\n @property\n def _diagonals(self):\n return tuple(frozenset(self._invaxes[i] for i in axes) for axes in self.func._diagonals)\n\n @property\n def _inflations(self):\n return tuple((self._invaxes[axis], types.frozendict((dofmap, Transpose(func, self._axes_for(dofmap.ndim, self._invaxes[axis]))) for dofmap, func in parts.items())) for axis, parts in self.func._inflations)\n\n @property\n def _unaligned(self):\n unaligned, where = unalign(self.func)\n return unaligned, tuple(self._invaxes[i] for i in where)\n\n @property\n def _invaxes(self):\n return tuple(numpy.argsort(self.axes))\n\n def _simplified(self):\n if self.axes == tuple(range(self.ndim)):\n return self.func\n return self.func._transpose(self.axes)\n\n def evalf(self, arr):\n return arr.transpose(self.axes)\n\n @property\n def _node_details(self):\n return ','.join(map(str, self.axes))\n\n def _transpose(self, axes):\n if axes == self._invaxes:\n # NOTE: While we could leave this particular simplification to be dealt\n # with by Transpose, the benefit of handling it directly is that _add and\n # _multiply can rely on _transpose for the right hand side without having\n # to separately account for the trivial case.\n return self.func\n newaxes = [self.axes[i] for i in axes]\n return Transpose(self.func, newaxes)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n orig1, orig2 = sorted(self.axes[axis] for axis in [axis1, axis2])\n if orig1 == self.ndim-2:\n return Transpose(TakeDiag(self.func), (*self.axes[:axis1], *self.axes[axis1+1:axis2], *self.axes[axis2+1:], self.ndim-2))\n trytakediag = self.func._takediag(orig1, orig2)\n if trytakediag is not None:\n return Transpose(trytakediag, [ax-(ax>orig1)-(ax>orig2) for ax in self.axes[:axis1] + self.axes[axis1+1:axis2] + self.axes[axis2+1:]] + [self.ndim-2])\n\n def _sum(self, i):\n axis = self.axes[i]\n trysum = self.func._sum(axis)\n if trysum is not None:\n axes = [ax-(ax>axis) for ax in self.axes if ax != axis]\n return Transpose(trysum, axes)\n if axis == self.ndim - 1:\n return Transpose(Sum(self.func), self._axes_for(0, i))\n\n def _derivative(self, var, seen):\n return transpose(derivative(self.func, var, seen), self.axes+tuple(range(self.ndim, self.ndim+var.ndim)))\n\n def _multiply(self, other):\n other_trans = other._transpose(self._invaxes)\n if other_trans is not None and not isinstance(other_trans, Transpose):\n # The second clause is to avoid infinite recursions; see\n # tests.test_evaluable.simplify.test_multiply_transpose.\n return Transpose(Multiply([self.func, other_trans]), self.axes)\n trymultiply = self.func._multiply(Transpose(other, self._invaxes))\n if trymultiply is not None:\n return Transpose(trymultiply, self.axes)\n\n def _add(self, other):\n other_trans = other._transpose(self._invaxes)\n if other_trans is not None and not isinstance(other_trans, Transpose):\n # The second clause is to avoid infinite recursions\n return Transpose(self.func + other_trans, self.axes)\n tryadd = self.func._add(Transpose(other, self._invaxes))\n if tryadd is not None:\n return Transpose(tryadd, self.axes)\n\n def _take(self, indices, axis):\n trytake = self.func._take(indices, self.axes[axis])\n if trytake is not None:\n return Transpose(trytake, 
self._axes_for(indices.ndim, axis))\n if self.axes[axis] == self.ndim - 1:\n return Transpose(Take(self.func, indices), self._axes_for(indices.ndim, axis))\n\n def _axes_for(self, ndim, axis):\n funcaxis = self.axes[axis]\n axes = [ax+(ax>funcaxis)*(ndim-1) for ax in self.axes if ax != funcaxis]\n axes[axis:axis] = range(funcaxis, funcaxis + ndim)\n return axes\n\n def _power(self, n):\n n_trans = Transpose(n, self._invaxes)\n return Transpose(Power(self.func, n_trans), self.axes)\n\n def _sign(self):\n return Transpose(Sign(self.func), self.axes)\n\n def _unravel(self, axis, shape):\n orig_axis = self.axes[axis]\n tryunravel = self.func._unravel(orig_axis, shape)\n if tryunravel is not None:\n axes = [ax + (ax>orig_axis) for ax in self.axes]\n axes.insert(axis+1, orig_axis+1)\n return Transpose(tryunravel, axes)\n\n def _product(self):\n if self.axes[-1] == self.ndim-1:\n return Transpose(Product(self.func), self.axes[:-1])\n\n def _determinant(self, axis1, axis2):\n orig1, orig2 = self.axes[axis1], self.axes[axis2]\n trydet = self.func._determinant(orig1, orig2)\n if trydet:\n axes = [ax-(ax>orig1)-(ax>orig2) for ax in self.axes if ax != orig1 and ax != orig2]\n return Transpose(trydet, axes)\n\n def _inverse(self, axis1, axis2):\n tryinv = self.func._inverse(self.axes[axis1], self.axes[axis2])\n if tryinv:\n return Transpose(tryinv, self.axes)\n\n def _ravel(self, axis):\n if self.axes[axis] == self.ndim-2 and self.axes[axis+1] == self.ndim-1:\n return Transpose(Ravel(self.func), self.axes[:-1])\n\n def _inflate(self, dofmap, length, axis):\n i = self.axes[axis] if dofmap.ndim else self.func.ndim\n if self.axes[axis:axis+dofmap.ndim] == tuple(range(i,i+dofmap.ndim)):\n tryinflate = self.func._inflate(dofmap, length, i)\n if tryinflate is not None:\n axes = [ax-(ax>i)*(dofmap.ndim-1) for ax in self.axes]\n axes[axis:axis+dofmap.ndim] = i,\n return Transpose(tryinflate, axes)\n\n def _diagonalize(self, axis):\n trydiagonalize = self.func._diagonalize(self.axes[axis])\n if trydiagonalize is not None:\n return Transpose(trydiagonalize, self.axes + (self.ndim,))\n\n def _insertaxis(self, axis, length):\n return Transpose(InsertAxis(self.func, length), self.axes[:axis] + (self.ndim,) + self.axes[axis:])\n\n def _loopsum(self, index):\n return Transpose(loop_sum(self.func, index), self.axes)\n\n @property\n def _assparse(self):\n return tuple((*(indices[i] for i in self.axes), values) for *indices, values in self.func._assparse)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Product(Array):\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert func.dtype != bool, 'Product({})'.format(func)\n self.func = func\n super().__init__(args=[func], shape=func.shape[:-1], dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.func.shape[-1], 1):\n return get(self.func, self.ndim, 0)\n return self.func._product()\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+1\n return numpy.product(arr, axis=-1)\n\n def _derivative(self, var, seen):\n grad = derivative(self.func, var, seen)\n funcs = Product(insertaxis(self.func, -2, self.func.shape[-1]) + Diagonalize(1 - self.func)) # replace diagonal entries by 1\n return einsum('Ai,AiB->AB', funcs, grad)\n\n def _take(self, indices, axis):\n return Product(_take(self.func, indices, axis))\n\n def _takediag(self, axis1, axis2):\n return product(_takediag(self.func, axis1, axis2), self.ndim-2)\n\nclass Inverse(Array):\n '''\n Matrix inverse of ``func`` over the last two axes. 
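These axes must be of\n equal length, as asserted in the constructor. 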
All other axes are\n treated element-wise.\n '''\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert func.ndim >= 2 and equalindex(func.shape[-1], func.shape[-2])\n self.func = func\n super().__init__(args=[func], shape=func.shape, dtype=complex if func.dtype == complex else float)\n\n def _simplified(self):\n result = self.func._inverse(self.ndim-2, self.ndim-1)\n if result is not None:\n return result\n if equalindex(self.func.shape[-1], 1):\n return reciprocal(self.func)\n\n def evalf(self, arr):\n return numeric.inv(arr)\n\n def _derivative(self, var, seen):\n return -einsum('Aij,AjkB,Akl->AilB', self, derivative(self.func, var, seen), self)\n\n def _eig(self, symmetric):\n eigval, eigvec = Eig(self.func, symmetric)\n return Tuple((reciprocal(eigval), eigvec))\n\n def _determinant(self, axis1, axis2):\n if sorted([axis1, axis2]) == [self.ndim-2, self.ndim-1]:\n return reciprocal(Determinant(self.func))\n\n def _take(self, indices, axis):\n if axis < self.ndim - 2:\n return Inverse(_take(self.func, indices, axis))\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 < self.ndim-2:\n return inverse(_takediag(self.func, axis1, axis2), (self.ndim-4, self.ndim-3))\n\n def _unravel(self, axis, shape):\n if axis < self.ndim-2:\n return Inverse(unravel(self.func, axis, shape))\n\nclass Interpolate(Array):\n 'interpolate uniformly spaced data; stepwise for now'\n\n __slots__ = 'xp', 'fp', 'left', 'right'\n\n @types.apply_annotations\n def __init__(self, x:asarray, xp:types.arraydata, fp:types.arraydata, left:types.strictfloat=None, right:types.strictfloat=None):\n xp = numpy.asarray(xp)\n fp = numpy.asarray(fp)\n assert xp.ndim == fp.ndim == 1\n if not numpy.greater(numpy.diff(xp), 0).all():\n warnings.warn('supplied x-values are non-increasing')\n assert x.ndim == 0\n self.xp = xp\n self.fp = fp\n self.left = left\n self.right = right\n super().__init__(args=[x], shape=(), dtype=float)\n\n def evalf(self, x):\n return numpy.interp(x, self.xp, self.fp, self.left, self.right)\n\nclass Determinant(Array):\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert isarray(func) and func.ndim >= 2 and equalindex(func.shape[-1], func.shape[-2])\n self.func = func\n super().__init__(args=[func], shape=func.shape[:-2], dtype=complex if func.dtype == complex else float)\n\n def _simplified(self):\n result = self.func._determinant(self.ndim, self.ndim+1)\n if result is not None:\n return result\n if equalindex(self.func.shape[-1], 1):\n return Take(Take(self.func, zeros((), int)), zeros((), int))\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+2\n return numpy.linalg.det(arr)\n\n def _derivative(self, var, seen):\n return einsum('A,Aji,AijB->AB', self, inverse(self.func), derivative(self.func, var, seen))\n\n def _take(self, index, axis):\n return Determinant(_take(self.func, index, axis))\n\n def _takediag(self, axis1, axis2):\n return determinant(_takediag(self.func, axis1, axis2), (self.ndim-2, self.ndim-1))\n\nclass Multiply(Array):\n\n __slots__ = 'funcs',\n\n @types.apply_annotations\n def __init__(self, funcs:types.frozenmultiset[asarray]):\n self.funcs = funcs\n func1, func2 = funcs\n assert equalshape(func1.shape, func2.shape) and func1.dtype == func2.dtype != bool, 'Multiply({}, {})'.format(func1, func2)\n super().__init__(args=self.funcs, shape=func1.shape, dtype=func1.dtype)\n\n def _simplified(self):\n func1, func2 = self.funcs\n if isuniform(func1, 1):\n return func2\n if 
isuniform(func2, 1):\n return func1\n unaligned1, unaligned2, where = unalign(func1, func2)\n if len(where) != self.ndim:\n return align(unaligned1 * unaligned2, where, self.shape)\n for axis1, axis2, *other in map(sorted, func1._diagonals or func2._diagonals):\n return diagonalize(Multiply(takediag(func, axis1, axis2) for func in self.funcs), axis1, axis2)\n for i, parts in func1._inflations:\n return util.sum(_inflate(f * _take(func2, dofmap, i), dofmap, self.shape[i], i) for dofmap, f in parts.items())\n for i, parts in func2._inflations:\n return util.sum(_inflate(_take(func1, dofmap, i) * f, dofmap, self.shape[i], i) for dofmap, f in parts.items())\n return func1._multiply(func2) or func2._multiply(func1)\n\n def _optimized_for_numpy(self):\n func1, func2 = self.funcs\n if isuniform(func1, -1) and func2.dtype != bool:\n return Negative(func2)\n if isuniform(func2, -1) and func1.dtype != bool:\n return Negative(func1)\n if func1 == sign(func2):\n return Absolute(func2)\n if func2 == sign(func1):\n return Absolute(func1)\n if not self.ndim:\n return\n unaligned1, where1 = unalign(func1)\n unaligned2, where2 = unalign(func2)\n return Einsum((unaligned1, unaligned2), (where1, where2), tuple(range(self.ndim)))\n\n def evalf(self, arr1, arr2):\n return arr1 * arr2\n\n def _sum(self, axis):\n func1, func2 = self.funcs\n unaligned, where = unalign(func1)\n if axis not in where:\n return align(unaligned, [i-(i>axis) for i in where], self.shape[:axis]+self.shape[axis+1:]) * sum(func2, axis)\n unaligned, where = unalign(func2)\n if axis not in where:\n return sum(func1, axis) * align(unaligned, [i-(i>axis) for i in where], self.shape[:axis]+self.shape[axis+1:])\n\n def _add(self, other):\n func1, func2 = self.funcs\n if isinstance(other, Multiply):\n for common in self.funcs & other.funcs:\n return common * Add(self.funcs + other.funcs - [common, common])\n\n def _determinant(self, axis1, axis2):\n func1, func2 = self.funcs\n axis1, axis2 = sorted([axis1, axis2])\n if equalindex(self.shape[axis1], 1) and equalindex(self.shape[axis2], 1):\n return Multiply([determinant(func1, (axis1, axis2)), determinant(func2, (axis1, axis2))])\n unaligned1, where1 = unalign(func1)\n if {axis1, axis2}.isdisjoint(where1):\n d2 = determinant(func2, (axis1, axis2))\n d1 = align(unaligned1**self.shape[axis1], [i-(i>axis1)-(i>axis2) for i in where1 if i not in (axis1, axis2)], d2.shape)\n return d1 * d2\n unaligned2, where2 = unalign(func2)\n if {axis1, axis2}.isdisjoint(where2):\n d1 = determinant(func1, (axis1, axis2))\n d2 = align(unaligned2**self.shape[axis1], [i-(i>axis1)-(i>axis2) for i in where2 if i not in (axis1, axis2)], d1.shape)\n return d1 * d2\n\n def _product(self):\n func1, func2 = self.funcs\n return Multiply([Product(func1), Product(func2)])\n\n def _multiply(self, other):\n func1, func2 = self.funcs\n func1_other = func1._multiply(other)\n if func1_other is not None:\n return Multiply([func1_other, func2])\n func2_other = func2._multiply(other)\n if func2_other is not None:\n return Multiply([func1, func2_other])\n # Reorder the multiplications such that the amount of flops is minimized.\n # The flops are counted based on the lower int bounds of the shape and loop\n # lengths, excluding common inserted axes and invariant loops of the inner\n # product.\n sizes = []\n unaligned = tuple(map(unalign, (func1, func2, other)))\n for (f1, w1), (f2, w2) in itertools.combinations(unaligned, 2):\n lengths = [self.shape[i] for i in set(w1) | set(w2)]\n lengths += [arg.length for arg in f1.arguments | 
f2.arguments if isinstance(arg, _LoopIndex)]\n sizes.append(util.product((max(1, length._intbounds[0]) for length in lengths), 1))\n min_size = min(sizes)\n if sizes[0] == min_size:\n return # status quo\n elif sizes[1] == min_size:\n return (func1 * other) * func2\n elif sizes[2] == min_size:\n return (func2 * other) * func1\n\n def _derivative(self, var, seen):\n func1, func2 = self.funcs\n return einsum('A,AB->AB', func1, derivative(func2, var, seen)) \\\n + einsum('A,AB->AB', func2, derivative(func1, var, seen))\n\n def _takediag(self, axis1, axis2):\n func1, func2 = self.funcs\n return Multiply([_takediag(func1, axis1, axis2), _takediag(func2, axis1, axis2)])\n\n def _take(self, index, axis):\n func1, func2 = self.funcs\n return Multiply([_take(func1, index, axis), _take(func2, index, axis)])\n\n def _sign(self):\n return Multiply([Sign(func) for func in self.funcs])\n\n def _unravel(self, axis, shape):\n return Multiply([unravel(func, axis, shape) for func in self.funcs])\n\n def _inverse(self, axis1, axis2):\n func1, func2 = self.funcs\n if set(unalign(func1)[1]).isdisjoint((axis1, axis2)):\n return divide(inverse(func2, (axis1, axis2)), func1)\n if set(unalign(func2)[1]).isdisjoint((axis1, axis2)):\n return divide(inverse(func1, (axis1, axis2)), func2)\n\n @property\n def _assparse(self):\n func1, func2 = self.funcs\n uninserted1, where1 = unalign(func1)\n uninserted2, where2 = unalign(func2)\n if not set(where1) & set(where2):\n sparse = []\n for *indices1, values1 in uninserted1._assparse:\n for *indices2, values2 in uninserted2._assparse:\n indices = [None] * self.ndim\n for i, j in enumerate(where1):\n indices[j] = appendaxes(indices1[i], values2.shape)\n for i, j in enumerate(where2):\n indices[j] = prependaxes(indices2[i], values1.shape)\n assert all(indices)\n values = appendaxes(values1, values2.shape) * prependaxes(values2, values1.shape)\n sparse.append((*indices, values))\n return tuple(sparse)\n return super()._assparse\n\n def _intbounds_impl(self):\n func1, func2 = self.funcs\n extrema = [b1 and b2 and b1 * b2 for b1 in func1._intbounds for b2 in func2._intbounds]\n return min(extrema), max(extrema)\n\nclass Add(Array):\n\n __slots__ = 'funcs',\n __cache__ = '_inflations'\n\n @types.apply_annotations\n def __init__(self, funcs:types.frozenmultiset[asarray]):\n self.funcs = funcs\n func1, func2 = funcs\n assert equalshape(func1.shape, func2.shape) and func1.dtype == func2.dtype != bool, 'Add({}, {})'.format(func1, func2)\n super().__init__(args=self.funcs, shape=func1.shape, dtype=func1.dtype)\n\n @property\n def _inflations(self):\n func1, func2 = self.funcs\n func2_inflations = dict(func2._inflations)\n inflations = []\n for axis, parts1 in func1._inflations:\n if axis not in func2_inflations:\n continue\n parts2 = func2_inflations[axis]\n dofmaps = set(parts1) | set(parts2)\n if (len(parts1) < len(dofmaps) and len(parts2) < len(dofmaps) # neither set is a subset of the other; total may be dense\n and self.shape[axis].isconstant and all(dofmap.isconstant for dofmap in dofmaps)):\n mask = numpy.zeros(int(self.shape[axis]), dtype=bool)\n for dofmap in dofmaps:\n mask[dofmap.eval()] = True\n if mask.all(): # axis adds up to dense\n continue\n inflations.append((axis, types.frozendict((dofmap, util.sum(parts[dofmap] for parts in (parts1, parts2) if dofmap in parts)) for dofmap in dofmaps)))\n return tuple(inflations)\n\n def _simplified(self):\n func1, func2 = self.funcs\n if func1 == func2:\n return multiply(func1, 2)\n for axes1 in func1._diagonals:\n for axes2 in 
func2._diagonals:\n if len(axes1 & axes2) >= 2:\n axes = sorted(axes1 & axes2)[:2]\n return diagonalize(takediag(func1, *axes) + takediag(func2, *axes), *axes)\n # NOTE: While it is tempting to use the _inflations attribute to push\n # additions through common inflations, doing so may result in infinite\n # recursion in case two or more axes are inflated. This mechanism is\n # illustrated in the following schematic, in which <I> and <J> represent\n # inflations along axis 1 and <K> and <L> inflations along axis 2:\n #\n # A B C D E F G H\n # <I> <J> <I> <J> <I> <J> <I> <J>\n # .-- \\+/ \\+/ \\+/ \\+/ <--.\n # | \\__<K>__/ \\__<L>__/ |\n # | \\_______+_______/ |\n # | |\n # | A E C G B F D H |\n # | <K> <L> <K> <L> <K> <L> <K> <L> |\n # '--> \\+/ \\+/ \\+/ \\+/ --'\n # \\__<I>__/ \\__<J>__/\n # \\_______+_______/\n #\n # We instead rely on Inflate._add to handle this situation.\n return func1._add(func2) or func2._add(func1)\n\n def evalf(self, arr1, arr2=None):\n return arr1 + arr2\n\n def _sum(self, axis):\n return Add([sum(func, axis) for func in self.funcs])\n\n def _derivative(self, var, seen):\n func1, func2 = self.funcs\n return derivative(func1, var, seen) + derivative(func2, var, seen)\n\n def _takediag(self, axis1, axis2):\n func1, func2 = self.funcs\n return Add([_takediag(func1, axis1, axis2), _takediag(func2, axis1, axis2)])\n\n def _take(self, index, axis):\n func1, func2 = self.funcs\n return Add([_take(func1, index, axis), _take(func2, index, axis)])\n\n def _add(self, other):\n func1, func2 = self.funcs\n func1_other = func1._add(other)\n if func1_other is not None:\n return Add([func1_other, func2])\n func2_other = func2._add(other)\n if func2_other is not None:\n return Add([func1, func2_other])\n\n def _unravel(self, axis, shape):\n return Add([unravel(func, axis, shape) for func in self.funcs])\n\n def _loopsum(self, index):\n if any(index not in func.arguments for func in self.funcs):\n return Add([loop_sum(func, index) for func in self.funcs])\n\n def _multiply(self, other):\n func1, func2 = self.funcs\n if (func1._inflations or func1._diagonals) and (func2._inflations or func2._diagonals):\n # NOTE: As this operation is the precise opposite of Multiply._add, there\n # appears to be a great risk of recursion. 
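Specifically, Multiply._add could\n # recombine (func1 * other) + (func2 * other) straight back into the\n # original product via their common factor. 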
However, since both factors\n # are sparse, we can be certain that subsequent simplifications will\n # irreversibly process the new terms before reaching this point.\n return (func1 * other) + (func2 * other)\n\n @property\n def _assparse(self):\n func1, func2 = self.funcs\n return _gathersparsechunks(itertools.chain(func1._assparse, func2._assparse))\n\n def _intbounds_impl(self):\n func1, func2 = self.funcs\n lower1, upper1 = func1._intbounds\n lower2, upper2 = func2._intbounds\n return lower1 + lower2, upper1 + upper2\n\nclass Einsum(Array):\n\n __slots__ = 'args', 'out_idx', 'args_idx', '_einsumfmt', '_has_summed_axes'\n\n @types.apply_annotations\n def __init__(self, args:asarrays, args_idx:types.tuple[types.tuple[types.strictint]], out_idx:types.tuple[types.strictint]):\n if len(args_idx) != len(args):\n raise ValueError('Expected one list of indices for every argument, but got {} and {}, respectively.'.format(len(args_idx), len(args)))\n for iarg, (idx, arg) in enumerate(zip(args_idx, args), 1):\n if len(idx) != arg.ndim:\n raise ValueError('Expected one index for every axis of argument {}, but got {} and {}, respectively.'.format(iarg, len(idx), arg.ndim))\n dtype = args[0].dtype\n if dtype == bool or any(arg.dtype != dtype for arg in args[1:]):\n raise ValueError('Inconsistent or invalid dtypes.')\n if len(out_idx) != len(set(out_idx)):\n raise ValueError('Repeated output indices.')\n lengths = {}\n for idx, arg in zip(args_idx, args):\n for i, length in zip(idx, arg.shape):\n if i not in lengths:\n lengths[i] = length\n elif not equalindex(lengths[i], length):\n raise ValueError('Axes with index {} have different lengths.'.format(i))\n try:\n shape = [lengths[i] for i in out_idx]\n except KeyError:\n raise ValueError('Output axis {} is not listed in any of the arguments.'.format(', '.join(str(i) for i in out_idx if i not in lengths)))\n self.args = args\n self.args_idx = args_idx\n self.out_idx = out_idx\n self._einsumfmt = ','.join(''.join(chr(97+i) for i in idx) for idx in args_idx) + '->' + ''.join(chr(97+i) for i in out_idx)\n self._has_summed_axes = len(lengths) > len(out_idx)\n super().__init__(args=self.args, shape=shape, dtype=dtype)\n\n def evalf(self, *args):\n if self._has_summed_axes:\n args = tuple(numpy.asarray(arg, order='F') for arg in args)\n return numpy.core.multiarray.c_einsum(self._einsumfmt, *args)\n\n @property\n def _node_details(self):\n return self._einsumfmt\n\n def _simplified(self):\n for i, arg in enumerate(self.args):\n if isinstance(arg, Transpose): # absorb `Transpose`\n idx = tuple(map(self.args_idx[i].__getitem__, numpy.argsort(arg.axes)))\n return Einsum(self.args[:i]+(arg.func,)+self.args[i+1:], self.args_idx[:i]+(idx,)+self.args_idx[i+1:], self.out_idx)\n\n def _sum(self, axis):\n if not (0 <= axis < self.ndim):\n raise IndexError('Axis out of range.')\n return Einsum(self.args, self.args_idx, self.out_idx[:axis] + self.out_idx[axis+1:])\n\n def _takediag(self, axis1, axis2):\n if not (0 <= axis1 < axis2 < self.ndim):\n raise IndexError('Axis out of range.')\n ikeep, irm = self.out_idx[axis1], self.out_idx[axis2]\n args_idx = tuple(tuple(ikeep if i == irm else i for i in idx) for idx in self.args_idx)\n return Einsum(self.args, args_idx, self.out_idx[:axis1] + self.out_idx[axis1+1:axis2] + self.out_idx[axis2+1:] + (ikeep,))\n\nclass Sum(Array):\n\n __slots__ = 'func'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n assert func.dtype != bool, 'Sum({})'.format(func)\n self.func = func\n super().__init__(args=[func], 
shape=func.shape[:-1], dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.func.shape[-1], 1):\n return Take(self.func, 0)\n return self.func._sum(self.ndim)\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+1\n return numpy.sum(arr, -1)\n\n def _sum(self, axis):\n trysum = self.func._sum(axis)\n if trysum is not None:\n return Sum(trysum)\n\n def _derivative(self, var, seen):\n return sum(derivative(self.func, var, seen), self.ndim)\n\n @property\n def _assparse(self):\n chunks = []\n for *indices, _rmidx, values in self.func._assparse:\n if self.ndim == 0:\n nsum = values.ndim\n else:\n *indices, where = unalign(*indices)\n values = transpose(values, where + tuple(i for i in range(values.ndim) if i not in where))\n nsum = values.ndim - len(where)\n for i in range(nsum):\n values = Sum(values)\n chunks.append((*indices, values))\n return _gathersparsechunks(chunks)\n\n def _intbounds_impl(self):\n lower_func, upper_func = self.func._intbounds\n lower_length, upper_length = self.func.shape[-1]._intbounds\n if upper_length == 0:\n return 0, 0\n elif lower_length == 0:\n return min(0, lower_func * upper_length), max(0, upper_func * upper_length)\n else:\n return min(lower_func * lower_length, lower_func * upper_length), max(upper_func * lower_length, upper_func * upper_length)\n\nclass TakeDiag(Array):\n\n __slots__ = 'func'\n __cache__ = '_assparse'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n if func.ndim < 2:\n raise Exception('takediag requires an argument of dimension >= 2')\n if not equalindex(func.shape[-1], func.shape[-2]):\n raise Exception('takediag axes do not match')\n self.func = func\n super().__init__(args=[func], shape=func.shape[:-1], dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.shape[-1], 1):\n return Take(self.func, 0)\n return self.func._takediag(self.ndim-1, self.ndim)\n\n def evalf(self, arr):\n assert arr.ndim == self.ndim+1\n return numpy.einsum('...kk->...k', arr, optimize=False)\n\n def _derivative(self, var, seen):\n return takediag(derivative(self.func, var, seen), self.ndim-1, self.ndim)\n\n def _take(self, index, axis):\n if axis < self.ndim - 1:\n return TakeDiag(_take(self.func, index, axis))\n func = _take(Take(self.func, index), index, self.ndim-1)\n for i in reversed(range(self.ndim-1, self.ndim-1+index.ndim)):\n func = takediag(func, i, i+index.ndim)\n return func\n\n def _sum(self, axis):\n if axis != self.ndim - 1:\n return TakeDiag(sum(self.func, axis))\n\n @property\n def _assparse(self):\n chunks = []\n for *indices, values in self.func._assparse:\n if indices[-2] == indices[-1]:\n chunks.append((*indices[:-1], values))\n else:\n *indices, values = map(_flat, (*indices, values))\n mask = Equal(indices[-2], indices[-1])\n chunks.append(tuple(take(arr, mask, 0) for arr in (*indices[:-1], values)))\n return _gathersparsechunks(chunks)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Take(Array):\n\n __slots__ = 'func', 'indices'\n\n @types.apply_annotations\n def __init__(self, func:asarray, indices:asarray):\n if func.ndim == 0:\n raise Exception('cannot take a scalar function')\n if indices.dtype != int:\n raise Exception('invalid indices argument for take')\n self.func = func\n self.indices = indices\n super().__init__(args=[func,indices], shape=func.shape[:-1]+indices.shape, dtype=func.dtype)\n\n def _simplified(self):\n if self.indices.size == 0:\n return zeros_like(self)\n unaligned, where = unalign(self.indices)\n if len(where) < self.indices.ndim:\n n = 
self.func.ndim-1\n return align(Take(self.func, unaligned), (*range(n), *(n+i for i in where)), self.shape)\n trytake = self.func._take(self.indices, self.func.ndim-1) or \\\n self.indices._rtake(self.func, self.func.ndim-1)\n if trytake:\n return trytake\n for axis, parts in self.func._inflations:\n if axis == self.func.ndim - 1:\n return util.sum(Inflate(func, dofmap, self.func.shape[-1])._take(self.indices, self.func.ndim - 1) for dofmap, func in parts.items())\n\n def evalf(self, arr, indices):\n return arr[...,indices]\n\n def _derivative(self, var, seen):\n return _take(derivative(self.func, var, seen), self.indices, self.func.ndim-1)\n\n def _take(self, index, axis):\n if axis >= self.func.ndim-1:\n return Take(self.func, _take(self.indices, index, axis-self.func.ndim+1))\n trytake = self.func._take(index, axis)\n if trytake is not None:\n return Take(trytake, self.indices)\n\n def _sum(self, axis):\n if axis < self.func.ndim - 1:\n return Take(sum(self.func, axis), self.indices)\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass Power(Array):\n\n __slots__ = 'func', 'power'\n\n @types.apply_annotations\n def __init__(self, func:asarray, power:asarray):\n assert equalshape(func.shape, power.shape) and func.dtype == power.dtype != bool, 'Power({}, {})'.format(func, power)\n if power.dtype == int:\n assert power._intbounds[0] >= 0\n self.func = func\n self.power = power\n super().__init__(args=[func,power], shape=func.shape, dtype=func.dtype)\n\n def _simplified(self):\n if iszero(self.power):\n return ones_like(self)\n elif isuniform(self.power, 1):\n return self.func\n elif isuniform(self.power, 2):\n return self.func * self.func\n else:\n return self.func._power(self.power)\n\n def _optimized_for_numpy(self):\n if isuniform(self.power, -1):\n return Reciprocal(self.func)\n elif isuniform(self.power, -2):\n return Reciprocal(self.func * self.func)\n else:\n return self._simplified()\n\n def evalf(self, base, exp):\n return numpy.power(base, exp)\n\n def _derivative(self, var, seen):\n if self.power.isconstant:\n p = self.power.eval()\n return einsum('A,A,AB->AB', p, power(self.func, p - (p!=0)), derivative(self.func, var, seen))\n # self = func**power\n # ln self = power * ln func\n # self` / self = power` * ln func + power * func` / func\n # self` = power` * ln func * self + power * func` * func**(power-1)\n return einsum('A,A,AB->AB', self.power, power(self.func, self.power - 1), derivative(self.func, var, seen)) \\\n + einsum('A,A,AB->AB', ln(self.func), self, derivative(self.power, var, seen))\n\n def _power(self, n):\n func = self.func\n newpower = Multiply([self.power, n])\n if iszero(self.power % 2) and not iszero(newpower % 2):\n func = abs(func)\n return Power(func, newpower)\n\n def _takediag(self, axis1, axis2):\n return Power(_takediag(self.func, axis1, axis2), _takediag(self.power, axis1, axis2))\n\n def _take(self, index, axis):\n return Power(_take(self.func, index, axis), _take(self.power, index, axis))\n\n def _unravel(self, axis, shape):\n return Power(unravel(self.func, axis, shape), unravel(self.power, axis, shape))\n\nclass Pointwise(Array):\n '''\n Abstract base class for pointwise array functions.\n '''\n\n __slots__ = 'args',\n\n deriv = None\n\n @types.apply_annotations\n def __init__(self, *args:asarrays):\n retval = self.evalf(*[numpy.ones((), dtype=arg.dtype) for arg in args])\n shape0 = args[0].shape\n assert all(equalshape(arg.shape, shape0) for arg in args[1:]), 'pointwise arguments have inconsistent shapes'\n self.args = args\n 
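# NOTE: retval above was obtained by probing evalf with scalar ones of\n # each argument dtype, so retval.dtype below matches the dtype promotion\n # numpy will apply at evaluation time.\n 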
super().__init__(args=args, shape=shape0, dtype=retval.dtype)\n\n @classmethod\n def outer(cls, *args):\n '''Alternative constructor that outer-aligns the arguments.\n\n The output shape of this pointwise function is the sum of all shapes of its\n arguments. When called with multiple arguments, the first argument will be\n appended with singleton axes to match the output shape, the second argument\n will be prepended with as many singleton axes as the dimension of the\n original first argument and appended to match the output shape, and so\n forth and so on.\n '''\n\n args = tuple(map(asarray, args))\n shape = builtins.sum((arg.shape for arg in args), ())\n offsets = numpy.cumsum([0]+[arg.ndim for arg in args])\n return cls(*(prependaxes(appendaxes(arg, shape[r:]), shape[:l]) for arg, l, r in zip(args, offsets[:-1], offsets[1:])))\n\n def _simplified(self):\n if self.isconstant:\n retval = self.eval()\n return Constant(retval)\n if len(self.args) == 1 and isinstance(self.args[0], Transpose):\n arg, = self.args\n return Transpose(self.__class__(arg.func), arg.axes)\n *uninserted, where = unalign(*self.args)\n if len(where) != self.ndim:\n return align(self.__class__(*uninserted), where, self.shape)\n\n def _derivative(self, var, seen):\n if self.deriv is None:\n return super()._derivative(var, seen)\n return util.sum(einsum('A,AB->AB', deriv(*self.args), derivative(arg, var, seen)) for arg, deriv in zip(self.args, self.deriv))\n\n def _takediag(self, axis1, axis2):\n return self.__class__(*[_takediag(arg, axis1, axis2) for arg in self.args])\n\n def _take(self, index, axis):\n return self.__class__(*[_take(arg, index, axis) for arg in self.args])\n\n def _unravel(self, axis, shape):\n return self.__class__(*[unravel(arg, axis, shape) for arg in self.args])\n\nclass Reciprocal(Pointwise):\n __slots__ = ()\n evalf = functools.partial(numpy.reciprocal, dtype=float)\n\nclass Negative(Pointwise):\n __slots__ = ()\n evalf = numpy.negative\n\n def _intbounds_impl(self):\n lower, upper = self.args[0]._intbounds\n return -upper, -lower\n\nclass FloorDivide(Pointwise):\n __slots__ = ()\n evalf = numpy.floor_divide\n\nclass Absolute(Pointwise):\n __slots__ = ()\n evalf = numpy.absolute\n\n def _intbounds_impl(self):\n lower, upper = self.args[0]._intbounds\n extrema = builtins.abs(lower), builtins.abs(upper)\n if lower <= 0 and upper >= 0:\n return 0, max(extrema)\n else:\n return min(extrema), max(extrema)\n\nclass Cos(Pointwise):\n 'Cosine, element-wise.'\n __slots__ = ()\n evalf = numpy.cos\n deriv = lambda x: -Sin(x),\n\nclass Sin(Pointwise):\n 'Sine, element-wise.'\n __slots__ = ()\n evalf = numpy.sin\n deriv = Cos,\n\nclass Tan(Pointwise):\n 'Tangent, element-wise.'\n __slots__ = ()\n evalf = numpy.tan\n deriv = lambda x: Cos(x)**-2,\n\nclass ArcSin(Pointwise):\n 'Inverse sine, element-wise.'\n __slots__ = ()\n evalf = numpy.arcsin\n deriv = lambda x: reciprocal(sqrt(1-x**2)),\n\nclass ArcCos(Pointwise):\n 'Inverse cosine, element-wise.'\n __slots__ = ()\n evalf = numpy.arccos\n deriv = lambda x: -reciprocal(sqrt(1-x**2)),\n\nclass ArcTan(Pointwise):\n 'Inverse tangent, element-wise.'\n __slots__ = ()\n evalf = numpy.arctan\n deriv = lambda x: reciprocal(1+x**2),\n\nclass Exp(Pointwise):\n __slots__ = ()\n evalf = numpy.exp\n deriv = lambda x: Exp(x),\n\nclass Log(Pointwise):\n __slots__ = ()\n evalf = numpy.log\n deriv = lambda x: reciprocal(x),\n\nclass Mod(Pointwise):\n __slots__ = ()\n evalf = numpy.mod\n\n def _intbounds_impl(self):\n dividend, divisor = self.args\n lower_divisor, 
upper_divisor = divisor._intbounds\n if lower_divisor > 0:\n lower_dividend, upper_dividend = dividend._intbounds\n if 0 <= lower_dividend and upper_dividend < lower_divisor:\n return lower_dividend, upper_dividend\n else:\n return 0, upper_divisor - 1\n else:\n return super()._intbounds_impl()\n\n def _simplified(self):\n dividend, divisor = self.args\n lower_divisor, upper_divisor = divisor._intbounds\n if lower_divisor > 0:\n lower_dividend, upper_dividend = dividend._intbounds\n if 0 <= lower_dividend and upper_dividend < lower_divisor:\n return dividend\n\nclass ArcTan2(Pointwise):\n __slots__ = ()\n evalf = numpy.arctan2\n deriv = lambda x, y: y / (x**2 + y**2), lambda x, y: -x / (x**2 + y**2)\n\nclass Greater(Pointwise):\n __slots__ = ()\n evalf = numpy.greater\n deriv = (lambda a, b: Zeros(a.shape, dtype=int),) * 2\n\nclass Equal(Pointwise):\n __slots__ = ()\n evalf = numpy.equal\n deriv = (lambda a, b: Zeros(a.shape, dtype=int),) * 2\n\nclass Less(Pointwise):\n __slots__ = ()\n evalf = numpy.less\n deriv = (lambda a, b: Zeros(a.shape, dtype=int),) * 2\n\nclass Minimum(Pointwise):\n __slots__ = ()\n evalf = numpy.minimum\n deriv = lambda x, y: .5 - .5 * Sign(x - y), lambda x, y: .5 + .5 * Sign(x - y)\n\n def _simplified(self):\n if self.dtype == int:\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n if upper1 <= lower2:\n return self.args[0]\n elif upper2 <= lower1:\n return self.args[1]\n return super()._simplified()\n\n def _intbounds_impl(self):\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n return min(lower1, lower2), min(upper1, upper2)\n\nclass Maximum(Pointwise):\n __slots__ = ()\n evalf = numpy.maximum\n deriv = lambda x, y: .5 + .5 * Sign(x - y), lambda x, y: .5 - .5 * Sign(x - y)\n\n def _simplified(self):\n if self.dtype == int:\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n if upper2 <= lower1:\n return self.args[0]\n elif upper1 <= lower2:\n return self.args[1]\n return super()._simplified()\n\n def _intbounds_impl(self):\n lower1, upper1 = self.args[0]._intbounds\n lower2, upper2 = self.args[1]._intbounds\n return max(lower1, lower2), max(upper1, upper2)\n\nclass AsType(Pointwise):\n\n @types.apply_annotations\n def __init__(self, arg: asarray):\n super().__init__(arg)\n dtypes = bool, int, float, complex\n if self.dtype in dtypes[:dtypes.index(arg.dtype)]:\n raise TypeError('invalid cast from {} to {}'.format(arg.dtype, self.dtype))\n\n def _derivative(self, var, seen):\n arg, = self.args\n return self.__class__(derivative(arg, var, seen))\n\n def _simplified(self):\n arg, = self.args\n if arg.dtype == self.dtype:\n return arg\n if iszero(arg):\n return zeros_like(self)\n for axis, parts in arg._inflations:\n return util.sum(_inflate(self.__class__(func), dofmap, self.shape[axis], axis) for dofmap, func in parts.items())\n return super()._simplified()\n\n def _intbounds_impl(self):\n if self.args[0].dtype == bool:\n return 0, 1\n else:\n return self.args[0]._intbounds\n\nclass Int(AsType):\n evalf = functools.partial(numpy.array, copy=False, dtype=int)\nclass Float(AsType):\n evalf = functools.partial(numpy.array, copy=False, dtype=float)\nclass Complex(AsType):\n evalf = functools.partial(numpy.array, copy=False, dtype=complex)\n\nastype = {int: Int, float: Float, complex: Complex}\n\nclass Sign(Array):\n\n __slots__ = 'func',\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n self.func = func\n super().__init__(args=[func], 
shape=func.shape, dtype=func.dtype)\n\n def _simplified(self):\n return self.func._sign()\n\n def evalf(self, arr):\n return numpy.sign(arr)\n\n def _takediag(self, axis1, axis2):\n return Sign(_takediag(self.func, axis1, axis2))\n\n def _take(self, index, axis):\n return Sign(_take(self.func, index, axis))\n\n def _sign(self):\n return self\n\n def _unravel(self, axis, shape):\n return Sign(unravel(self.func, axis, shape))\n\n def _derivative(self, var, seen):\n return Zeros(self.shape + var.shape, dtype=self.dtype)\n\n def _intbounds_impl(self):\n lower, upper = self.func._intbounds\n return int(numpy.sign(lower)), int(numpy.sign(upper))\n\nclass Sampled(Array):\n '''Basis-like identity operator.\n\n Basis-like function that for every point in a predefined set evaluates to the\n unit vector corresponding to its index.\n\n Args\n ----\n points : 1d :class:`Array`\n Present point coordinates.\n expect : 2d :class:`Array`\n Elementwise constant that evaluates to the predefined point coordinates;\n used for error checking and to inherit the shape.\n '''\n\n __slots__ = ()\n\n @types.apply_annotations\n def __init__(self, points:asarray, expect:asarray):\n assert points.ndim == 2\n super().__init__(args=[points, expect], shape=(points.shape[0], expect.shape[0]), dtype=float)\n\n def evalf(self, points, expect):\n assert numpy.equal(points, expect).all(), 'illegal point set'\n return numpy.eye(len(points))\n\[email protected]_annotations\ndef Elemwise(data:types.tuple[types.arraydata], index:asarray, dtype:asdtype):\n unique, indices = util.unique(data)\n if len(unique) == 1:\n return Constant(unique[0])\n # Create shape from data and index, rather than unique and the modified\n # index, in order to avoid potential shape inconsistencies later on.\n shapes = numpy.array([d.shape for d in data])\n shape = [Take(s, index) for s in shapes.T]\n if len(unique) < len(data):\n index = Take(indices, index)\n # Move all axes with constant shape to the left and ravel the remainder.\n is_constant = numpy.all(shapes[1:] == shapes[0], axis=0)\n nconstant = is_constant.sum()\n reorder = numpy.argsort(~is_constant)\n raveled = [numpy.transpose(d, reorder).reshape(*shapes[0, reorder[:nconstant]], -1) for d in unique]\n # Concatenate the raveled axis, take slices, unravel and reorder the axes to\n # the original position.\n concat = numpy.concatenate(raveled, axis=-1)\n if is_constant.all():\n return Take(concat, index)\n var_shape = tuple(shape[i] for i in reorder[nconstant:])\n cumprod = list(var_shape)\n for i in reversed(range(len(var_shape)-1)):\n cumprod[i] *= cumprod[i+1] # work backwards so that the shape check matches in Unravel\n offsets = _SizesToOffsets(asarray([d.shape[-1] for d in raveled]))\n elemwise = Take(concat, Range(cumprod[0]) + Take(offsets, index))\n for i in range(len(var_shape)-1):\n elemwise = Unravel(elemwise, var_shape[i], cumprod[i+1])\n return Transpose(elemwise, tuple(numpy.argsort(reorder)))\n\nclass Eig(Evaluable):\n\n __slots__ = 'symmetric', 'func', '_w_dtype', '_vt_dtype'\n\n @types.apply_annotations\n def __init__(self, func:asarray, symmetric:bool=False):\n assert func.ndim >= 2 and equalindex(func.shape[-1], func.shape[-2])\n self.symmetric = symmetric\n self.func = func\n self._w_dtype = float if symmetric else complex\n self._vt_dtype = float if symmetric and func.dtype != complex else complex\n super().__init__(args=[func])\n\n def __len__(self):\n return 2\n\n def __iter__(self):\n yield ArrayFromTuple(self, index=0, shape=self.func.shape[:-1], dtype=self._w_dtype)\n 
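# index 0 (above): the eigenvalues, shaped like func minus its last axis;\n # index 1 (below): the corresponding right eigenvectors, shaped like func.\n 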
yield ArrayFromTuple(self, index=1, shape=self.func.shape, dtype=self._vt_dtype)\n\n def _simplified(self):\n return self.func._eig(self.symmetric)\n\n def evalf(self, arr):\n w, vt = (numpy.linalg.eigh if self.symmetric else numpy.linalg.eig)(arr)\n w = w.astype(self._w_dtype, copy=False)\n vt = vt.astype(self._vt_dtype, copy=False)\n return (w, vt)\n\nclass ArrayFromTuple(Array):\n\n __slots__ = 'arrays', 'index', '_lower', '_upper'\n\n @types.apply_annotations\n def __init__(self, arrays:strictevaluable, index:types.strictint, shape:asshape, dtype:asdtype, *, _lower=float('-inf'), _upper=float('inf')):\n self.arrays = arrays\n self.index = index\n self._lower = _lower\n self._upper = _upper\n super().__init__(args=[arrays], shape=shape, dtype=dtype)\n\n def evalf(self, arrays):\n assert isinstance(arrays, tuple)\n return arrays[self.index]\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n elif hasattr(self.arrays, '_node_tuple'):\n cache[self] = node = self.arrays._node_tuple(cache, subgraph, times)[self.index]\n return node\n else:\n return super()._node(cache, subgraph, times)\n\n def _intbounds_impl(self):\n return self._lower, self._upper\n\nclass Zeros(Array):\n 'zero'\n\n __slots__ = ()\n __cache__ = '_assparse', '_unaligned'\n\n @types.apply_annotations\n def __init__(self, shape:asshape, dtype:asdtype):\n super().__init__(args=shape, shape=shape, dtype=dtype)\n\n @property\n def _unaligned(self):\n return Zeros((), self.dtype), ()\n\n def evalf(self, *shape):\n return numpy.zeros(shape, dtype=self.dtype)\n\n def _node(self, cache, subgraph, times):\n if self.ndim:\n return super()._node(cache, subgraph, times)\n elif self in cache:\n return cache[self]\n else:\n cache[self] = node = DuplicatedLeafNode('0', (type(self).__name__, times[self]))\n return node\n\n def _add(self, other):\n return other\n\n def _multiply(self, other):\n return self\n\n def _diagonalize(self, axis):\n return Zeros(self.shape+(self.shape[axis],), dtype=self.dtype)\n\n def _sum(self, axis):\n return Zeros(self.shape[:axis] + self.shape[axis+1:], dtype=int if self.dtype == bool else self.dtype)\n\n def _transpose(self, axes):\n shape = [self.shape[n] for n in axes]\n return Zeros(shape, dtype=self.dtype)\n\n def _insertaxis(self, axis, length):\n return Zeros(self.shape[:axis]+(length,)+self.shape[axis:], self.dtype)\n\n def _takediag(self, axis1, axis2):\n return Zeros(self.shape[:axis1]+self.shape[axis1+1:axis2]+self.shape[axis2+1:self.ndim]+(self.shape[axis1],), dtype=self.dtype)\n\n def _take(self, index, axis):\n return Zeros(self.shape[:axis] + index.shape + self.shape[axis+1:], dtype=self.dtype)\n\n def _inflate(self, dofmap, length, axis):\n return Zeros(self.shape[:axis] + (length,) + self.shape[axis+dofmap.ndim:], dtype=self.dtype)\n\n def _unravel(self, axis, shape):\n shape = self.shape[:axis] + shape + self.shape[axis+1:]\n return Zeros(shape, dtype=self.dtype)\n\n def _ravel(self, axis):\n return Zeros(self.shape[:axis] + (self.shape[axis]*self.shape[axis+1],) + self.shape[axis+2:], self.dtype)\n\n def _determinant(self, axis1, axis2):\n shape = list(self.shape)\n assert axis1 != axis2\n length, = set(map(shape.pop, sorted((axis1, axis2), reverse=True)))\n if iszero(length):\n return ones(shape, self.dtype)\n else:\n return Zeros(shape, self.dtype)\n\n @property\n def _assparse(self):\n return ()\n\n def _intbounds_impl(self):\n return 0, 0\n\nclass Inflate(Array):\n\n __slots__ = 'func', 'dofmap', 'length', 'warn'\n __cache__ = '_assparse', '_diagonals', 
'_inflations'\n\n @types.apply_annotations\n def __init__(self, func:asarray, dofmap:asarray, length:asindex):\n if not equalshape(func.shape[func.ndim-dofmap.ndim:], dofmap.shape):\n raise Exception('invalid dofmap')\n self.func = func\n self.dofmap = dofmap\n self.length = length\n self.warn = not dofmap.isconstant\n super().__init__(args=[func,dofmap,length], shape=(*func.shape[:func.ndim-dofmap.ndim], length), dtype=func.dtype)\n\n @property\n def _diagonals(self):\n return tuple(axes for axes in self.func._diagonals if all(axis < self.ndim-1 for axis in axes))\n\n @property\n def _inflations(self):\n inflations = [(self.ndim-1, types.frozendict({self.dofmap: self.func}))]\n for axis, parts in self.func._inflations:\n inflations.append((axis, types.frozendict((dofmap, Inflate(func, self.dofmap, self.length)) for dofmap, func in parts.items())))\n return tuple(inflations)\n\n def _simplified(self):\n for axis in range(self.dofmap.ndim):\n if equalindex(self.dofmap.shape[axis], 1):\n return Inflate(_take(self.func, 0, self.func.ndim-self.dofmap.ndim+axis), _take(self.dofmap, 0, axis), self.length)\n for axis, parts in self.func._inflations:\n i = axis - (self.ndim-1)\n if i >= 0:\n return util.sum(Inflate(f, _take(self.dofmap, ind, i), self.length) for ind, f in parts.items())\n if self.dofmap.ndim == 0 and equalindex(self.dofmap, 0) and equalindex(self.length, 1):\n return InsertAxis(self.func, 1)\n return self.func._inflate(self.dofmap, self.length, self.ndim-1) \\\n or self.dofmap._rinflate(self.func, self.length, self.ndim-1)\n\n def evalf(self, array, indices, length):\n assert indices.ndim == self.dofmap.ndim\n assert length.ndim == 0\n if self.warn and int(length) > indices.size:\n warnings.warn('using explicit inflation; this is usually a bug.', ExpensiveEvaluationWarning)\n inflated = numpy.zeros(array.shape[:array.ndim-indices.ndim] + (length,), dtype=self.dtype)\n numpy.add.at(inflated, (slice(None),)*(self.ndim-1)+(indices,), array)\n return inflated\n\n def _inflate(self, dofmap, length, axis):\n if dofmap.ndim == 0 and dofmap == self.dofmap and length == self.length:\n return diagonalize(self, -1, axis)\n\n def _derivative(self, var, seen):\n return _inflate(derivative(self.func, var, seen), self.dofmap, self.length, self.ndim-1)\n\n def _multiply(self, other):\n return Inflate(Multiply([self.func, Take(other, self.dofmap)]), self.dofmap, self.length)\n\n def _add(self, other):\n if isinstance(other, Inflate) and self.dofmap == other.dofmap:\n return Inflate(Add([self.func, other.func]), self.dofmap, self.length)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 == self.ndim-1:\n func = _take(self.func, self.dofmap, axis1)\n for i in range(self.dofmap.ndim):\n func = _takediag(func, axis1, axis2+self.dofmap.ndim-1-i)\n return Inflate(func, self.dofmap, self.length)\n else:\n return _inflate(_takediag(self.func, axis1, axis2), self.dofmap, self.length, self.ndim-3)\n\n def _take(self, index, axis):\n if axis != self.ndim-1:\n return Inflate(_take(self.func, index, axis), self.dofmap, self.length)\n newindex, newdofmap = SwapInflateTake(self.dofmap, index)\n if self.dofmap.ndim:\n func = self.func\n for i in range(self.dofmap.ndim-1):\n func = Ravel(func)\n intersection = Take(func, newindex)\n else: # kronecker; newindex is all zeros (but of varying length)\n intersection = InsertAxis(self.func, newindex.shape[0])\n if index.ndim:\n swapped = Inflate(intersection, newdofmap, index.size)\n for i in range(index.ndim-1):\n swapped = Unravel(swapped, 
index.shape[i], util.product(index.shape[i+1:]))\n else: # get; newdofmap is all zeros (but of varying length)\n swapped = Sum(intersection)\n return swapped\n\n def _diagonalize(self, axis):\n if axis != self.ndim-1:\n return _inflate(diagonalize(self.func, axis), self.dofmap, self.length, self.ndim-1)\n\n def _sum(self, axis):\n if axis == self.ndim-1:\n func = self.func\n for i in range(self.dofmap.ndim):\n func = Sum(func)\n return func\n return Inflate(sum(self.func, axis), self.dofmap, self.length)\n\n def _unravel(self, axis, shape):\n if axis != self.ndim-1:\n return Inflate(unravel(self.func, axis, shape), self.dofmap, self.length)\n\n def _sign(self):\n if self.dofmap.isconstant and _isunique(self.dofmap.eval()):\n return Inflate(Sign(self.func), self.dofmap, self.length)\n\n @property\n def _assparse(self):\n chunks = []\n flat_dofmap = _flat(self.dofmap)\n keep_dim = self.func.ndim - self.dofmap.ndim\n strides = (1, *itertools.accumulate(self.dofmap.shape[:0:-1], operator.mul))[::-1]\n for *indices, values in self.func._assparse:\n if self.dofmap.ndim:\n inflate_indices = Take(flat_dofmap, functools.reduce(operator.add, map(operator.mul, indices[keep_dim:], strides)))\n else:\n inflate_indices = appendaxes(self.dofmap, values.shape)\n chunks.append((*indices[:keep_dim], inflate_indices, values))\n return tuple(chunks)\n\n def _intbounds_impl(self):\n lower, upper = self.func._intbounds\n return min(lower, 0), max(upper, 0)\n\nclass SwapInflateTake(Evaluable):\n\n def __init__(self, inflateidx, takeidx):\n self.inflateidx = inflateidx\n self.takeidx = takeidx\n super().__init__(args=[inflateidx, takeidx])\n\n def __iter__(self):\n shape = ArrayFromTuple(self, index=2, shape=(), dtype=int, _lower=0),\n return (ArrayFromTuple(self, index=index, shape=shape, dtype=int, _lower=0) for index in range(2))\n\n def evalf(self, inflateidx, takeidx):\n uniqueinflate = _isunique(inflateidx)\n uniquetake = _isunique(takeidx)\n unique = uniqueinflate and uniquetake\n # If both indices are unique (i.e. they do not contain duplicates) then the\n # take and inflate operations can simply be restricted to the intersection,\n # with the location of the intersection in the original index vectors\n # being the new indices for the swapped operations.\n intersection, subinflate, subtake = numpy.intersect1d(inflateidx, takeidx, return_indices=True, assume_unique=unique)\n if unique:\n return subinflate, subtake, numpy.array(len(intersection))\n # Otherwise, while still limiting the operations to the intersection, we\n # need to add the appropriate duplications on either side. The easiest way\n # to do this is to form the permutation matrix A for take (may contain\n # multiple items per column) and B for inflate (may contain several items\n # per row) and take the product AB for the combined operation. To then\n # decompose AB into the equivalent take followed by inflate we can simply\n # take the two index vectors from AB.nonzero() and form CD = AB. 
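Note that every nonzero entry\n # of AB corresponds to a pair (i, j) with takeidx[i] == inflateidx[j], so\n # listing those pairs suffices to recover C and D. 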
The\n # algorithm below does precisely this without forming AB explicitly.\n newinflate = []\n newtake = []\n for k, n in enumerate(intersection):\n for i in [subtake[k]] if uniquetake else numpy.equal(takeidx.ravel(), n).nonzero()[0]:\n for j in [subinflate[k]] if uniqueinflate else numpy.equal(inflateidx.ravel(), n).nonzero()[0]:\n newinflate.append(i)\n newtake.append(j)\n return numpy.array(newtake, dtype=int), numpy.array(newinflate, dtype=int), numpy.array(len(newtake), dtype=int)\n\nclass Diagonalize(Array):\n\n __slots__ = 'func'\n __cache__ = '_diagonals'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n if func.ndim == 0:\n raise Exception('cannot diagonalize scalar function')\n self.func = func\n super().__init__(args=[func], shape=(*func.shape, func.shape[-1]), dtype=func.dtype)\n\n @property\n def _diagonals(self):\n diagonals = [frozenset([self.ndim-2, self.ndim-1])]\n for axes in self.func._diagonals:\n if axes & diagonals[0]:\n diagonals[0] |= axes\n else:\n diagonals.append(axes)\n return tuple(diagonals)\n\n @property\n def _inflations(self):\n return tuple((axis, types.frozendict((dofmap, Diagonalize(func)) for dofmap, func in parts.items()))\n for axis, parts in self.func._inflations\n if axis < self.ndim-2)\n\n def _simplified(self):\n if self.shape[-1] == 1:\n return InsertAxis(self.func, 1)\n return self.func._diagonalize(self.ndim-2)\n\n def evalf(self, arr):\n result = numpy.zeros(arr.shape+(arr.shape[-1],), dtype=arr.dtype, order='F')\n diag = numpy.core.multiarray.c_einsum('...ii->...i', result)\n diag[:] = arr\n return result\n\n def _derivative(self, var, seen):\n return diagonalize(derivative(self.func, var, seen), self.ndim-2, self.ndim-1)\n\n def _inverse(self, axis1, axis2):\n if sorted([axis1, axis2]) == [self.ndim-2, self.ndim-1]:\n return Diagonalize(reciprocal(self.func))\n\n def _determinant(self, axis1, axis2):\n if sorted([axis1, axis2]) == [self.ndim-2, self.ndim-1]:\n return Product(self.func)\n elif axis1 < self.ndim-2 and axis2 < self.ndim-2:\n return Diagonalize(determinant(self.func, (axis1, axis2)))\n\n def _sum(self, axis):\n if axis >= self.ndim - 2:\n return self.func\n return Diagonalize(sum(self.func, axis))\n\n def _takediag(self, axis1, axis2):\n if axis1 == self.ndim-2: # axis2 == self.ndim-1\n return self.func\n elif axis2 >= self.ndim-2:\n return diagonalize(_takediag(self.func, axis1, self.ndim-2), self.ndim-3, self.ndim-2)\n else:\n return diagonalize(_takediag(self.func, axis1, axis2), self.ndim-4, self.ndim-3)\n\n def _take(self, index, axis):\n if axis < self.ndim - 2:\n return Diagonalize(_take(self.func, index, axis))\n func = _take(self.func, index, self.ndim-2)\n for i in range(index.ndim):\n func = diagonalize(func, self.ndim-2+i)\n return _inflate(func, index, self.func.shape[-1], self.ndim-2 if axis == self.ndim-1 else self.ndim-2+index.ndim)\n\n def _unravel(self, axis, shape):\n if axis >= self.ndim - 2:\n diag = diagonalize(diagonalize(Unravel(self.func, *shape), self.ndim-2, self.ndim), self.ndim-1, self.ndim+1)\n return ravel(diag, self.ndim if axis == self.ndim-2 else self.ndim-2)\n else:\n return Diagonalize(unravel(self.func, axis, shape))\n\n def _sign(self):\n return Diagonalize(Sign(self.func))\n\n def _product(self):\n if numeric.isint(self.shape[-1]) and self.shape[-1] > 1:\n return Zeros(self.shape[:-1], dtype=self.dtype)\n\n def _loopsum(self, index):\n return Diagonalize(loop_sum(self.func, index))\n\n @property\n def _assparse(self):\n return tuple((*indices, indices[-1], values) for 
*indices, values in self.func._assparse)\n\nclass Guard(Array):\n 'bar all simplifications'\n\n __slots__ = 'fun',\n\n @types.apply_annotations\n def __init__(self, fun:asarray):\n self.fun = fun\n super().__init__(args=[fun], shape=fun.shape, dtype=fun.dtype)\n\n @property\n def isconstant(self):\n return False # avoid simplifications based on fun being constant\n\n @staticmethod\n def evalf(dat):\n return dat\n\n def _derivative(self, var, seen):\n return Guard(derivative(self.fun, var, seen))\n\nclass TrigNormal(Array):\n 'cos, sin'\n\n __slots__ = 'angle',\n\n @types.apply_annotations\n def __init__(self, angle:asarray):\n self.angle = angle\n super().__init__(args=[angle], shape=(*angle.shape, 2), dtype=float)\n\n def _derivative(self, var, seen):\n return einsum('Ai,AB->AiB', TrigTangent(self.angle), derivative(self.angle, var, seen))\n\n def evalf(self, angle):\n return numpy.stack([numpy.cos(angle), numpy.sin(angle)], axis=self.ndim-1)\n\n def _simplified(self):\n if iszero(self.angle):\n return prependaxes(Inflate(1., 0, 2), self.angle.shape)\n\nclass TrigTangent(Array):\n '-sin, cos'\n\n __slots__ = 'angle',\n\n @types.apply_annotations\n def __init__(self, angle:asarray):\n self.angle = angle\n super().__init__(args=[angle], shape=(*angle.shape, 2), dtype=float)\n\n def _derivative(self, var, seen):\n return -einsum('Ai,AB->AiB', TrigNormal(self.angle), derivative(self.angle, var, seen))\n\n def evalf(self, angle):\n return numpy.stack([-numpy.sin(angle), numpy.cos(angle)], axis=self.ndim-1)\n\n def _simplified(self):\n if iszero(self.angle):\n return prependaxes(Inflate(1., 1, 2), self.angle.shape)\n\nclass Find(Array):\n 'indices of boolean index vector'\n\n __slots__ = 'where',\n\n @types.apply_annotations\n def __init__(self, where:asarray):\n assert isarray(where) and where.ndim == 1 and where.dtype == bool\n self.where = where\n super().__init__(args=[where], shape=[Sum(Int(where))], dtype=int)\n\n def evalf(self, where):\n return where.nonzero()[0]\n\n def _simplified(self):\n if self.isconstant:\n return Constant(self.eval())\n\nclass DerivativeTargetBase(Array):\n 'base class for derivative targets'\n\n __slots__ = ()\n\n @property\n def isconstant(self):\n return False\n\nclass WithDerivative(Array):\n '''Wrap the given function and define the derivative to a target.\n\n The wrapper is typically used together with a virtual derivative target like\n :class:`IdentifierDerivativeTarget`. 
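Taking the derivative of the\n wrapped function to ``var`` returns the stored ``derivative`` argument, while\n derivatives to any other target are forwarded to ``func``. 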
The wrapper is removed in the simplified\n form.\n\n Parameters\n ----------\n func : :class:`Array`\n The function to wrap.\n var : :class:`DerivativeTargetBase`\n The derivative target.\n derivative : :class:`Array`\n The derivative with shape ``func.shape + var.shape``.\n\n See Also\n --------\n :class:`IdentifierDerivativeTarget` : a virtual derivative target\n '''\n\n __slots__ = '_func', '_var', '_deriv'\n\n def __init__(self, func: Array, var: DerivativeTargetBase, derivative: Array) -> None:\n self._func = func\n self._var = var\n self._deriv = derivative\n super().__init__(args=(func,), shape=func.shape, dtype=func.dtype)\n\n @property\n def arguments(self):\n return self._func.arguments | {self._var}\n\n def evalf(self, func: numpy.ndarray) -> numpy.ndarray:\n return func\n\n def _derivative(self, var: DerivativeTargetBase, seen) -> Array:\n if var == self._var:\n return self._deriv\n else:\n return derivative(self._func, var, seen)\n\n def _simplified(self) -> Array:\n return self._func\n\nclass Argument(DerivativeTargetBase):\n '''Array argument, to be substituted before evaluation.\n\n The :class:`Argument` is an :class:`Array` with a known shape, but whose\n values are to be defined later, before evaluation, e.g. using\n :func:`replace_arguments`.\n\n It is possible to take the derivative of an :class:`Array` to an\n :class:`Argument`:\n\n >>> from nutils import evaluable\n >>> a = evaluable.Argument('x', [])\n >>> b = evaluable.Argument('y', [])\n >>> f = a**3 + b**2\n >>> evaluable.derivative(f, a).simplified == (3*a**2).simplified\n True\n\n Args\n ----\n name : :class:`str`\n The Identifier of this argument.\n shape : :class:`tuple` of :class:`int`\\\\s\n The shape of this argument.\n '''\n\n __slots__ = '_name'\n\n @types.apply_annotations\n def __init__(self, name:types.strictstr, shape:asshape, dtype=float):\n self._name = name\n super().__init__(args=[EVALARGS], shape=shape, dtype=dtype)\n\n def evalf(self, evalargs):\n try:\n value = evalargs[self._name]\n except KeyError:\n raise ValueError('argument {!r} missing'.format(self._name))\n else:\n value = numpy.asarray(value)\n assert equalshape(value.shape, self.shape)\n value = value.astype(self.dtype, casting='safe', copy=False)\n return value\n\n def _derivative(self, var, seen):\n if isinstance(var, Argument) and var._name == self._name and self.dtype == float:\n result = _inflate_scalar(1., self.shape)\n for i, sh in enumerate(self.shape):\n result = diagonalize(result, i, i+self.ndim)\n return result\n else:\n return zeros(self.shape+var.shape)\n\n def __str__(self):\n return '{} {!r} <{}>'.format(self.__class__.__name__, self._name, self._shape_str(form=str))\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n else:\n label = '\\n'.join(filter(None, (type(self).__name__, self._name, self._shape_str(form=repr))))\n cache[self] = node = DuplicatedLeafNode(label, (type(self).__name__, times[self]))\n return node\n\n @property\n def arguments(self):\n return frozenset({self})\n\nclass IdentifierDerivativeTarget(DerivativeTargetBase):\n '''Virtual derivative target distinguished by an identifier.\n\n Parameters\n ----------\n identifier : hashable :class:`object`\n The identifier for this derivative target.\n shape : :class:`tuple` of :class:`Array` or :class:`int`\n The shape of this derivative target.\n\n See Also\n --------\n :class:`WithDerivative` : :class:`Array` wrapper with additional derivative\n '''\n\n __slots__ = 'identifier'\n\n @types.apply_annotations\n def 
__init__(self, identifier, shape:asshape):\n self.identifier = identifier\n super().__init__(args=[], shape=shape, dtype=float)\n\n def evalf(self):\n raise Exception('{} cannot be evaluabled'.format(type(self).__name__))\n\nclass Ravel(Array):\n\n __slots__ = 'func'\n __cache__ = '_inflations'\n\n @types.apply_annotations\n def __init__(self, func:asarray):\n if func.ndim < 2:\n raise Exception('cannot ravel function of dimension < 2')\n self.func = func\n super().__init__(args=[func], shape=(*func.shape[:-2], func.shape[-2] * func.shape[-1]), dtype=func.dtype)\n\n @property\n def _inflations(self):\n inflations = []\n stride = self.func.shape[-1]\n n = None\n for axis, old_parts in self.func._inflations:\n if axis == self.ndim - 1 and n is None:\n n = self.func.shape[-1]\n inflations.append((self.ndim - 1, types.frozendict((RavelIndex(dofmap, Range(n), *self.func.shape[-2:]), func) for dofmap, func in old_parts.items())))\n elif axis == self.ndim and n is None:\n n = self.func.shape[-2]\n inflations.append((self.ndim - 1, types.frozendict((RavelIndex(Range(n), dofmap, *self.func.shape[-2:]), func) for dofmap, func in old_parts.items())))\n elif axis < self.ndim - 1:\n inflations.append((axis, types.frozendict((dofmap, Ravel(func)) for dofmap, func in old_parts.items())))\n return tuple(inflations)\n\n def _simplified(self):\n if equalindex(self.func.shape[-2], 1):\n return get(self.func, -2, 0)\n if equalindex(self.func.shape[-1], 1):\n return get(self.func, -1, 0)\n return self.func._ravel(self.ndim-1)\n\n def evalf(self, f):\n return f.reshape(f.shape[:-2] + (f.shape[-2]*f.shape[-1],))\n\n def _multiply(self, other):\n if isinstance(other, Ravel) and equalshape(other.func.shape[-2:], self.func.shape[-2:]):\n return Ravel(Multiply([self.func, other.func]))\n return Ravel(Multiply([self.func, Unravel(other, *self.func.shape[-2:])]))\n\n def _add(self, other):\n return Ravel(self.func + Unravel(other, *self.func.shape[-2:]))\n\n def _sum(self, axis):\n if axis == self.ndim-1:\n return Sum(Sum(self.func))\n return Ravel(sum(self.func, axis))\n\n def _derivative(self, var, seen):\n return ravel(derivative(self.func, var, seen), axis=self.ndim-1)\n\n def _takediag(self, axis1, axis2):\n assert axis1 < axis2\n if axis2 <= self.ndim-2:\n return ravel(_takediag(self.func, axis1, axis2), self.ndim-3)\n else:\n unraveled = unravel(self.func, axis1, self.func.shape[-2:])\n return Ravel(_takediag(_takediag(unraveled, axis1, -2), axis1, -2))\n\n def _take(self, index, axis):\n if axis != self.ndim-1:\n return Ravel(_take(self.func, index, axis))\n\n def _rtake(self, func, axis):\n if self.ndim == 1:\n return Ravel(Take(func, self.func))\n\n def _unravel(self, axis, shape):\n if axis != self.ndim-1:\n return Ravel(unravel(self.func, axis, shape))\n elif equalshape(shape, self.func.shape[-2:]):\n return self.func\n\n def _inflate(self, dofmap, length, axis):\n if axis < self.ndim-dofmap.ndim:\n return Ravel(_inflate(self.func, dofmap, length, axis))\n elif dofmap.ndim == 0:\n return ravel(Inflate(self.func, dofmap, length), self.ndim-1)\n else:\n return _inflate(self.func, Unravel(dofmap, *self.func.shape[-2:]), length, axis)\n\n def _diagonalize(self, axis):\n if axis != self.ndim-1:\n return ravel(diagonalize(self.func, axis), self.ndim-1)\n\n def _insertaxis(self, axis, length):\n return ravel(insertaxis(self.func, axis+(axis==self.ndim), length), self.ndim-(axis==self.ndim))\n\n def _power(self, n):\n return Ravel(Power(self.func, Unravel(n, *self.func.shape[-2:])))\n\n def _sign(self):\n return 
Ravel(Sign(self.func))\n\n def _product(self):\n return Product(Product(self.func))\n\n def _loopsum(self, index):\n return Ravel(loop_sum(self.func, index))\n\n @property\n def _unaligned(self):\n unaligned, where = unalign(self.func)\n for i in self.ndim - 1, self.ndim:\n if i not in where:\n unaligned = InsertAxis(unaligned, self.func.shape[i])\n where += i,\n if where[-2:] != (self.ndim - 1, self.ndim):\n unaligned = Transpose(unaligned, numpy.argsort(where))\n where = tuple(sorted(where))\n return Ravel(unaligned), where[:-1]\n\n @property\n def _assparse(self):\n return tuple((*indices[:-2], indices[-2]*self.func.shape[-1]+indices[-1], values) for *indices, values in self.func._assparse)\n\n def _intbounds_impl(self):\n return self.func._intbounds_impl()\n\nclass Unravel(Array):\n\n __slots__ = 'func'\n\n @types.apply_annotations\n def __init__(self, func:asarray, sh1:asindex, sh2:asindex):\n if func.ndim == 0:\n raise Exception('cannot unravel scalar function')\n if not equalindex(func.shape[-1], sh1 * sh2):\n raise Exception('new shape does not match axis length')\n self.func = func\n super().__init__(args=[func, sh1, sh2], shape=(*func.shape[:-1], sh1, sh2), dtype=func.dtype)\n\n def _simplified(self):\n if equalindex(self.shape[-2], 1):\n return insertaxis(self.func, self.ndim-2, 1)\n if equalindex(self.shape[-1], 1):\n return insertaxis(self.func, self.ndim-1, 1)\n return self.func._unravel(self.ndim-2, self.shape[-2:])\n\n def _derivative(self, var, seen):\n return unravel(derivative(self.func, var, seen), axis=self.ndim-2, shape=self.shape[-2:])\n\n def evalf(self, f, sh1, sh2):\n return f.reshape(f.shape[:-1] + (sh1, sh2))\n\n def _takediag(self, axis1, axis2):\n if axis2 < self.ndim-2:\n return unravel(_takediag(self.func, axis1, axis2), self.ndim-4, self.shape[-2:])\n\n def _take(self, index, axis):\n if axis < self.ndim - 2:\n return Unravel(_take(self.func, index, axis), *self.shape[-2:])\n\n def _sum(self, axis):\n if axis < self.ndim - 2:\n return Unravel(sum(self.func, axis), *self.shape[-2:])\n\n @property\n def _assparse(self):\n return tuple((*indices[:-1], *divmod(indices[-1], appendaxes(self.shape[-1], values.shape)), values) for *indices, values in self.func._assparse)\n\nclass RavelIndex(Array):\n\n @types.apply_annotations\n def __init__(self, ia:asarray, ib:asarray, na:asindex, nb:asindex):\n self._ia = ia\n self._ib = ib\n self._na = na\n self._nb = nb\n self._length = na * nb\n super().__init__(args=[ia, ib, nb], shape=ia.shape + ib.shape, dtype=int)\n\n def evalf(self, ia, ib, nb):\n return ia[(...,)+(numpy.newaxis,)*ib.ndim] * nb + ib\n\n def _take(self, index, axis):\n if axis < self._ia.ndim:\n return RavelIndex(_take(self._ia, index, axis), self._ib, self._na, self._nb)\n else:\n return RavelIndex(self._ia, _take(self._ib, index, axis - self._ia.ndim), self._na, self._nb)\n\n def _rtake(self, func, axis):\n if equalindex(func.shape[axis], self._length):\n return _take(_take(unravel(func, axis, (self._na, self._nb)), self._ib, axis+1), self._ia, axis)\n\n def _rinflate(self, func, length, axis):\n if equalindex(length, self._length):\n return Ravel(Inflate(_inflate(func, self._ia, self._na, func.ndim - self.ndim), self._ib, self._nb))\n\n def _unravel(self, axis, shape):\n if axis < self._ia.ndim:\n return RavelIndex(unravel(self._ia, axis, shape), self._ib, self._na, self._nb)\n else:\n return RavelIndex(self._ia, unravel(self._ib, axis-self._ia.ndim, shape), self._na, self._nb)\n\n def _intbounds_impl(self):\n nbmin, nbmax = self._nb._intbounds\n 
iamin, iamax = self._ia._intbounds\n ibmin, ibmax = self._ib._intbounds\n return iamin * nbmin + ibmin, (iamax and nbmax and iamax * nbmax) + ibmax\n\nclass Range(Array):\n\n __slots__ = 'length'\n\n @types.apply_annotations\n def __init__(self, length:asindex):\n self.length = length\n super().__init__(args=[length], shape=[length], dtype=int)\n\n def _take(self, index, axis):\n return InRange(index, self.length)\n\n def _rtake(self, func, axis):\n if equalindex(self.length, func.shape[axis]):\n return func\n\n def _rinflate(self, func, length, axis):\n if length == self.length:\n return func\n\n def evalf(self, length):\n return numpy.arange(length)\n\n def _intbounds_impl(self):\n lower, upper = self.length._intbounds\n assert lower >= 0\n return 0, max(0, upper - 1)\n\nclass InRange(Array):\n\n __slots__ = 'index', 'length'\n\n @types.apply_annotations\n def __init__(self, index:asarray, length:asarray):\n self.index = index\n self.length = length\n super().__init__(args=[index, length], shape=index.shape, dtype=int)\n\n def evalf(self, index, length):\n assert index.size == 0 or 0 <= index.min() and index.max() < length\n return index\n\n def _simplified(self):\n lower_length, upper_length = self.length._intbounds\n lower_index, upper_index = self.index._intbounds\n if 0 <= lower_index <= upper_index < lower_length:\n return self.index\n\n def _intbounds_impl(self):\n lower_index, upper_index = self.index._intbounds\n lower_length, upper_length = self.length._intbounds\n upper = min(upper_index, max(0, upper_length - 1))\n return max(0, min(lower_index, upper)), upper\n\nclass Polyval(Array):\n '''\n Computes the :math:`k`-dimensional array\n\n .. math:: j_0,\\\\dots,j_{k-1} \\\\mapsto \\\\sum_{\\\\substack{i_0,\\\\dots,i_{n-1}\\\\in\\\\mathbb{N}\\\\\\\\i_0+\\\\cdots+i_{n-1}\\\\le d}} p_0^{i_0} \\\\cdots p_{n-1}^{i_{n-1}} c_{j_0,\\\\dots,j_{k-1},i_0,\\\\dots,i_{n-1}},\n\n where :math:`p` are the :math:`n`-dimensional local coordinates and :math:`c`\n is the argument ``coeffs`` and :math:`d` is the degree of the polynomial,\n where :math:`d` is the length of the last :math:`n` axes of ``coeffs``.\n\n .. warning::\n\n All coefficients with a (combined) degree larger than :math:`d` should be\n zero. 
Failing to do so won't raise an :class:`Exception`, but might give\n incorrect results.\n '''\n\n __slots__ = 'points_ndim', 'coeffs', 'points', 'ngrad'\n\n @types.apply_annotations\n def __init__(self, coeffs:asarray, points:asarray, ngrad:types.strictint=0):\n if points.ndim < 1:\n raise ValueError('argument `points` should have at least one axis')\n if not points.shape[-1].isconstant:\n raise ValueError('the last axis of argument `points` should be a constant integer')\n self.points_ndim = int(points.shape[-1])\n ndim = coeffs.ndim - self.points_ndim\n if ndim < 0:\n raise ValueError('argument `coeffs` should have at least one axis per spatial dimension')\n self.coeffs = coeffs\n self.points = points\n self.ngrad = ngrad\n super().__init__(args=[points, coeffs], shape=points.shape[:-1]+coeffs.shape[:ndim]+(self.points_ndim,)*ngrad, dtype=float)\n\n def evalf(self, points, coeffs):\n for igrad in range(self.ngrad):\n coeffs = numeric.poly_grad(coeffs, self.points_ndim)\n return numeric.poly_eval(coeffs, points)\n\n def _derivative(self, var, seen):\n dpoints = einsum('ABi,AiD->ABD', Polyval(self.coeffs, self.points, self.ngrad+1), derivative(self.points, var, seen), A=self.points.ndim-1)\n dcoeffs = Transpose.from_end(Polyval(Transpose.to_end(derivative(self.coeffs, var, seen), *range(self.coeffs.ndim)), self.points, self.ngrad), *range(self.points.ndim-1, self.ndim))\n return dpoints + dcoeffs\n\n def _take(self, index, axis):\n if axis < self.points.ndim - 1:\n return Polyval(self.coeffs, _take(self.points, index, axis), self.ngrad)\n elif axis < self.points.ndim - 1 + self.coeffs.ndim - self.points_ndim:\n return Polyval(_take(self.coeffs, index, axis - self.points.ndim + 1), self.points, self.ngrad)\n\n def _const_helper(self, *j):\n if len(j) == self.ngrad:\n coeffs = self.coeffs\n for i in reversed(range(self.points_ndim)):\n p = builtins.sum(k==i for k in j)\n coeffs = math.factorial(p)*get(coeffs, i+self.coeffs.ndim-self.points_ndim, p)\n return coeffs\n else:\n return stack([self._const_helper(*j, k) for k in range(self.points_ndim)], axis=self.coeffs.ndim-self.points_ndim+self.ngrad-len(j)-1)\n\n def _simplified(self):\n degree = 0 if self.points_ndim == 0 else self.coeffs.shape[-1]-1 if isinstance(self.coeffs.shape[-1], int) else float('inf')\n if iszero(self.coeffs) or self.ngrad > degree:\n return zeros_like(self)\n elif self.ngrad == degree:\n return prependaxes(self._const_helper(), self.points.shape[:-1])\n points, where = unalign(self.points)\n if points.ndim < self.points.ndim and set(where) != set(range(self.points.ndim-1)):\n if self.points.ndim - 1 not in where:\n points = InsertAxis(points, self.points.shape[-1])\n where += self.points.ndim - 1,\n elif where[-1] != self.points.ndim - 1:\n points = Transpose(points, numpy.argsort(where))\n where = tuple(sorted(where))\n where = where[:-1] + tuple(range(self.points.ndim - 1, self.ndim))\n return align(Polyval(self.coeffs, points, self.ngrad), where, self.shape)\n\nclass PolyOuterProduct(Array):\n\n def __init__(self, left, right):\n nleft = left.shape[1]\n assert all(n == nleft for n in left.shape[2:])\n nright = right.shape[1]\n assert all(n == nright for n in right.shape[2:])\n shape = (left.shape[0] * right.shape[0],) + (nleft + nright - 1,) * (left.ndim + right.ndim - 2)\n super().__init__(args=[left, right], shape=shape, dtype=float)\n\n def evalf(self, left, right):\n return numeric.poly_outer_product(left, right)\n\nclass Legendre(Array):\n '''Series of Legendre polynomial up to and including the given 
degree.\n\n Parameters\n ---------\n x : :class:`Array`\n The coordinates to evaluate the series at.\n degree : :class:`int`\n The degree of the last polynomial of the series.\n '''\n\n def __init__(self, x: Array, degree: int) -> None:\n assert x.dtype == float\n self._x = x\n self._degree = degree\n super().__init__(args=(x,), shape=(*x.shape, degree+1), dtype=float)\n\n def evalf(self, x: numpy.ndarray) -> numpy.ndarray:\n P = numpy.empty((*x.shape, self._degree+1), dtype=float)\n P[...,0] = 1\n if self._degree:\n P[...,1] = x\n for i in range(2, self._degree+1):\n P[...,i] = (2-1/i)*P[...,1]*P[...,i-1] - (1-1/i)*P[...,i-2]\n return P\n\n def _derivative(self, var, seen):\n d = numpy.zeros((self._degree+1,)*2, dtype=int)\n for i in range(self._degree+1):\n d[i,i+1::2] = 2*i+1\n dself = einsum('Ai,ij->Aj', self, d)\n return einsum('Ai,AB->AiB', dself, derivative(self._x, var, seen))\n\n def _simplified(self):\n unaligned, where = unalign(self._x)\n if where != tuple(range(self._x.ndim)):\n return align(Legendre(unaligned, self._degree), (*where, self.ndim-1), self.shape)\n\n def _takediag(self, axis1, axis2):\n if axis1 < self.ndim - 1 and axis2 < self.ndim - 1:\n return Transpose.to_end(Legendre(_takediag(self._x, axis1, axis2), self._degree), -2)\n\n def _take(self, index, axis):\n if axis < self.ndim - 1:\n return Legendre(_take(self._x, index, axis), self._degree)\n\n def _unravel(self, axis, shape):\n if axis < self.ndim - 1:\n return Legendre(unravel(self._x, axis, shape), self._degree)\n\nclass Choose(Array):\n '''Function equivalent of :func:`numpy.choose`.'''\n\n @types.apply_annotations\n def __init__(self, index:asarray, choices:asarrays):\n if index.dtype != int:\n raise Exception('index must be integer valued')\n dtype = choices[0].dtype\n if any(choice.dtype != dtype for choice in choices[1:]):\n raise Exception('dtypes vary')\n shape = index.shape\n if not all(equalshape(choice.shape, shape) for choice in choices):\n raise Exception('shapes vary')\n self.index = index\n self.choices = choices\n super().__init__(args=(index,)+choices, shape=shape, dtype=dtype)\n\n def evalf(self, index, *choices):\n return numpy.choose(index, choices)\n\n def _derivative(self, var, seen):\n return Choose(appendaxes(self.index, var.shape), [derivative(choice, var, seen) for choice in self.choices])\n\n def _simplified(self):\n if all(choice == self.choices[0] for choice in self.choices[1:]):\n return self.choices[0]\n index, *choices, where = unalign(self.index, *self.choices)\n if len(where) < self.ndim:\n return align(Choose(index, choices), where, self.shape)\n\n def _multiply(self, other):\n if isinstance(other, Choose) and self.index == other.index:\n return Choose(self.index, map(multiply, self.choices, other.choices))\n\n def _get(self, i, item):\n return Choose(get(self.index, i, item), [get(choice, i, item) for choice in self.choices])\n\n def _sum(self, axis):\n unaligned, where = unalign(self.index)\n if axis not in where:\n index = align(unaligned, [i-(i>axis) for i in where], self.shape[:axis]+self.shape[axis+1:])\n return Choose(index, [sum(choice, axis) for choice in self.choices])\n\n def _take(self, index, axis):\n return Choose(_take(self.index, index, axis), [_take(choice, index, axis) for choice in self.choices])\n\n def _takediag(self, axis, rmaxis):\n return Choose(takediag(self.index, axis, rmaxis), [takediag(choice, axis, rmaxis) for choice in self.choices])\n\n def _product(self):\n unaligned, where = unalign(self.index)\n if self.ndim-1 not in where:\n index = 
align(unaligned, where, self.shape[:-1])\n return Choose(index, [Product(choice) for choice in self.choices])\n\nclass NormDim(Array):\n\n @types.apply_annotations\n def __init__(self, length: asarray, index: asarray):\n assert length.dtype == int\n assert index.dtype == int\n assert equalshape(length.shape, index.shape)\n # The following corner cases makes the assertion fail, hence we can only\n # assert the bounds if the arrays are guaranteed to be unempty:\n #\n # Take(func, NormDim(func.shape[-1], Range(0) + func.shape[-1]))\n if all(n._intbounds[0] > 0 for n in index.shape):\n assert -length._intbounds[1] <= index._intbounds[0] and index._intbounds[1] <= length._intbounds[1] - 1\n self.length = length\n self.index = index\n super().__init__(args=[length, index], shape=index.shape, dtype=index.dtype)\n\n def evalf(self, length, index):\n assert length.shape == index.shape\n assert length.dtype.kind == 'i'\n assert index.dtype.kind == 'i'\n result = numpy.empty(index.shape, dtype=int)\n for i in numpy.ndindex(index.shape):\n result[i] = numeric.normdim(length[i], index[i])\n return result\n\n def _simplified(self):\n lower_length, upper_length = self.length._intbounds\n lower_index, upper_index = self.index._intbounds\n if 0 <= lower_index and upper_index < lower_length:\n return self.index\n if isinstance(lower_length, int) and lower_length == upper_length and -lower_length <= lower_index and upper_index < 0:\n return self.index + lower_length\n if self.length.isconstant and self.index.isconstant:\n return Constant(self.eval())\n\n def _intbounds_impl(self):\n lower_length, upper_length = self.length._intbounds\n lower_index, upper_index = self.index._intbounds\n if lower_index >= 0:\n return min(lower_index, upper_length - 1), min(upper_index, upper_length - 1)\n elif upper_index < 0 and isinstance(lower_length, int) and lower_length == upper_length:\n return max(lower_index + lower_length, 0), max(upper_index + lower_length, 0)\n else:\n return 0, upper_length - 1\n\nclass _LoopIndex(Argument):\n\n __slots__ = 'length'\n\n @types.apply_annotations\n def __init__(self, name: types.strictstr, length: asindex):\n self.length = length\n super().__init__(name, (), int)\n\n def __str__(self):\n try:\n length = self.length.__index__()\n except EvaluationError:\n length = '?'\n return 'LoopIndex({}, length={})'.format(self._name, length)\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n cache[self] = node = RegularNode('LoopIndex', (), dict(length=self.length._node(cache, subgraph, times)), (type(self).__name__, _Stats()), subgraph)\n return node\n\n def _intbounds_impl(self):\n lower_length, upper_length = self.length._intbounds\n return 0, max(0, upper_length - 1)\n\n def _simplified(self):\n if equalindex(self.length, 1):\n return Zeros((), int)\n\nclass LoopSum(Array):\n\n __cache__ = '_serialized'\n\n def prepare_funcdata(arg):\n # separate shape from array to make it simplifiable (annotations are\n # treated as preprocessor, which means the processed value is returned by\n # self.__reduce__)\n if isinstance(arg, tuple):\n return arg\n arg = asarray(arg)\n return (arg, *arg.shape)\n\n @types.apply_annotations\n def __init__(self, funcdata:prepare_funcdata, index_name:types.strictstr, length:asindex):\n shape = Tuple(funcdata[1:])\n self.index = loop_index(index_name, length)\n if self.index in shape.arguments:\n raise ValueError('the shape of the function must not depend on the index')\n self.func = funcdata[0]\n assert self.func.dtype != bool\n 
self._invariants, self._dependencies = _dependencies_sans_invariants(self.func, self.index)\n super().__init__(args=(shape, length, *self._invariants), shape=self.func.shape, dtype=self.func.dtype)\n\n @property\n def _serialized(self):\n indices = {d: i for i, d in enumerate(itertools.chain([self.index], self._invariants, self._dependencies))}\n return tuple((dep, tuple(map(indices.__getitem__, dep._Evaluable__args))) for dep in self._dependencies)\n\n def evalf(self, shape, length, *args):\n serialized = self._serialized\n result = numpy.zeros(shape, self.dtype)\n for index in range(length):\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf(*[values[i] for i in indices]) for op, indices in serialized)\n result += values[-1]\n return result\n\n def evalf_withtimes(self, times, shape, length, *args):\n serialized = self._serialized\n subtimes = times.setdefault(self, collections.defaultdict(_Stats))\n result = numpy.zeros(shape, self.dtype)\n for index in range(length):\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf_withtimes(subtimes, *[values[i] for i in indices]) for op, indices in serialized)\n result += values[-1]\n return result\n\n def _derivative(self, var, seen):\n return loop_sum(derivative(self.func, var, seen), self.index)\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n subcache = {}\n for arg in self._Evaluable__args:\n subcache[arg] = arg._node(cache, subgraph, times)\n loopgraph = Subgraph('Loop', subgraph)\n subtimes = times.get(self, collections.defaultdict(_Stats))\n sum_kwargs = {'shape[{}]'.format(i): n._node(cache, subgraph, times) for i, n in enumerate(self.shape)}\n sum_kwargs['func'] = self.func._node(subcache, loopgraph, subtimes)\n cache[self] = node = RegularNode('LoopSum', (), sum_kwargs, (type(self).__name__, subtimes['sum']), loopgraph)\n return node\n\n def _simplified(self):\n if iszero(self.func):\n return zeros_like(self)\n elif self.index not in self.func.arguments:\n return self.func * self.index.length\n return self.func._loopsum(self.index)\n\n def _takediag(self, axis1, axis2):\n return loop_sum(_takediag(self.func, axis1, axis2), self.index)\n\n def _take(self, index, axis):\n return loop_sum(_take(self.func, index, axis), self.index)\n\n def _unravel(self, axis, shape):\n return loop_sum(unravel(self.func, axis, shape), self.index)\n\n def _sum(self, axis):\n return loop_sum(sum(self.func, axis), self.index)\n\n def _add(self, other):\n if isinstance(other, LoopSum) and other.index == self.index:\n return loop_sum(self.func + other.func, self.index)\n\n def _multiply(self, other):\n return loop_sum(self.func * other, self.index)\n\n @property\n def _assparse(self):\n chunks = []\n for *elem_indices, elem_values in self.func._assparse:\n if self.ndim == 0:\n values = loop_concatenate(InsertAxis(elem_values, 1), self.index)\n while values.ndim:\n values = Sum(values)\n chunks.append((values,))\n else:\n if elem_values.ndim == 0:\n *elem_indices, elem_values = (InsertAxis(arr, 1) for arr in (*elem_indices, elem_values))\n else:\n # minimize ravels by transposing all variable length axes to the end\n variable = tuple(i for i, n in enumerate(elem_values.shape) if self.index in n.arguments)\n *elem_indices, elem_values = (Transpose.to_end(arr, *variable) for arr in (*elem_indices, elem_values))\n for i in variable[:-1]:\n *elem_indices, elem_values = map(Ravel, (*elem_indices, elem_values))\n assert all(self.index not in n.arguments for n in 
elem_values.shape[:-1])\n chunks.append(tuple(loop_concatenate(arr, self.index) for arr in (*elem_indices, elem_values)))\n return tuple(chunks)\n\nclass _SizesToOffsets(Array):\n\n def __init__(self, sizes):\n assert sizes.ndim == 1\n assert sizes.dtype == int\n assert sizes._intbounds[0] >= 0\n self._sizes = sizes\n super().__init__(args=[sizes], shape=(sizes.shape[0]+1,), dtype=int)\n\n def evalf(self, sizes):\n return numpy.cumsum([0, *sizes])\n\n def _simplified(self):\n unaligned, where = unalign(self._sizes)\n if not where:\n return Range(self.shape[0]) * appendaxes(unaligned, self.shape[:1])\n\n def _intbounds_impl(self):\n n = self._sizes.size._intbounds[1]\n m = self._sizes._intbounds[1]\n return 0, (0 if n == 0 or m == 0 else n * m)\n\nclass LoopConcatenate(Array):\n\n @types.apply_annotations\n def __init__(self, funcdata:asarrays, index_name:types.strictstr, length:asindex):\n self.funcdata = funcdata\n self.func, self.start, stop, *shape = funcdata\n self.index = loop_index(index_name, length)\n if not self.func.ndim:\n raise ValueError('expected an array with at least one axis')\n if any(self.index in n.arguments for n in shape):\n raise ValueError('the shape of the function must not depend on the index')\n self._lcc = LoopConcatenateCombined((self.funcdata,), index_name, length)\n super().__init__(args=[self._lcc], shape=shape, dtype=self.func.dtype)\n\n def evalf(self, arg):\n return arg[0]\n\n def evalf_withtimes(self, times, arg):\n with times[self]:\n return arg[0]\n\n def _derivative(self, var, seen):\n return Transpose.from_end(loop_concatenate(Transpose.to_end(derivative(self.func, var, seen), self.ndim-1), self.index), self.ndim-1)\n\n def _node(self, cache, subgraph, times):\n if self in cache:\n return cache[self]\n else:\n cache[self] = node = self._lcc._node_tuple(cache, subgraph, times)[0]\n return node\n\n def _simplified(self):\n if iszero(self.func):\n return zeros_like(self)\n elif self.index not in self.func.arguments:\n return Ravel(Transpose.from_end(InsertAxis(self.func, self.index.length), -2))\n unaligned, where = unalign(self.func)\n if self.ndim-1 not in where:\n # reinsert concatenation axis, at unit length if possible so we can\n # insert the remainder outside of the loop\n unaligned = InsertAxis(unaligned, self.func.shape[-1] if self.index in self.func.shape[-1].arguments else 1)\n where += self.ndim-1,\n elif where[-1] != self.ndim-1:\n # bring concatenation axis to the end\n unaligned = Transpose(unaligned, numpy.argsort(where))\n where = tuple(sorted(where))\n f = loop_concatenate(unaligned, self.index)\n if not equalindex(self.shape[-1], f.shape[-1]):\n # last axis was reinserted at unit length AND it was not unit length\n # originally - if it was unit length originally then we proceed only if\n # there are other insertions to promote, otherwise we'd get a recursion.\n f = Ravel(InsertAxis(f, self.func.shape[-1]))\n elif len(where) == self.ndim:\n return\n return align(f, where, self.shape)\n\n def _takediag(self, axis1, axis2):\n if axis1 < self.ndim-1 and axis2 < self.ndim-1:\n return Transpose.from_end(loop_concatenate(Transpose.to_end(_takediag(self.func, axis1, axis2), -2), self.index), -2)\n\n def _take(self, index, axis):\n if axis < self.ndim-1:\n return loop_concatenate(_take(self.func, index, axis), self.index)\n\n def _unravel(self, axis, shape):\n if axis < self.ndim-1:\n return loop_concatenate(unravel(self.func, axis, shape), self.index)\n\n @property\n def _assparse(self):\n chunks = []\n for *indices, last_index, values in 
self.func._assparse:\n last_index = last_index + prependaxes(self.start, last_index.shape)\n chunks.append(tuple(loop_concatenate(_flat(arr), self.index) for arr in (*indices, last_index, values)))\n return tuple(chunks)\n\n @property\n def _loop_concatenate_deps(self):\n return (self,) + super()._loop_concatenate_deps\n\n def _intbounds_impl(self):\n return self.func._intbounds\n\nclass LoopConcatenateCombined(Evaluable):\n\n __cache__ = '_serialized'\n\n @types.apply_annotations\n def __init__(self, funcdatas:types.tuple[asarrays], index_name:types.strictstr, length:asindex):\n self._funcdatas = funcdatas\n self._funcs = tuple(func for func, start, stop, *shape in funcdatas)\n self._index_name = index_name\n self._index = loop_index(index_name, length)\n if any(not func.ndim for func in self._funcs):\n raise ValueError('expected an array with at least one axis')\n shapes = [Tuple(shape) for func, start, stop, *shape in funcdatas]\n if any(self._index in shape.arguments for shape in shapes):\n raise ValueError('the shape of the function must not depend on the index')\n self._invariants, self._dependencies = _dependencies_sans_invariants(\n Tuple([Tuple([start, stop, func]) for func, start, stop, *shape in funcdatas]), self._index)\n super().__init__(args=(Tuple(shapes), length, *self._invariants))\n\n @property\n def _serialized(self):\n indices = {d: i for i, d in enumerate(itertools.chain([self._index], self._invariants, self._dependencies))}\n return tuple((dep, tuple(map(indices.__getitem__, dep._Evaluable__args))) for dep in self._dependencies)\n\n def evalf(self, shapes, length, *args):\n serialized = self._serialized\n results = [parallel.shempty(tuple(map(int, shape)), dtype=func.dtype) for func, shape in zip(self._funcs, shapes)]\n with parallel.ctxrange('loop {}'.format(self._index_name), int(length)) as indices:\n for index in indices:\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf(*[values[i] for i in indices]) for op, indices in serialized)\n for result, (start, stop, block) in zip(results, values[-1]):\n result[...,start:stop] = block\n return tuple(results)\n\n def evalf_withtimes(self, times, shapes, length, *args):\n serialized = self._serialized\n subtimes = times.setdefault(self, collections.defaultdict(_Stats))\n results = [parallel.shempty(tuple(map(int, shape)), dtype=func.dtype) for func, shape in zip(self._funcs, shapes)]\n for index in range(length):\n values = [numpy.array(index)]\n values.extend(args)\n values.extend(op.evalf_withtimes(subtimes, *[values[i] for i in indices]) for op, indices in serialized)\n for func, result, (start, stop, block) in zip(self._funcs, results, values[-1]):\n with subtimes['concat', func]:\n result[...,start:stop] = block\n return tuple(results)\n\n def _node_tuple(self, cache, subgraph, times):\n if (self, 'tuple') in cache:\n return cache[self, 'tuple']\n subcache = {}\n for arg in self._invariants:\n subcache[arg] = arg._node(cache, subgraph, times)\n loopgraph = Subgraph('Loop', subgraph)\n subtimes = times.get(self, collections.defaultdict(_Stats))\n concats = []\n for func, start, stop, *shape in self._funcdatas:\n concat_kwargs = {'shape[{}]'.format(i): n._node(cache, subgraph, times) for i, n in enumerate(shape)}\n concat_kwargs['start'] = start._node(subcache, loopgraph, subtimes)\n concat_kwargs['stop'] = stop._node(subcache, loopgraph, subtimes)\n concat_kwargs['func'] = func._node(subcache, loopgraph, subtimes)\n concats.append(RegularNode('LoopConcatenate', (), concat_kwargs, 
(type(self).__name__, subtimes['concat', func]), loopgraph))\n cache[self, 'tuple'] = concats = tuple(concats)\n return concats\n\n# AUXILIARY FUNCTIONS (FOR INTERNAL USE)\n\n_ascending = lambda arg: numpy.greater(numpy.diff(arg), 0).all()\n_normdims = lambda ndim, shapes: tuple(numeric.normdim(ndim,sh) for sh in shapes)\n\ndef _gatherblocks(blocks):\n return tuple((ind, util.sum(funcs)) for ind, funcs in util.gather(blocks))\n\ndef _gathersparsechunks(chunks):\n return tuple((*ind, util.sum(funcs)) for ind, funcs in util.gather((tuple(ind), func) for *ind, func in chunks))\n\ndef _numpy_align(a, b):\n '''check shape consistency and inflate scalars'''\n\n a = asarray(a)\n b = asarray(b)\n if a.dtype != b.dtype:\n type_order = bool, int, float, complex\n if type_order.index(a.dtype) < type_order.index(b.dtype):\n a = astype[b.dtype](a)\n else:\n b = astype[a.dtype](b)\n if not a.ndim:\n return _inflate_scalar(a, b.shape), b\n if not b.ndim:\n return a, _inflate_scalar(b, a.shape)\n if equalshape(a.shape, b.shape):\n return a, b\n raise ValueError('incompatible shapes: {} != {}'.format(*[tuple(int(n) if n.isconstant else n for n in arg.shape) for arg in (a, b)]))\n\ndef _inflate_scalar(arg, shape):\n arg = asarray(arg)\n assert arg.ndim == 0\n for idim, length in enumerate(shape):\n arg = insertaxis(arg, idim, length)\n return arg\n\ndef _isunique(array):\n return numpy.unique(array).size == array.size\n\ndef _dependencies_sans_invariants(func, arg):\n invariants = []\n dependencies = []\n _populate_dependencies_sans_invariants(func, arg, invariants, dependencies, {arg})\n assert (dependencies or invariants or [arg])[-1] == func\n return tuple(invariants), tuple(dependencies)\n\ndef _populate_dependencies_sans_invariants(func, arg, invariants, dependencies, cache):\n if func in cache:\n return\n cache.add(func)\n if arg in func.arguments:\n for child in func._Evaluable__args:\n _populate_dependencies_sans_invariants(child, arg, invariants, dependencies, cache)\n dependencies.append(func)\n else:\n invariants.append(func)\n\nclass _Stats:\n\n __slots__ = 'ncalls', 'time', '_start'\n\n def __init__(self, ncalls: int = 0, time: int = 0) -> None:\n self.ncalls = ncalls\n self.time = time\n self._start = None\n\n def __repr__(self):\n return '_Stats(ncalls={}, time={})'.format(self.ncalls, self.time)\n\n def __add__(self, other):\n if not isinstance(other, _Stats):\n return NotImplemented\n return _Stats(self.ncalls+other.ncalls, self.time+other.time)\n\n def __enter__(self) -> None:\n self._start = time.perf_counter_ns()\n\n def __exit__(self, *exc_info) -> None:\n self.time += time.perf_counter_ns() - self._start\n self.ncalls += 1\n\n# FUNCTIONS\n\ndef isarray(arg):\n return isinstance(arg, Array)\n\ndef _containsarray(arg):\n return any(map(_containsarray, arg)) if isinstance(arg, (list, tuple)) else isarray(arg)\n\ndef iszero(arg):\n return isinstance(arg.simplified, Zeros)\n\ndef zeros(shape, dtype=float):\n return Zeros(shape, dtype)\n\ndef zeros_like(arr):\n return zeros(arr.shape, arr.dtype)\n\ndef isuniform(arg, value):\n unaligned, where = unalign(arg)\n return not where and isinstance(unaligned, Constant) and unaligned.value[()] == value\n\ndef ones(shape, dtype=float):\n return _inflate_scalar(numpy.ones((), dtype=dtype), shape)\n\ndef ones_like(arr):\n return ones(arr.shape, arr.dtype)\n\ndef reciprocal(arg):\n return power(arg, -1.)\n\ndef negative(arg):\n return multiply(arg, -1)\n\ndef sin(x):\n return Sin(x)\n\ndef cos(x):\n return Cos(x)\n\ndef tan(x):\n return Tan(x)\n\ndef 
arcsin(x):\n return ArcSin(x)\n\ndef arccos(x):\n return ArcCos(x)\n\ndef arctan(x):\n return ArcTan(x)\n\ndef exp(x):\n return Exp(x)\n\ndef ln(x):\n return Log(x)\n\ndef divmod(x, y):\n div = FloorDivide(*_numpy_align(x, y))\n mod = x - div * y\n return div, mod\n\ndef mod(arg1, arg2):\n return Mod(*_numpy_align(arg1, arg2))\n\ndef log2(arg):\n return ln(arg) / ln(2)\n\ndef log10(arg):\n return ln(arg) / ln(10)\n\ndef sqrt(arg):\n return power(arg, .5)\n\ndef arctan2(arg1, arg2):\n return ArcTan2(*_numpy_align(arg1, arg2))\n\ndef abs(arg):\n return arg * sign(arg)\n\ndef sinh(arg):\n return .5 * (exp(arg) - exp(-arg))\n\ndef cosh(arg):\n return .5 * (exp(arg) + exp(-arg))\n\ndef tanh(arg):\n return 1 - 2. / (exp(2*arg) + 1)\n\ndef arctanh(arg):\n return .5 * (ln(1+arg) - ln(1-arg))\n\ndef divide(arg1, arg2):\n return multiply(arg1, reciprocal(arg2))\n\ndef subtract(arg1, arg2):\n return add(arg1, negative(arg2))\n\ndef insertaxis(arg, n, length):\n return Transpose.from_end(InsertAxis(arg, length), n)\n\ndef concatenate(args, axis=0):\n lengths = [arg.shape[axis] for arg in args]\n *offsets, totlength = util.cumsum(lengths + [0])\n return Transpose.from_end(util.sum(Inflate(Transpose.to_end(arg, axis), Range(length) + offset, totlength) for arg, length, offset in zip(args, lengths, offsets)), axis)\n\ndef stack(args, axis=0):\n return Transpose.from_end(util.sum(Inflate(arg, i, len(args)) for i, arg in enumerate(args)), axis)\n\ndef repeat(arg, length, axis):\n arg = asarray(arg)\n assert equalindex(arg.shape[axis], 1)\n return insertaxis(get(arg, axis, 0), axis, length)\n\ndef get(arg, iax, item):\n if numeric.isint(item):\n if numeric.isint(arg.shape[iax]):\n item = numeric.normdim(arg.shape[iax], item)\n else:\n assert item >= 0\n return Take(Transpose.to_end(arg, iax), item)\n\ndef determinant(arg, axes=(-2,-1)):\n return Determinant(Transpose.to_end(arg, *axes))\n\ndef grammium(arg, axes=(-2,-1)):\n arg = Transpose.to_end(arg, *axes)\n grammium = einsum('Aki,Akj->Aij', arg, arg)\n return Transpose.from_end(grammium, *axes)\n\ndef sqrt_abs_det_gram(arg, axes=(-2,-1)):\n arg = Transpose.to_end(arg, *axes)\n if equalindex(arg.shape[-1], arg.shape[-2]):\n return abs(Determinant(arg))\n else:\n return sqrt(abs(Determinant(grammium(arg))))\n\ndef inverse(arg, axes=(-2,-1)):\n return Transpose.from_end(Inverse(Transpose.to_end(arg, *axes)), *axes)\n\ndef takediag(arg, axis=-2, rmaxis=-1):\n arg = asarray(arg)\n axis = numeric.normdim(arg.ndim, axis)\n rmaxis = numeric.normdim(arg.ndim, rmaxis)\n assert axis < rmaxis\n return Transpose.from_end(_takediag(arg, axis, rmaxis), axis)\n\ndef _takediag(arg, axis1=-2, axis2=-1):\n return TakeDiag(Transpose.to_end(arg, axis1, axis2))\n\ndef derivative(func, var, seen=None):\n 'derivative'\n\n assert isinstance(var, DerivativeTargetBase), 'invalid derivative target {!r}'.format(var)\n if var.dtype != float or var not in func.arguments:\n return Zeros(func.shape + var.shape, dtype=func.dtype)\n if seen is None:\n seen = {}\n if func in seen:\n result = seen[func]\n else:\n result = func._derivative(var, seen)\n seen[func] = result\n assert equalshape(result.shape, func.shape+var.shape), 'bug in {}._derivative'.format(type(func).__name__)\n return result\n\ndef diagonalize(arg, axis=-1, newaxis=-1):\n arg = asarray(arg)\n axis = numeric.normdim(arg.ndim, axis)\n newaxis = numeric.normdim(arg.ndim+1, newaxis)\n assert axis < newaxis\n return Transpose.from_end(Diagonalize(Transpose.to_end(arg, axis)), axis, newaxis)\n\ndef sign(arg):\n arg = 
asarray(arg)\n return Sign(arg)\n\ndef eig(arg, axes=(-2,-1), symmetric=False):\n eigval, eigvec = Eig(Transpose.to_end(arg, *axes), symmetric)\n return Tuple(Transpose.from_end(v, *axes) for v in [diagonalize(eigval), eigvec])\n\[email protected]_annotations\ndef _takeslice(arg:asarray, s:types.strict[slice], axis:types.strictint):\n n = arg.shape[axis]\n if s.step == None or s.step == 1:\n start = 0 if s.start is None else s.start if s.start >= 0 else s.start + n\n stop = n if s.stop is None else s.stop if s.stop >= 0 else s.stop + n\n if start == 0 and stop == n:\n return arg\n index = Range(stop-start) + start\n elif n.isconstant:\n index = Constant(numpy.arange(*s.indices(arg.shape[axis])))\n else:\n raise Exception('a non-unit slice requires a constant-length axis')\n return take(arg, index, axis)\n\[email protected]_annotations\ndef take(arg:asarray, index:asarray, axis:types.strictint):\n assert index.ndim == 1\n length = arg.shape[axis]\n if index.dtype == bool:\n assert equalindex(index.shape[0], length)\n index = Find(index)\n elif index.isconstant:\n index_ = index.eval()\n ineg = numpy.less(index_, 0)\n if not length.isconstant:\n if ineg.any():\n raise IndexError('negative indices only allowed for constant-length axes')\n elif ineg.any():\n if numpy.less(index_, -int(length)).any():\n raise IndexError('indices out of bounds: {} < {}'.format(index_, -int(length)))\n return _take(arg, Constant(index_ + ineg * int(length)), axis)\n elif numpy.greater_equal(index_, int(length)).any():\n raise IndexError('indices out of bounds: {} >= {}'.format(index_, int(length)))\n elif numpy.greater(numpy.diff(index_), 0).all():\n return mask(arg, numeric.asboolean(index_, int(length)), axis)\n return _take(arg, index, axis)\n\[email protected]_annotations\ndef _take(arg:asarray, index:asarray, axis:types.strictint):\n axis = numeric.normdim(arg.ndim, axis)\n return Transpose.from_end(Take(Transpose.to_end(arg, axis), index), *range(axis, axis+index.ndim))\n\[email protected]_annotations\ndef _inflate(arg:asarray, dofmap:asarray, length:asindex, axis:types.strictint):\n axis = numeric.normdim(arg.ndim+1-dofmap.ndim, axis)\n assert equalshape(dofmap.shape, arg.shape[axis:axis+dofmap.ndim])\n return Transpose.from_end(Inflate(Transpose.to_end(arg, *range(axis, axis+dofmap.ndim)), dofmap, length), axis)\n\ndef mask(arg, mask, axis=0):\n return take(arg, mask, axis)\n\ndef unravel(func, axis, shape):\n func = asarray(func)\n axis = numeric.normdim(func.ndim, axis)\n assert len(shape) == 2\n return Transpose.from_end(Unravel(Transpose.to_end(func, axis), *shape), axis, axis+1)\n\ndef ravel(func, axis):\n func = asarray(func)\n axis = numeric.normdim(func.ndim-1, axis)\n return Transpose.from_end(Ravel(Transpose.to_end(func, axis, axis+1)), axis)\n\ndef _flat(func):\n func = asarray(func)\n if func.ndim == 0:\n return InsertAxis(func, 1)\n while func.ndim > 1:\n func = Ravel(func)\n return func\n\ndef prependaxes(func, shape):\n 'Prepend axes with specified `shape` to `func`.'\n\n func = asarray(func)\n for i, n in enumerate(shape):\n func = insertaxis(func, i, n)\n return func\n\ndef appendaxes(func, shape):\n 'Append axes with specified `shape` to `func`.'\n\n func = asarray(func)\n for n in shape:\n func = InsertAxis(func, n)\n return func\n\ndef loop_index(name, length):\n return _LoopIndex(name, length)\n\ndef loop_sum(func, index):\n func = asarray(func)\n index = types.strict[_LoopIndex](index)\n return LoopSum(func, index._name, index.length)\n\ndef _loop_concatenate_data(func, index):\n 
func = asarray(func)\n index = types.strict[_LoopIndex](index)\n chunk_size = func.shape[-1]\n if chunk_size.isconstant:\n chunk_sizes = InsertAxis(chunk_size, index.length)\n else:\n chunk_sizes = loop_concatenate(InsertAxis(func.shape[-1], 1), index)\n offsets = _SizesToOffsets(chunk_sizes)\n start = Take(offsets, index)\n stop = Take(offsets, index+1)\n return (func, start, stop, *func.shape[:-1], Take(offsets, index.length))\n\ndef loop_concatenate(func, index):\n funcdata = _loop_concatenate_data(func, index)\n return LoopConcatenate(funcdata, index._name, index.length)\n\ndef loop_concatenate_combined(funcs, index):\n unique_funcs = []\n unique_funcs.extend(func for func in funcs if func not in unique_funcs)\n unique_func_data = tuple(_loop_concatenate_data(func, index) for func in unique_funcs)\n loop = LoopConcatenateCombined(unique_func_data, index._name, index.length)\n return tuple(ArrayFromTuple(loop, unique_funcs.index(func), shape, func.dtype) for func, start, stop, *shape in unique_func_data)\n\n@replace\ndef replace_arguments(value, arguments):\n '''Replace :class:`Argument` objects in ``value``.\n\n Replace :class:`Argument` objects in ``value`` according to the ``arguments``\n map, taking into account derivatives to the local coordinates.\n\n Args\n ----\n value : :class:`Array`\n Array to be edited.\n arguments : :class:`collections.abc.Mapping` with :class:`Array`\\\\s as values\n :class:`Argument`\\\\s replacements. The key correspond to the ``name``\n passed to an :class:`Argument` and the value is the replacement.\n\n Returns\n -------\n :class:`Array`\n The edited ``value``.\n '''\n if isinstance(value, Argument) and value._name in arguments:\n v = asarray(arguments[value._name])\n assert equalshape(value.shape, v.shape), (value.shape, v.shape)\n assert value.dtype == v.dtype, (value.dtype, v.dtype)\n return v\n\ndef einsum(fmt, *args, **dims):\n '''Multiply and/or contract arrays via format string.\n\n The format string consists of a comma separated list of axis labels, followed\n by ``->`` and the axis labels of the return value. For example, the following\n swaps the axes of a matrix:\n\n >>> einsum('ij->ji', ones([2,3]))\n nutils.evaluable.Transpose<f:3,2>\n\n Axis labels that do not occur in the return value are summed. For example,\n the following performs a dot product of three matrices:\n\n >>> einsum('ij,jk,kl->il', ones([2,3]), ones([3,4]), ones([4,5]))\n nutils.evaluable.Sum<f:2,5>\n\n In case the dimension of the input and output arrays may vary, a variable\n length axes group can be denoted by a capital. Its length is automatically\n established based on the dimension of the input arrays. The following example\n performs a tensor product of an array and a vector:\n\n >>> einsum('A,i->Ai', ones([2,3,4]), ones([5]))\n nutils.evaluable.Multiply<f:2,3,4,5>\n\n The format string may contain multiple variable length axes groups, but their\n lengths must be resolvable from left to right. 
In case this is not possible,\n lengths may be specified as keyword arguments.\n\n >>> einsum('AjB,i->AijB', ones([2,3,4]), ones([5]), B=1)\n nutils.evaluable.Multiply<f:2,5,3,4>\n '''\n\n sin, sout = fmt.split('->')\n sin = sin.split(',')\n\n if len(sin) != len(args):\n raise ValueError('number of arguments does not match format string')\n\n if any(len(s) != len(set(s)) for s in (*sin, sout)):\n raise ValueError('internal repetitions are not supported')\n\n if any(n < 0 for n in dims.values()):\n raise ValueError('axis group dimensions cannot be negative')\n\n for c in 'abcdefghijklmnopqrstuvwxyz':\n dims.setdefault(c, 1) # lowercase characters default to single dimension\n\n for s, arg in zip(sin, args):\n missing_dims = arg.ndim - builtins.sum(dims.get(c, 0) for c in s)\n unknown_axes = [c for c in s if c not in dims]\n if len(unknown_axes) == 1 and missing_dims >= 0:\n dims[unknown_axes[0]] = missing_dims\n elif len(unknown_axes) > 1:\n raise ValueError('cannot establish length of variable groups {}'.format(', '.join(unknown_axes)))\n elif missing_dims:\n raise ValueError('argument dimensions are inconsistent with format string')\n\n # expand characters to match argument dimension\n *sin, sout = [[(c, d) for c in s for d in range(dims[c])] for s in (*sin, sout)]\n sall = sout + sorted({c for s in sin for c in s if c not in sout})\n\n shapes = {}\n for s, arg in zip(sin, args):\n assert len(s) == arg.ndim\n for c, sh in zip(s, arg.shape):\n if not equalindex(shapes.setdefault(c, sh), sh):\n raise ValueError('shapes do not match for axis {0[0]}{0[1]}'.format(c))\n\n ret = None\n for s, arg in zip(sin, args):\n index = {c: i for i, c in enumerate(s)}\n for c in sall:\n if c not in index:\n index[c] = arg.ndim\n arg = InsertAxis(arg, shapes[c])\n v = Transpose(arg, [index[c] for c in sall])\n ret = v if ret is None else ret * v\n for i in range(len(sout), len(sall)):\n ret = Sum(ret)\n return ret\n\[email protected]_or_multiple\ndef eval_sparse(funcs: AsEvaluableArray, **arguments: typing.Mapping[str, numpy.ndarray]) -> typing.Tuple[numpy.ndarray, ...]:\n '''Evaluate one or several Array objects as sparse data.\n\n Args\n ----\n funcs : :class:`tuple` of Array objects\n Arrays to be evaluated.\n arguments : :class:`dict` (default: None)\n Optional arguments for function evaluation.\n\n Returns\n -------\n results : :class:`tuple` of sparse data arrays\n '''\n\n funcs = tuple(func.as_evaluable_array.assparse for func in funcs)\n with Tuple(funcs).optimized_for_numpy.session(graphviz=graphviz) as eval:\n return eval(**arguments)\n\n\nif __name__ == '__main__':\n # Diagnostics for the development for simplify operations.\n simplify_priority = (\n Transpose, Ravel, # reinterpretation\n InsertAxis, Inflate, Diagonalize, # size increasing\n Multiply, Add, LoopSum, Sign, Power, Inverse, Unravel, # size preserving\n Product, Determinant, TakeDiag, Take, Sum) # size decreasing\n # The simplify priority defines the preferred order in which operations are\n # performed: shape decreasing operations such as Sum and Take should be done\n # as soon as possible, and shape increasing operations such as Inflate and\n # Diagonalize as late as possible. In shuffling the order of operations the\n # two classes might annihilate each other, for example when a Sum passes\n # through a Diagonalize. 
Any shape increasing operations that remain should\n  # end up at the surface, exposing sparsity by means of the assparse method.\n  attrs = ['_'+cls.__name__.lower() for cls in simplify_priority]\n  # The simplify operations responsible for swapping (a.o.) are methods named\n  # '_add', '_multiply', etc. In order to avoid recursions the operations\n  # should only be defined in the direction defined by operator priority. The\n  # following code warns against violations of this rule and lists permissible\n  # simplifications that have not yet been implemented.\n  for i, cls in enumerate(simplify_priority):\n    warn = [attr for attr in attrs[:i] if getattr(cls, attr) is not getattr(Array, attr)]\n    if warn:\n      print('[!] {} should not define {}'.format(cls.__name__, ', '.join(warn)))\n    missing = [attr for attr in attrs[i+1:] if not getattr(cls, attr) is not getattr(Array, attr)]\n    if missing:\n      print('[ ] {} could define {}'.format(cls.__name__, ', '.join(missing)))\n\n# vim:sw=2:sts=2:et\n" ]
[ [ "numpy.sum", "numpy.core.multiarray.c_einsum", "numpy.ones", "numpy.multiply", "numpy.intersect1d", "numpy.less", "numpy.diff", "numpy.dtype", "numpy.argsort", "numpy.asarray", "numpy.add", "numpy.ndindex", "numpy.transpose", "numpy.rollaxis", "numpy.cos", "numpy.ndarray", "numpy.unique", "numpy.zeros", "numpy.equal", "numpy.linalg.det", "numpy.repeat", "numpy.arange", "numpy.all", "numpy.power", "numpy.einsum", "numpy.linalg.solve", "numpy.cumsum", "numpy.sign", "numpy.interp", "numpy.empty", "numpy.linalg.inv", "numpy.product", "numpy.array", "numpy.sin", "numpy.concatenate", "numpy.choose" ] ]
zhaoruinan/indy_vision_task_sim
[ "71500c69de53808f8a691d600e56213c1768a9c6" ]
[ "src_ros2/ros2_sim_indy_pybullet/ros2_sim_indy_pybullet/indy7_fixed_cam_test.py" ]
[ "import pybullet as p\nimport time\nimport numpy as np\nobjects = ['apple', 'orange', 'banana', 'milk', 'orange']\np.connect(p.GUI)\np.setGravity(0, 0, -9.8)\n#planeId = p.loadURDF(\"plane.urdf\", [0, 0, 0])\nTableId = p.loadURDF(\"table/table.urdf\", [0.45, 0.35, -0.65])\nindyId= p.loadURDF(\"indy7.urdf\", [0, 0, 0])\nnum_obj = len(objects)\nobj_postions = np.random.rand(num_obj,2)\nz_postion = np.empty(num_obj); z_postion.fill(0.2)\nobj_postions = np.c_[ obj_postions, z_postion ] \nprint(obj_postions)\nfor object in objects:\n obj_path = \"models/urdf/\"+object+\".urdf\"\n objId = p.loadURDF(obj_path, obj_postions[-1,])\n obj_postions = np.delete(obj_postions, -1, 0)\n#appleId = p.loadURDF(\"models/urdf/apple.urdf\", [-0.4, 0, 0.1])\n\n\nviewMatrix = p.computeViewMatrix(\n cameraEyePosition=[0, 0, 3],\n cameraTargetPosition=[0, 0, 0],\n cameraUpVector=[0, 1, 0])\nprojectionMatrix = p.computeProjectionMatrixFOV(\n fov=45.0,\n aspect=1.0,\n nearVal=0.1,\n farVal=3.1)\nwidth, height, rgbImg, depthImg, segImg = p.getCameraImage(\n width=224, \n height=224,\n viewMatrix=viewMatrix,\n projectionMatrix=projectionMatrix)\np.resetBasePositionAndOrientation(indyId, [0, 0, 0.03], [0, 0, 0, 1])\np.setRealTimeSimulation(1) \ntime.sleep(1000)\np.disconnect()" ]
[ [ "numpy.empty", "numpy.random.rand", "numpy.delete" ] ]
RamsteinWR/PneumoniaRSNA1
[ "08bdba51292307a78ef711c6be4a63faea240ddf" ]
[ "models/RelationNetworks/relation_rcnn/core/rcnn.py" ]
[ "\"\"\"\nFast R-CNN:\ndata =\n {'data': [num_images, c, h, w],\n 'rois': [num_rois, 5]}\nlabel =\n {'label': [num_rois],\n 'bbox_target': [num_rois, 4 * num_classes],\n 'bbox_weight': [num_rois, 4 * num_classes]}\nroidb extended format [image_index]\n ['image', 'height', 'width', 'flipped',\n 'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']\n\"\"\"\n\nimport numpy as np\nimport numpy.random as npr\nfrom bbox.bbox_regression import expand_bbox_regression_targets\nfrom bbox.bbox_transform import bbox_overlaps, bbox_transform\nfrom utils.image import get_image, tensor_vstack\n\n\ndef get_rcnn_testbatch(roidb, cfg):\n \"\"\"\n return a dict of testbatch\n :param roidb: ['image', 'flipped'] + ['boxes']\n :return: data, label, im_info\n \"\"\"\n # assert len(roidb) == 1, 'Single batch only'\n imgs, roidb = get_image(roidb, cfg)\n im_array = imgs\n im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]\n\n im_rois = [roidb[i]['boxes'] for i in range(len(roidb))]\n\n if cfg.network.ROIDispatch:\n data = []\n for i in range(len(im_rois)):\n w = im_rois[i][:, 2] - im_rois[i][:, 0] + 1\n h = im_rois[i][:, 3] - im_rois[i][:, 1] + 1\n feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)\n\n rois_0 = im_rois[i][np.where(feat_id == 0)]\n if len(rois_0) == 0:\n rois_0 = np.zeros((1, 4))\n rois_1 = im_rois[i][np.where(feat_id == 1)]\n if len(rois_1) == 0:\n rois_1 = np.zeros((1, 4))\n rois_2 = im_rois[i][np.where(feat_id == 2)]\n if len(rois_2) == 0:\n rois_2 = np.zeros((1, 4))\n rois_3 = im_rois[i][np.where(feat_id == 3)]\n if len(rois_3) == 0:\n rois_3 = np.zeros((1, 4))\n # stack batch index\n data.append({'data': im_array[i],\n 'rois_0': np.hstack((0 * np.ones((rois_0.shape[0], 1)), rois_0)),\n 'rois_1': np.hstack((0 * np.ones((rois_1.shape[0], 1)), rois_1)),\n 'rois_2': np.hstack((0 * np.ones((rois_2.shape[0], 1)), rois_2)),\n 'rois_3': np.hstack((0 * np.ones((rois_3.shape[0], 1)), rois_3))})\n if cfg.TEST.LEARN_NMS:\n data[-1]['im_info'] = im_info[i]\n else:\n rois = im_rois\n rois_array = [np.hstack((0 * np.ones((rois[i].shape[0], 1)), rois[i])) for i in range(len(rois))]\n\n data = []\n for i in range(len(roidb)):\n data.append({'data': im_array[i],\n 'rois': rois_array[i]})\n if cfg.TEST.LEARN_NMS:\n data[-1]['im_info'] = im_info[i]\n\n label = {}\n\n return data, label, im_info\n\n\ndef get_rcnn_batch(roidb, cfg):\n \"\"\"\n return a dict of multiple images\n :param roidb: a list of dict, whose length controls batch size\n ['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']\n :return: data, label\n \"\"\"\n num_images = len(roidb)\n imgs, roidb = get_image(roidb, cfg)\n im_array = tensor_vstack(imgs)\n\n assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, \\\n 'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)\n\n if cfg.TRAIN.BATCH_ROIS == -1:\n rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])\n fg_rois_per_image = rois_per_image\n else:\n rois_per_image = cfg.TRAIN.BATCH_ROIS / cfg.TRAIN.BATCH_IMAGES\n fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)\n\n if cfg.network.ROIDispatch:\n rois_array_0 = list()\n rois_array_1 = list()\n rois_array_2 = list()\n rois_array_3 = list()\n else:\n rois_array = list()\n\n gt_labels_array = list()\n labels_array = list()\n bbox_targets_array = list()\n bbox_weights_array = list()\n\n for im_i in 
range(num_images):\n roi_rec = roidb[im_i]\n\n # infer num_classes from gt_overlaps\n num_classes = roi_rec['gt_overlaps'].shape[1]\n\n # label = class RoI has max overlap with\n rois = roi_rec['boxes']\n labels = roi_rec['max_classes']\n overlaps = roi_rec['max_overlaps']\n bbox_targets = roi_rec['bbox_targets']\n gt_lables = roi_rec['is_gt']\n\n if cfg.TRAIN.BATCH_ROIS == -1:\n im_rois, labels_t, bbox_targets, bbox_weights = \\\n sample_rois_v2(rois, num_classes, cfg, labels=labels, overlaps=overlaps, bbox_targets=bbox_targets,\n gt_boxes=None)\n\n assert np.abs(im_rois - rois).max() < 1e-3\n assert np.abs(labels_t - labels).max() < 1e-3\n else:\n im_rois, labels, bbox_targets, bbox_weights, gt_lables = \\\n sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,\n labels, overlaps, bbox_targets, gt_lables=gt_lables)\n\n # project im_rois\n # do not round roi\n if cfg.network.ROIDispatch:\n w = im_rois[:, 2] - im_rois[:, 0] + 1\n h = im_rois[:, 3] - im_rois[:, 1] + 1\n feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)\n\n rois_0_idx = np.where(feat_id == 0)[0]\n rois_0 = im_rois[rois_0_idx]\n if len(rois_0) == 0:\n rois_0 = np.zeros((1, 4))\n label_0 = -np.ones((1,))\n gt_label_0 = -np.ones((1,))\n bbox_targets_0 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_0 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_0 = labels[rois_0_idx]\n gt_label_0 = gt_lables[rois_0_idx]\n bbox_targets_0 = bbox_targets[rois_0_idx]\n bbox_weights_0 = bbox_weights[rois_0_idx]\n\n rois_1_idx = np.where(feat_id == 1)[0]\n rois_1 = im_rois[rois_1_idx]\n if len(rois_1) == 0:\n rois_1 = np.zeros((1, 4))\n label_1 = -np.ones((1,))\n gt_label_1 = -np.ones((1,))\n bbox_targets_1 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_1 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_1 = labels[rois_1_idx]\n gt_label_1 = gt_lables[rois_1_idx]\n bbox_targets_1 = bbox_targets[rois_1_idx]\n bbox_weights_1 = bbox_weights[rois_1_idx]\n\n rois_2_idx = np.where(feat_id == 2)\n rois_2 = im_rois[rois_2_idx]\n if len(rois_2) == 0:\n rois_2 = np.zeros((1, 4))\n label_2 = -np.ones((1,))\n gt_label_2 = -np.ones((1,))\n bbox_targets_2 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_2 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_2 = labels[rois_2_idx]\n gt_label_2 = gt_lables[rois_2_idx]\n bbox_targets_2 = bbox_targets[rois_2_idx]\n bbox_weights_2 = bbox_weights[rois_2_idx]\n\n rois_3_idx = np.where(feat_id == 3)\n rois_3 = im_rois[rois_3_idx]\n if len(rois_3) == 0:\n rois_3 = np.zeros((1, 4))\n label_3 = -np.ones((1,))\n gt_label_3 = -np.ones((1,))\n bbox_targets_3 = np.zeros((1, bbox_targets.shape[1]))\n bbox_weights_3 = np.zeros((1, bbox_weights.shape[1]))\n else:\n label_3 = labels[rois_3_idx]\n gt_label_3 = gt_lables[rois_3_idx]\n bbox_targets_3 = bbox_targets[rois_3_idx]\n bbox_weights_3 = bbox_weights[rois_3_idx]\n\n # stack batch index\n rois_array_0.append(np.hstack((im_i * np.ones((rois_0.shape[0], 1)), rois_0)))\n rois_array_1.append(np.hstack((im_i * np.ones((rois_1.shape[0], 1)), rois_1)))\n rois_array_2.append(np.hstack((im_i * np.ones((rois_2.shape[0], 1)), rois_2)))\n rois_array_3.append(np.hstack((im_i * np.ones((rois_3.shape[0], 1)), rois_3)))\n\n labels = np.concatenate([label_0, label_1, label_2, label_3], axis=0)\n gt_lables = np.concatenate([gt_label_0, gt_label_1, gt_label_2, gt_label_3], axis=0)\n bbox_targets = np.concatenate([bbox_targets_0, bbox_targets_1, bbox_targets_2, bbox_targets_3], axis=0)\n bbox_weights = 
np.concatenate([bbox_weights_0, bbox_weights_1, bbox_weights_2, bbox_weights_3], axis=0)\n else:\n rois = im_rois\n batch_index = im_i * np.ones((rois.shape[0], 1))\n rois_array_this_image = np.hstack((batch_index, rois))\n rois_array.append(rois_array_this_image)\n\n # add labels\n gt_labels_array.append(gt_lables)\n labels_array.append(labels)\n bbox_targets_array.append(bbox_targets)\n bbox_weights_array.append(bbox_weights)\n\n gt_labels_array = np.array(gt_labels_array)\n nongt_index_array = np.where(gt_labels_array == 0)[1]\n labels_array = np.array(labels_array)\n bbox_targets_array = np.array(bbox_targets_array)\n bbox_weights_array = np.array(bbox_weights_array)\n\n if cfg.network.USE_NONGT_INDEX:\n\n label = {'label': labels_array,\n 'nongt_index': nongt_index_array,\n 'bbox_target': bbox_targets_array,\n 'bbox_weight': bbox_weights_array}\n\n else:\n label = {'label': labels_array,\n 'bbox_target': bbox_targets_array,\n 'bbox_weight': bbox_weights_array}\n\n if cfg.network.ROIDispatch:\n rois_array_0 = np.array(rois_array_0)\n rois_array_1 = np.array(rois_array_1)\n rois_array_2 = np.array(rois_array_2)\n rois_array_3 = np.array(rois_array_3)\n # rois_concate = np.concatenate((rois_array_0, rois_array_1, rois_array_2, rois_array_3), axis=1)\n # gt_rois_t = rois_concate[:, gt_labels_array[0,:] > 0]\n data = {'data': im_array,\n 'rois_0': rois_array_0,\n 'rois_1': rois_array_1,\n 'rois_2': rois_array_2,\n 'rois_3': rois_array_3}\n else:\n rois_array = np.array(rois_array)\n data = {'data': im_array,\n 'rois': rois_array}\n\n if cfg.TRAIN.LEARN_NMS:\n # im info\n im_info = np.array([roidb[0]['im_info']], dtype=np.float32)\n # gt_boxes\n if roidb[0]['gt_classes'].size > 0:\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)\n gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]\n gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]\n else:\n gt_boxes = np.empty((0, 5), dtype=np.float32)\n data['im_info'] = im_info\n data['gt_boxes'] = gt_boxes\n\n return data, label\n\n\ndef sample_rois_v2(rois, num_classes, cfg,\n labels=None, overlaps=None, bbox_targets=None, gt_boxes=None):\n \"\"\"\n generate random sample of ROIs comprising foreground and background examples\n :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index\n :param fg_rois_per_image: foreground roi number\n :param rois_per_image: total roi number\n :param num_classes: number of classes\n :param labels: maybe precomputed\n :param overlaps: maybe precomputed (max_overlaps)\n :param bbox_targets: maybe precomputed\n :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)\n :return: (labels, rois, bbox_targets, bbox_weights)\n \"\"\"\n if labels is None:\n overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))\n gt_assignment = overlaps.argmax(axis=1)\n overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # set labels of bg_rois to be 0\n bg_ind = np.where(overlaps < cfg.TRAIN.BG_THRESH_HI)[0]\n labels[bg_ind] = 0\n\n # load or compute bbox_target\n if bbox_targets is not None:\n bbox_target_data = bbox_targets\n else:\n targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment, :4])\n if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:\n targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))\n / np.array(cfg.TRAIN.BBOX_STDS))\n bbox_target_data = np.hstack((labels[:, np.newaxis], targets))\n\n bbox_targets, bbox_weights = \\\n expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)\n\n return rois, 
labels, bbox_targets, bbox_weights\n\n\ndef sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,\n labels=None, overlaps=None, bbox_targets=None, gt_boxes=None, gt_lables=None):\n \"\"\"\n generate random sample of ROIs comprising foreground and background examples\n :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index\n :param fg_rois_per_image: foreground roi number\n :param rois_per_image: total roi number\n :param num_classes: number of classes\n :param labels: maybe precomputed\n :param overlaps: maybe precomputed (max_overlaps)\n :param bbox_targets: maybe precomputed\n :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)\n :return: (labels, rois, bbox_targets, bbox_weights)\n \"\"\"\n if labels is None:\n overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))\n gt_assignment = overlaps.argmax(axis=1)\n overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # foreground RoI with FG_THRESH overlap\n fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)\n # Sample foreground regions without replacement\n if len(fg_indexes) > fg_rois_per_this_image:\n fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)\n\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n # Compute number of background RoIs to take from this image (guarding against there being fewer than desired)\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\n bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)\n # Sample foreground regions without replacement\n if len(bg_indexes) > bg_rois_per_this_image:\n bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)\n\n # indexes selected\n keep_indexes = np.append(fg_indexes, bg_indexes)\n\n # pad more to ensure a fixed minibatch size\n while keep_indexes.shape[0] < rois_per_image:\n gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])\n gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)\n keep_indexes = np.append(keep_indexes, gap_indexes)\n\n # select gt_labels\n gt_lables = gt_lables[keep_indexes]\n # select labels\n labels = labels[keep_indexes]\n # set labels of bg_rois to be 0\n bg_ind = np.where(overlaps[keep_indexes] < cfg.TRAIN.BG_THRESH_HI)[0]\n labels[bg_ind] = 0\n rois = rois[keep_indexes]\n\n # load or compute bbox_target\n if bbox_targets is not None:\n bbox_target_data = bbox_targets[keep_indexes, :]\n else:\n targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])\n if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:\n targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))\n / np.array(cfg.TRAIN.BBOX_STDS))\n bbox_target_data = np.hstack((labels[:, np.newaxis], targets))\n\n bbox_targets, bbox_weights = \\\n expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)\n\n return rois, labels, bbox_targets, bbox_weights, gt_lables\n" ]
[ [ "numpy.sqrt", "numpy.sum", "numpy.ones", "numpy.append", "numpy.empty", "numpy.zeros", "numpy.concatenate", "numpy.abs", "numpy.random.choice", "numpy.hstack", "numpy.array", "numpy.where", "numpy.round", "numpy.minimum" ] ]
ezg/PanoramicDataWin8
[ "229e9ab64cda30a0bd1c6d39a70754ba4651ad43" ]
[ "backend/binrange.py" ]
[ "#!/usr/bin/python\nimport json\nimport numpy as np\nimport pandas as pd\nimport math\n\nclass BinRange():\n def __init__(self, dataMinValue, dataMaxValue, targetBinNumber):\n self.dataMinValue = float(dataMinValue)\n self.dataMaxValue = float(dataMaxValue)\n self.targetBinNumber = float(targetBinNumber)\n self.maxValue = 0\n self.minValue = 0\n \n def getIndex(self, value):\n raise NotImplementedError()\n \n def addStep(self, value):\n raise NotImplementedError()\n \n def getLabel(self, value):\n return str(value)\n \n def getBins(self):\n raise NotImplementedError()\n \n def getUpdatedBinRange(self, dataMin, dataMax, df, dimension):\n raise NotImplementedError()\n\n def getLabels(self):\n labels = []\n for b in self.getBins():\n labels.append((b, b, self.addStep(b), self.getLabel(b)))\n return labels\n\nclass AggregatedBinRange(BinRange): \n \n def __init__(self):\n BinRange.__init__(self, 0, 0, 0)\n self.type = 'AggregatedBinRange'\n \n @staticmethod\n def initialize():\n scale = AggregatedBinRange()\n return scale\n \n def getIndex(self, value):\n return 0 \n \n def addStep(self, value):\n return value + 1\n \n def getBins(self):\n scale = [0]\n return scale\n \n def getUpdatedBinRange(self, dataMin, dataMax, df, dimension):\n return AggregatedBinRange()\n\nclass NominalBinRange(BinRange): \n def __init__(self):\n BinRange.__init__(self, 0, 0, 0)\n self.labelsValue = {} #string, index\n self.valuesLabel = {} #index, string\n self.type = 'NominalBinRange'\n \n @staticmethod\n def initialize(df, val):\n uniqueValues = df[val].unique()\n \n scale = NominalBinRange()\n for u in uniqueValues:\n if not u in scale.labelsValue:\n index = len(scale.labelsValue.keys())\n scale.labelsValue[u] = index\n scale.valuesLabel[index] = u\n return scale\n \n def getIndexFromValue(self, value):\n return self.labelsValue[value] \n \n def getIndex(self, value):\n return value\n \n def addStep(self, value):\n return value\n \n def getLabel(self, value):\n return self.valuesLabel[value] \n \n def getBins(self):\n scale = []\n for idx, label in enumerate(self.labelsValue):\n scale.append(idx)\n return scale\n \n def getUpdatedBinRange(self, dataMin, dataMax, df, val):\n newRange = NominalBinRange()\n newRange.labelsValue = self.labelsValue\n newRange.valuesLabel = self.valuesLabel\n \n uniqueValues = df[val].unique()\n \n for u in uniqueValues:\n if not u in newRange.labelsValue:\n index = len(newRange.labelsValue.keys())\n newRange.labelsValue[u] = index\n newRange.valuesLabel[index] = u\n return newRange\n \nclass QuantitativeBinRange(BinRange): \n def __init__(self, dataMinValue, dataMaxValue, targetBinNumber, isIntegerRange):\n BinRange.__init__(self, dataMinValue, dataMaxValue, targetBinNumber)\n self.isIntegerRange = isIntegerRange\n self.step = 0\n self.type = 'QuantitativeBinRange'\n \n @staticmethod\n def initialize(dataMinValue, dataMaxValue, targetBinNumber, isIntegerRange):\n scale = QuantitativeBinRange(dataMinValue, dataMaxValue, targetBinNumber, isIntegerRange)\n extent = scale.__getExtent(scale.dataMinValue, scale.dataMaxValue, scale.targetBinNumber)\n scale.minValue = extent[0]\n scale.maxValue = extent[1]\n scale.step = extent[2]\n return scale\n \n def getIndex(self, value):\n return int(math.floor(round((value - self.minValue) / self.step, 8))) \n \n def addStep(self, value):\n return value + self.step\n \n def getBins(self):\n scale = []\n idx = 0\n for v in np.arange(self.minValue, self.maxValue, self.step):\n scale.append(v)\n idx += 1\n return scale\n \n def getUpdatedBinRange(self, dataMin, dataMax, df, dimension):\n newMin = self.minValue\n newMax = self.maxValue\n\n if dataMin < self.minValue:\n while dataMin < newMin:\n newMin -= self.step\n \n if dataMax >= self.maxValue:\n while dataMax >= newMax:\n newMax += self.step\n\n multiplier = int(len(self.getBins()) / self.targetBinNumber)\n newStep = self.step\n if multiplier > 1:\n pass\n #newStep = Step * (double)multiplier\n\n newRange = QuantitativeBinRange(dataMin, dataMax, self.targetBinNumber, self.isIntegerRange)\n \n newRange.minValue = newMin\n newRange.maxValue = newMax\n newRange.dataMinValue = min(dataMin, self.dataMinValue)\n newRange.dataMaxValue = max(dataMax, self.dataMaxValue)\n newRange.step = self.step\n return newRange\n \n def __getExtent(self, dataMin, dataMax, m):\n if (dataMin == dataMax):\n dataMax += 0.1\n \n span = dataMax - dataMin\n\n step = math.pow(10, math.floor(math.log10(span / m)))\n err = m / span * step\n\n if (err <= .15):\n step *= 10\n elif (err <= .35):\n step *= 5\n elif (err <= .75):\n step *= 2\n\n if (self.isIntegerRange):\n step = math.ceil(step)\n \n ret = [0,0,0]\n ret[0] = (math.floor(round(dataMin, 8) / step) * step)\n ret[1] = (math.floor(round(dataMax, 8) / step) * step + step)\n ret[2] = step\n\n return ret\n \n" ]
[ [ "numpy.arange" ] ]
banjtheman/odsc_nlp_workshop
[ "6562938fff0e9e50d4db8feed5552eaaa7a7f1f6" ]
[ "module_5/helpful_flow.py" ]
[ "# Python imports\nimport logging\nimport os\n\n# Project imports\nimport utils as helpful_funcs\n\n# 3rd party imports\nfrom metaflow import FlowSpec, Parameter, step, card\nimport numpy as np\n\n# How to run\n# python helpful_flow.py run --output_dir test_run\n\n\nclass HelpfulFlow(FlowSpec):\n    \"\"\"\n    This flow will run the Helpful pipeline\n    \"\"\"\n\n    output_dir = Parameter(\n        \"output_dir\",\n        default=\"test_run\",\n        help=\"Location of output files\",\n        required=True,\n    )\n\n    # The helpful training data\n    train_data = \"https://helpful-sentences-from-reviews.s3.amazonaws.com/train.json\"\n    test_data = \"https://helpful-sentences-from-reviews.s3.amazonaws.com/test.json\"\n\n    @card\n    @step\n    def start(self):\n        \"\"\"\n        This is the 'start' step. All flows must have a step named 'start' that\n        is the first step in the flow. We will download the data\n        \"\"\"\n\n        # Make output dir\n        cmd = f\"mkdir -p {self.output_dir}\"\n        os.system(cmd)\n\n        # Get raw data\n        self.raw_data_train = helpful_funcs.get_data(self.train_data)\n        self.raw_data_test = helpful_funcs.get_data(self.test_data)\n        self.next(self.prepare_data)\n\n    @card\n    @step\n    def prepare_data(self):\n\n        \"\"\"\n        prepare data\n        \"\"\"\n        # Transform raw data to a dataframe\n        self.df_train = helpful_funcs.data_to_df(self.raw_data_train)\n        self.df_test = helpful_funcs.data_to_df(self.raw_data_test)\n\n        # save df to output folder\n        self.df_train.to_csv(\n            f\"{self.output_dir}/helpful_sentences_train.csv\", index=False\n        )\n        self.df_test.to_csv(\n            f\"{self.output_dir}/helpful_sentences_test.csv\", index=False\n        )\n\n        # We can call N functions to run in parallel\n        self.next(self.vader_run, self.fasttext_start, self.huggingface_split)\n\n    @card\n    @step\n    def vader_run(self):\n\n        \"\"\"\n        Run vader on data\n        \"\"\"\n        # Run vader on the test dataframe\n        self.results = helpful_funcs.test_vader(self.df_test)\n        self.run_name = \"vader\"\n\n        self.next(self.join)\n\n    @card\n    @step\n    def fasttext_start(self):\n\n        \"\"\"\n        Convert data to fasttext format\n        \"\"\"\n\n        helpful_funcs.convert_csv_to_fast_text_doc(self.df_train, self.output_dir)\n        self.next(self.fasttext_train)\n\n    @card\n    @step\n    def fasttext_train(self):\n\n        \"\"\"\n        Train fasttext model\n        \"\"\"\n\n        # Note the fasttext_model can't be saved by metaflow, so we just eval here\n        fasttext_model = helpful_funcs.train_fasttext_model(self.output_dir)\n        self.results = helpful_funcs.test_fasttext(self.df_test, fasttext_model)\n        self.run_name = \"fasttext\"\n\n        self.next(self.join)\n\n    @card\n    @step\n    def huggingface_split(self):\n\n        \"\"\"\n        Split data into 5\n        \"\"\"\n        # TODO we can probably split based on max workers\n        self.helpful_list = np.array_split(self.df_test, 5)\n\n        self.next(self.huggingface_predict, foreach=\"helpful_list\")\n\n    @card\n    @step\n    def huggingface_predict(self):\n\n        \"\"\"\n        Predict with huggingface model\n        \"\"\"\n\n        self.run_name = \"huggingface_\"\n        self.results = helpful_funcs.run_hugging_face(self.input)\n\n        self.next(self.huggingface_join)\n\n    @card\n    @step\n    def huggingface_join(self, inputs):\n        \"\"\"\n        Combine huggingface scores\n        \"\"\"\n\n        self.results = [input.results for input in inputs]\n        self.run_names = [input.run_name for input in inputs]\n\n        print(\"Huggingface Results\")\n        print(self.results)\n        print(self.run_names)\n\n        sent_scores = {}\n        sent_scores[\"pos_match\"] = 0\n        sent_scores[\"neg_match\"] = 0\n        sent_scores[\"miss\"] = 0\n        sent_scores[\"model\"] = \"huggingface\"\n\n        for index, result in enumerate(self.results):\n\n            sent_scores[\"pos_match\"] += result[\"pos_match\"]\n            sent_scores[\"neg_match\"] += result[\"neg_match\"]\n            sent_scores[\"miss\"] += result[\"miss\"]\n\n        num_sents = (\n            sent_scores[\"pos_match\"] + sent_scores[\"neg_match\"] + sent_scores[\"miss\"]\n        )\n        missed_percent = sent_scores[\"miss\"] / num_sents\n        correct_percent = 1 - missed_percent\n        sent_scores[\"missed_percent\"] = missed_percent\n        sent_scores[\"correct_percent\"] = correct_percent\n\n        self.run_name = \"huggingface\"\n        self.results = sent_scores\n\n        self.next(self.join)\n\n    @card\n    @step\n    def join(self, inputs):\n        \"\"\"\n        Save data artifacts from the runs\n        \"\"\"\n\n        self.results = [input.results for input in inputs]\n        self.run_names = [input.run_name for input in inputs]\n\n        print(\"Final Results\")\n        print(self.results)\n        print(self.run_names)\n\n        for index, result in enumerate(self.results):\n\n            curr_name = self.run_names[index]\n\n            # save outputs\n            helpful_funcs.save_json(\n                f\"{self.output_dir}/{curr_name}_results.json\", result\n            )\n\n        self.next(self.end)\n\n    @card\n    @step\n    def end(self):\n        \"\"\"\n        This is the 'end' step. All flows must have an 'end' step, which is the\n        last step in the flow. It will print a \"Done and Done\"\n        \"\"\"\n\n        logging.info(\"Done and Done\")\n\n\nif __name__ == \"__main__\":\n    loglevel = logging.INFO\n    logging.basicConfig(\n        format=\"%(asctime)s |%(levelname)s: %(message)s\", level=loglevel\n    )\n    HelpfulFlow()\n" ]
[ [ "numpy.array_split" ] ]
kingjuno/devolearn
[ "555c8c55441a4f0b9ed8801c37d07c45b03ec774" ]
[ "devolearn/cell_membrane_segmentor/cell_membrane_segmentor.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision.transforms import ToTensor\nfrom torchvision.transforms import ToPILImage\n\nimport os\nimport cv2\nimport wget\nimport imutils\nfrom tqdm import tqdm, tqdm_notebook\nfrom PIL import Image\nimport numpy as np\nfrom collections import deque\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport segmentation_models_pytorch as smp\nimport warnings\nwarnings.filterwarnings(\"ignore\") \n\nfrom ..base_inference_engine import InferenceEngine\n\n\"\"\"\n3d segmentation model for C elegans embryo\n\"\"\"\n\ndef generate_centroid_image(thresh):\n \"\"\"Used when centroid_mode is set to True\n \n Args:\n thresh (np.array): 2d numpy array that is returned from the segmentation model\n\n Returns:\n np.array : image containing the contours and their respective centroids \n list : list of all centroids for the given image as [(x1,y1), (x2,y2)...]\n \"\"\"\n\n thresh = cv2.blur(thresh, (5,5))\n thresh = thresh.astype(np.uint8)\n centroid_image = np.zeros(thresh.shape)\n cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n centroids = []\n for c in cnts:\n try:\n # compute the center of the contour\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n # draw the contour and center of the shape on the image\n cv2.drawContours(centroid_image, [c], -1, (255, 255, 255), 2)\n cv2.circle(centroid_image, (cX, cY), 2, (255, 255, 255), -1)\n centroids.append((cX, cY))\n except:\n pass\n\n return centroid_image, centroids\n\nclass cell_membrane_segmentor(InferenceEngine):\n def __init__(self, device = \"cpu\"):\n \"\"\"Segments the c. elegans embryo from images/videos, \n depends on segmentation-models-pytorch for the model backbone\n\n Args:\n device (str, optional): set to \"cuda\", runs operations on gpu and set to \"cpu\", runs operations on cpu. 
Defaults to \"cpu\".\n        \"\"\"\n        \n        self.device = device\n        self.ENCODER = 'resnet18'\n        self.ENCODER_WEIGHTS = 'imagenet'\n        self.CLASSES = [\"nucleus\"]\n        self.ACTIVATION = 'sigmoid'\n        self.in_channels = 1\n        self.model_url = \"https://github.com/DevoLearn/devolearn/raw/master/devolearn/cell_membrane_segmentor/cell_membrane_segmentation_model.pth\"\n        self.model_name = \"cell_membrane_segmentation_model.pth\"\n        self.model_dir = os.path.dirname(__file__)\n        # print(\"at : \", os.path.dirname(__file__))\n\n        self.model = smp.FPN(\n            encoder_name= self.ENCODER, \n            encoder_weights= self.ENCODER_WEIGHTS, \n            classes=len(self.CLASSES), \n            activation= self.ACTIVATION,\n            in_channels = self.in_channels \n        )\n\n\n        self.download_checkpoint()\n        self.model.to(self.device)\n        self.model.eval()\n\n        self.mini_transform = transforms.Compose([\n            transforms.ToPILImage(),\n            transforms.Resize((256,256), interpolation = Image.NEAREST),\n            transforms.ToTensor(),\n        ])\n\n\n    def download_checkpoint(self):\n        try:\n            # print(\"model already downloaded, loading model...\")\n            self.model = torch.load(self.model_dir + \"/\" + self.model_name, map_location= self.device) \n        except:\n            print(\"model not found, downloading from:\", self.model_url)\n            if os.path.isdir(self.model_dir) == False:\n                os.mkdir(self.model_dir)\n            filename = wget.download(self.model_url, out= self.model_dir)\n            # print(filename)\n            self.model = torch.load(self.model_dir + \"/\" + self.model_name, map_location= self.device) \n\n    def preprocess(self, image_grayscale_numpy):\n\n        tensor = self.mini_transform(image_grayscale_numpy).unsqueeze(0).to(self.device)\n        return tensor\n\n    def predict(self, image_path, pred_size = (350,250), centroid_mode = False):\n        \"\"\"Loads an image from image_path and converts it to grayscale, \n        then passes it through the model and returns centroids of the segmented features.\n        reference{\n            https://github.com/DevoLearn/devolearn#segmenting-the-c-elegans-embryo\n        }\n\n        Args:\n            image_path (str): path to image\n            pred_size (tuple, optional): size of output image, (width, height). Defaults to (350,250).\n            centroid_mode (bool, optional): set to true to return both the segmented image and the list of centroids. Defaults to False.\n\n        Returns:\n            centroid_mode set to False:\n                np.array : 1 channel image.\n            centroid_mode set to True:\n                np.array : 1 channel image,\n                list : list of centroids.\n        \"\"\"\n\n        im = cv2.imread(image_path,0)\n\n        tensor = self.preprocess(im)\n        res = self.model(tensor).detach().cpu().numpy()[0][0]\n        \n        res = cv2.resize(res,pred_size)\n        if centroid_mode == False:\n            return res\n        else:\n            centroid_image, centroids = generate_centroid_image(res)\n            return centroid_image, centroids\n    \n\n    def predict_from_video(self, video_path, pred_size = (350,250), save_folder = \"preds\", centroid_mode = False, notebook_mode = False):\n        \"\"\"Splits a video from video_path into frames and passes the \n        frames through the model for predictions. Saves predicted images in save_folder.\n        And optionally saves all the centroid predictions into a pandas.DataFrame. \n\n        Args:\n            video_path (str): path to the video file.\n            pred_size (tuple, optional): size of output image, (width, height). Defaults to (350,250).\n            save_folder (str, optional): path to folder to be saved in. Defaults to \"preds\".\n            centroid_mode (bool, optional): set to true to return both the segmented image and the list of centroids. Defaults to False.\n            notebook_mode (bool, optional): toggle between script(False) and notebook(True), for better user interface. Defaults to False.\n\n        Returns:\n            centroid_mode set to True:\n                pd.DataFrame : containing file names and their centroids\n            centroid_mode set to False:\n                list : list containing the names of the entries in the save_folder directory \n        \"\"\"\n        \n        vidObj = cv2.VideoCapture(video_path) \n        success = 1\n        images = deque()\n        count = 0\n\n        if centroid_mode == True:\n            filenames_centroids = []\n\n        while success: \n            success, image = vidObj.read() \n            \n            try:\n                image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n                images.append(image)\n                \n            except:\n                # print(\"skipped possible corrupt frame number : \", count)\n                pass\n            count += 1 \n        \n        if os.path.isdir(save_folder) == False:\n            os.mkdir(save_folder)\n\n        if notebook_mode == True:\n            for i in tqdm_notebook(range(len(images)), desc = \"saving predictions: \"): \n                save_name = save_folder + \"/\" + str(i) + \".jpg\"\n                tensor = self.mini_transform(images[i]).unsqueeze(0).to(self.device)\n                res = self.model(tensor).detach().cpu().numpy()[0][0]\n\n                if centroid_mode == True:\n                    res, centroids = generate_centroid_image(res)\n                    filenames_centroids.append([save_name, centroids])\n\n                res = cv2.resize(res,pred_size)\n                cv2.imwrite(save_name, res*255)\n        else:\n            for i in tqdm(range(len(images)), desc = \"saving predictions: \"):\n                save_name = save_folder + \"/\" + str(i) + \".jpg\"\n                tensor = self.mini_transform(images[i]).unsqueeze(0).to(self.device)\n                res = self.model(tensor).detach().cpu().numpy()[0][0]\n\n                if centroid_mode == True:\n                    res, centroids = generate_centroid_image(res)\n                    filenames_centroids.append([save_name, centroids])\n\n                res = cv2.resize(res,pred_size)\n                cv2.imwrite(save_name, res*255)\n\n        if centroid_mode == True:\n            df = pd.DataFrame(filenames_centroids, columns = [\"filenames\", \"centroids\"])\n            return df\n        else:\n            return os.listdir(save_folder)\n" ]
[ [ "pandas.DataFrame", "torch.load", "numpy.zeros" ] ]
avi2412/nlp-dl-prework
[ "902d77344c351954e370a4aacf5a427db68cfad9" ]
[ "Lego-Collector's-Dilemma/code.py" ]
[ "# --------------\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n# code starts here\n\n# 'path' is expected to be defined by the execution environment\ndata = pd.read_csv(path)\ndf = pd.DataFrame(data)\n#print(df.iloc[0:5])\n\nX = df.drop(['list_price'], axis = 1)\ny = df.iloc[:, 1]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 6)\n\n# code ends here\n\n\n\n# --------------\nimport matplotlib.pyplot as plt\n\n# code starts here \ncols = X_train.columns\nfig, axes = plt.subplots(3,3)\n\nfor i in range(0,3):\n    for j in range(0,3):\n        col = cols[i*3 + j]\n        axes[i,j].scatter(X_train[col],y_train)\n        axes[i,j].set_title(col)\n\nplt.show()\n# code ends here\n\n\n\n# --------------\n# Code starts here\ncorr = X_train.corr()\n\nX_train.drop(columns = ['play_star_rating','val_star_rating'], inplace = True)\nX_test.drop(columns = ['play_star_rating','val_star_rating'], inplace = True)\n\n# Code ends here\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# Code starts here\n\nregressor = LinearRegression()\nregressor.fit(X_train,y_train)\ny_pred = regressor.predict(X_test)\n\nmse = mean_squared_error(y_test,y_pred)\nr2 = r2_score(y_test,y_pred)\nprint(r2)\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\nresidual = y_test-y_pred\nresidual.hist()\n\n\n\n# Code ends here\n\n\n" ]
[ [ "sklearn.metrics.mean_squared_error", "pandas.read_csv", "pandas.DataFrame", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.subplots", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show", "sklearn.metrics.r2_score" ] ]
ibrahim-sheriff/Deploying-a-ML-Model-on-Heroku-with-FastAPI
[ "483c563d0e3838580f5cd643c70db6a47e1c1219" ]
[ "src/tests/conftest.py" ]
[ "\"\"\"\r\nAuthor: Ibrahim Sherif\r\nDate: October, 2021\r\nThis script holds the conftest fixtures used with the pytest module\r\n\"\"\"\r\nimport os\r\nimport pytest\r\nimport pandas as pd\r\nimport great_expectations as ge\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport config\r\nfrom pipeline.data import get_clean_data\r\n\r\n\r\[email protected](scope='session')\r\ndef data():\r\n    \"\"\"\r\n    Data loaded from csv file used for tests\r\n\r\n    Returns:\r\n        df (ge.DataFrame): Data loaded from csv file\r\n    \"\"\"\r\n    if not os.path.exists(config.DATA_DIR):\r\n        pytest.fail(f\"Data not found at path: {config.DATA_DIR}\")\r\n\r\n    X_df, y_df = get_clean_data(config.DATA_DIR)\r\n    X_df['salary'] = y_df\r\n    X_df['salary'] = X_df['salary'].map({1: '>50k', 0: '<=50k'})\r\n\r\n    df = ge.from_pandas(X_df)\r\n\r\n    return df\r\n\r\n\r\[email protected](scope='session')\r\ndef sample_data():\r\n    \"\"\"\r\n    Sampled data from csv file used for tests\r\n\r\n    Returns:\r\n        X_train: Features train data\r\n        X_test: Features test data\r\n        y_train: Labels train data\r\n        y_test: Labels test data\r\n    \"\"\"\r\n    if not os.path.exists(config.DATA_DIR):\r\n        pytest.fail(f\"Data not found at path: {config.DATA_DIR}\")\r\n\r\n    data_df = pd.read_csv(config.DATA_DIR, nrows=10)\r\n\r\n    # changing column names to use _ instead of -\r\n    columns = data_df.columns\r\n    columns = [col.replace('-', '_') for col in columns]\r\n    data_df.columns = columns\r\n\r\n    # make all characters lowercase in string columns\r\n    data_df = data_df.applymap(\r\n        lambda s: s.lower() if isinstance(s, str) else s)\r\n\r\n    data_df['salary'] = data_df['salary'].map({'>50k': 1, '<=50k': 0})\r\n\r\n    y_df = data_df.pop('salary')\r\n    X_df = data_df\r\n\r\n    X_train, X_test, y_train, y_test = train_test_split(\r\n        X_df, y_df, test_size=0.3, random_state=config.RANDOM_STATE, stratify=y_df)\r\n\r\n    return X_train, X_test, y_train, y_test\r\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split" ] ]
Simmons-Wang/easy-backtrack-tweets
[ "d5c7912e06376f63800e76af658a79b87129dc92" ]
[ "tweetBacktrack.py" ]
[ "import datetime\nimport pickle\nimport tweepy as tp\nimport pandas as pd\nimport time\n\n\ndef lookUpDetail(ids):\n    \"\"\"\n    :param ids: the list of tweet ids; the maximum length is 100 per request.\n    :return: dataframe which includes 'tweet_id', 'favorite_count', 'retweet_count', 'lang',\n    'hashtags', 'urls', 'user_id'\n    \"\"\"\n    statuses = api.lookup_statuses(ids)\n    details = [[i.id, i.favorite_count, i.retweet_count, i.lang,\n                i.entities['hashtags'], i.entities['urls'],\n                i.author.id] for i in statuses]\n    df = pd.DataFrame(details, columns=['tweet_id', 'favorite_count', 'retweet_count', 'lang',\n                                        'hashtags', 'urls', 'user_id'])\n\n    df.hashtags = df.hashtags.apply(lambda x: [i['text'] for i in x] if x else [])\n    df.urls = df.urls.apply(lambda x: x[0]['url'] if x else None)\n    return df\n\n\ndef get_following(my_name):\n    user1 = api.get_friends(screen_name=my_name, cursor=-1, count=200)  # 200 is the limit\n    user = user1[0]\n    while user1[0]:\n        user1 = api.get_friends(screen_name=my_name, cursor=user1[1][1], count=200)\n        user = user + user1[0]\n        time.sleep(2)\n    friendsScreenName = [i.screen_name for i in user]  # change this line to collect other attributes of friends\n    return friendsScreenName\n\n\ndef get_history(f, start_time):\n    \"\"\"collect a user's timeline tweets from now back to start_time (a datetime.datetime)\"\"\"\n    tws = api.user_timeline(screen_name=f, count=200)  # one crawl limit is 200\n    userTws = tws.copy()\n    while tws and (tws[-1].created_at > start_time):\n        tws = api.user_timeline(screen_name=f, count=200, max_id=tws.max_id)\n        userTws = userTws + tws\n    details = [[i.created_at, i.id, i.text, i.favorite_count, i.retweet_count, i.lang,\n                i.entities['hashtags'], i.entities['urls'],\n                i.author.id, f] for i in userTws]\n    return details\n\n\nif __name__ == '__main__':\n    # replace the following credentials with yours\n    CONSUMER_KEY = \"\"\n    CONSUMER_SECRET = \"\"\n    ACCESS_TOKEN = \"\"\n    ACCESS_TOKEN_SECRET = \"\"\n\n    auth = tp.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n    api = tp.API(auth)\n\n    test_ids = [\n        1481194503650549761,\n        1481194425170956292,\n        1480951940389371914,\n        1480942056365252610,\n        1480888363011903491,\n        1480886828072718337,\n        1480848873627086849,\n        1480844751880351745,\n        1480823233267920897]\n\n    test_result1 = lookUpDetail(test_ids)\n    test_result2 = get_following('')  # replace it with your name\n    test_result3 = get_history('', datetime.datetime(2022, 1, 1))  # replace with your name; the start time is a placeholder\n" ]
[ [ "pandas.DataFrame" ] ]
jezsadler/summit
[ "982de7f6424bb94da2084d4d84396b4b2673eeca" ]
[ "summit/benchmarks/experiment_emulator/bnn_emulator.py" ]
[ "import os\nimport os.path as osp\n\nimport numpy as np\n\nfrom summit.benchmarks.experiment_emulator.emulator import Emulator\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom blitz.modules import BayesianLinear\nfrom blitz.utils import variational_estimator\n\nfrom sklearn.metrics import r2_score\n\n# =======================================================================\n\n\nclass BNNEmulator(Emulator):\n \"\"\"BNN Emulator\n\n A Bayesian Neural Network (BNN) emulator.\n\n Parameters\n ---------\n domain: summit.domain.Domain\n The domain of the experiment\n dataset: class:~summit.utils.dataset.DataSet, optional\n A DataSet with data for training where the data columns correspond to the domain and the data rows correspond to the training points.\n By default: None\n model_name: string, optional\n Name of the model that is used for saving model parameters. Should be unique.\n By default: \"dataset_emulator_model_name\"\n \"\"\"\n\n # =======================================================================\n\n def __init__(self, domain, dataset, model_name, kwargs={}):\n super().__init__(domain, dataset, model_name, kwargs)\n self._model = self._setup_model()\n\n # Set model name for saving\n self.save_path = kwargs.get(\n \"save_path\",\n osp.join(osp.dirname(osp.realpath(__file__)), \"trained_models/BNN\"),\n )\n\n # Set up training hyperparameters\n self.set_training_hyperparameters()\n\n # =======================================================================\n\n def _setup_model(self, **kwargs):\n \"\"\" Setup the BNN model \"\"\"\n\n @variational_estimator\n class BayesianRegressor(nn.Module):\n def __init__(self, input_dim):\n super().__init__()\n\n self.blinear1 = BayesianLinear(input_dim, 24)\n self.blinear2 = BayesianLinear(24, 24)\n self.blinear3 = BayesianLinear(24, 24)\n self.blinear4 = BayesianLinear(24, 1)\n\n def forward(self, x):\n x = F.leaky_relu(self.blinear1(x))\n x = F.leaky_relu(self.blinear2(x))\n x = F.dropout(x, p=0.1, training=self.training)\n x = F.leaky_relu(self.blinear3(x))\n x = F.dropout(x, p=0.1, training=self.training)\n x = F.relu(self.blinear4(x))\n y = x\n return y.view(-1)\n\n # Training of model on given dataloader\n def _train(self, regressor, device, optimizer, criterion, X_train, loader):\n regressor.train()\n\n for i, (datapoints, labels) in enumerate(loader):\n optimizer.zero_grad()\n loss = regressor.sample_elbo(\n inputs=datapoints.to(device),\n labels=labels.to(device),\n criterion=criterion,\n sample_nbr=3,\n complexity_cost_weight=1 / X_train.shape[0],\n )\n loss.backward()\n optimizer.step()\n\n # Evaluate model for given dataloader\n def _evaluate_regression(\n self,\n regressor,\n device,\n loader,\n fun_untransform_data,\n out_transform,\n get_predictions=False,\n ):\n regressor.eval()\n regressor.freeze_()\n\n mae = 0\n pred_data = []\n real_data = []\n for i, (datapoints, labels) in enumerate(loader):\n data = datapoints.to(device)\n pred = regressor(data)\n tmp_pred_data = fun_untransform_data(\n data=pred, reduce=out_transform[0], divide=out_transform[1]\n )\n tmp_real_data = fun_untransform_data(\n data=labels, reduce=out_transform[0], divide=out_transform[1]\n )\n mae += (tmp_pred_data - tmp_real_data).abs().sum(0).item()\n\n if get_predictions:\n pred_data.extend(tmp_pred_data.tolist())\n real_data.extend(tmp_real_data.tolist())\n\n if get_predictions:\n return pred_data, real_data\n\n regressor.unfreeze_()\n\n return mae / len(loader.dataset)\n\n regression_model = 
BayesianRegressor(self.input_dim)\n return regression_model\n\n # =======================================================================\n\n def set_training_hyperparameters(self, kwargs={}):\n # Setter method for hyperparameters of training\n self.epochs = kwargs.get(\n \"epochs\", 300\n ) # number of max epochs the model is trained\n self.initial_lr = kwargs.get(\"initial_lr\", 0.001) # initial learning rate\n self.min_lr = kwargs.get(\"min_lr\", 0.00001)\n self.lr_decay = kwargs.get(\"lr_decay\", 0.7) # learning rate decay\n self.lr_decay_patience = kwargs.get(\n \"lr_decay_patience\", 3\n ) # number of epochs before learning rate is reduced by lr_decay\n self.early_stopping_epochs = kwargs.get(\n \"early_stopping_epochs\", 30\n ) # number of epochs before early stopping\n self.batch_size_train = kwargs.get(\"batch_size_train\", 4)\n self.transform_input = kwargs.get(\"transform_input\", \"standardize\")\n self.transform_output = kwargs.get(\"transform_output\", \"standardize\")\n self.test_size = kwargs.get(\"test_size\", 0.1)\n self.shuffle = kwargs.get(\"shuffle\", False)\n\n # =======================================================================\n\n def train_model(self, dataset=None, verbose=True, kwargs={}):\n # Manual call of training -> overwrite dataset with new dataset for training\n if dataset is not None:\n self._dataset = dataset\n\n # #<cv_fold>-fold cross-validation\n cv_fold = kwargs.get(\"cv_fold\", 10)\n\n # Data preprocess\n train_dataset, test_dataset = self._data_preprocess(\n transform_input=self.transform_input,\n transform_output=self.transform_output,\n test_size=self.test_size,\n shuffle=self.shuffle,\n )\n\n X_train_init, y_train_init = (\n torch.tensor(train_dataset[0]).float(),\n torch.tensor(train_dataset[1]).float(),\n )\n X_test, y_test = (\n torch.tensor(test_dataset[0]).float(),\n torch.tensor(test_dataset[1]).float(),\n )\n\n shuffle_train = kwargs.get(\"shuffle_train\", False)\n if shuffle_train:\n perm = torch.randperm(len(y_train_init))\n train_data = torch.cat([X_train_init, y_train_init], axis=1)[perm]\n X_train, y_train = (\n train_data[:, : -self.output_dim],\n train_data[:, -self.output_dim :],\n )\n else:\n X_train, y_train = X_train_init, y_train_init\n\n if verbose:\n print(\"\\n<---- Start training of BNN model ---->\")\n print(\" --- Length of train dataset: {} ---\".format(X_train.shape[0]))\n print(\" --- Length of test dataset: {} ---\".format(X_test.shape[0]))\n for i, k in enumerate(self.output_models):\n if verbose:\n print(\n \"\\n <-- Start {}-fold cross-validation training of BNN regressor on objective: {} -->\\n\".format(\n cv_fold, k\n )\n )\n\n train_acc, val_acc, test_acc = [], [], []\n y_train_pred_l, y_train_real_l, y_test_pred_l, y_test_real_l = (\n [],\n [],\n [],\n [],\n )\n for j in range(cv_fold):\n if verbose:\n print(\" ---------------- Split {} ----------------\".format(j + 1))\n\n # Set training details\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n regressor = self._setup_model().to(device)\n optimizer = optim.Adam(regressor.parameters(), lr=self.initial_lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n factor=self.lr_decay,\n patience=self.lr_decay_patience,\n min_lr=self.min_lr,\n )\n criterion = torch.nn.MSELoss()\n model_save_name = (\n self.model_name + \"_\" + str(k) + \"_\" + str(j + 1) + \"_BNN_model.pt\"\n )\n model_save_dir = osp.join(self.save_path, model_save_name)\n storable = self._check_file_path(model_save_dir)\n if not storable:\n 
self.output_models[k] = self._load_model(self.model_name)[k]\n continue\n\n # Setup train and val dataset for cross-validation\n if cv_fold <= 1:\n raise ValueError(\n \"{}-fold Cross-Validation not possible. Increase cv_fold.\".format(\n cv_fold\n )\n )\n if len(X_train) < cv_fold:\n raise ValueError(\n \"Too few data points ({}) for training provided. Decrease cv_fold.\".format(\n len(X_train)\n )\n )\n n = len(X_train) // cv_fold\n r = len(X_train) % cv_fold\n val_mask = torch.zeros(len(X_train), dtype=torch.uint8)\n # make sure every data point is included in the validation set once\n if j < r:\n val_mask[j * (n + 1) : (j + 1) * (n + 1)] = 1\n else:\n val_mask[j * n + r : (j + 1) * n + r] = 1\n X_val_cv, y_val_cv = X_train[val_mask], y_train[val_mask]\n X_train_cv, y_train_cv = X_train[1 - val_mask], y_train[1 - val_mask]\n\n out_transform = self.data_transformation_dict[k]\n y_train_obj, y_val_obj, y_test_obj = (\n y_train_cv[:, i],\n y_val_cv[:, i],\n y_test[:, i],\n )\n ds_train = torch.utils.data.TensorDataset(X_train_cv, y_train_obj)\n dataloader_train = torch.utils.data.DataLoader(\n ds_train, batch_size=self.batch_size_train, shuffle=True\n )\n ds_val = torch.utils.data.TensorDataset(X_val_cv, y_val_obj)\n dataloader_val = torch.utils.data.DataLoader(\n ds_val, batch_size=16, shuffle=False\n )\n ds_test = torch.utils.data.TensorDataset(X_test, y_test_obj)\n dataloader_test = torch.utils.data.DataLoader(\n ds_test, batch_size=16, shuffle=False\n )\n\n max_iter_stop = (\n self.early_stopping_epochs\n ) # maximum number of consecutive iteration w/o improvement after which training is stopped\n tmp_iter_stop = 0\n best_train_mae, best_val_mae, best_test_mae = (\n float(\"inf\"),\n float(\"inf\"),\n float(\"inf\"),\n )\n for epoch in range(self.epochs):\n\n lr = scheduler.optimizer.param_groups[0][\"lr\"]\n\n # train model\n self._model._train(\n regressor,\n device,\n optimizer,\n criterion,\n X_train_cv,\n dataloader_train,\n )\n\n train_mae = self._model._evaluate_regression(\n regressor,\n device,\n dataloader_train,\n self._untransform_data,\n out_transform,\n )\n val_mae = self._model._evaluate_regression(\n regressor,\n device,\n dataloader_val,\n self._untransform_data,\n out_transform,\n )\n scheduler.step(val_mae)\n\n if verbose:\n print(\n \" -- Epoch: {:03d}, LR: {:7f}, Train MAE: {:4f}, Val MAE: {:4f}\".format(\n epoch, lr, train_mae, val_mae\n )\n )\n\n # if prediction accuracy was improved in current epoch, reset <tmp_iter_stop> and save model\n if best_val_mae > val_mae:\n best_val_mae = val_mae\n tmp_iter_stop = 0\n torch.save(regressor.state_dict(), model_save_dir)\n test_mae = self._model._evaluate_regression(\n regressor,\n device,\n dataloader_test,\n self._untransform_data,\n out_transform,\n )\n best_train_mae, best_test_mae = train_mae, test_mae\n if verbose:\n print(\n \" -> Val MAE improved, current Test MAE: {:4f}\".format(\n test_mae\n )\n )\n # if prediction accuracy was not imporved in current epoch, increase <tmp_iter_stop> and stop training if <max_iter_stop> is reached\n else:\n tmp_iter_stop += 1\n if tmp_iter_stop >= max_iter_stop:\n break\n\n train_acc.append(best_train_mae)\n val_acc.append(best_val_mae)\n test_acc.append(best_test_mae)\n\n y_train_obj = y_train_init[:, i]\n ds_train_all = torch.utils.data.TensorDataset(X_train_init, y_train_obj)\n\n # load final model from epoch with lowest prediction accuracy\n regressor.load_state_dict(torch.load(model_save_dir))\n\n # get final model predictions for training and test data\n y_train_pred, 
y_train_real = self._model._evaluate_regression(\n regressor=regressor,\n device=device,\n loader=torch.utils.data.DataLoader(ds_train_all, shuffle=False),\n fun_untransform_data=self._untransform_data,\n out_transform=out_transform,\n get_predictions=True,\n )\n y_test_pred, y_test_real = self._model._evaluate_regression(\n regressor=regressor,\n device=device,\n loader=torch.utils.data.DataLoader(ds_test, shuffle=False),\n fun_untransform_data=self._untransform_data,\n out_transform=out_transform,\n get_predictions=True,\n )\n y_train_pred_l.append(y_train_pred), y_train_real_l.append(y_train_real)\n y_test_pred_l.append(y_test_pred), y_test_real_l.append(y_test_real)\n\n train_acc, val_acc, test_acc = (\n torch.tensor(train_acc),\n torch.tensor(val_acc),\n torch.tensor(test_acc),\n )\n y_train_pred_l, y_train_real_l, y_test_pred_l, y_test_real_l = (\n torch.tensor(y_train_pred_l),\n torch.tensor(y_train_real_l),\n torch.tensor(y_test_pred_l),\n torch.tensor(y_test_real_l),\n )\n\n X_train_final = np.asarray(X_train_init.tolist())\n X_test_final = np.asarray(X_test.tolist())\n for ind, inp_var in enumerate(self.input_names_transformable):\n tmp_inp_transform = self.data_transformation_dict[inp_var]\n X_train_final[:, ind] = self._untransform_data(\n data=X_train_final[:, ind],\n reduce=tmp_inp_transform[0],\n divide=tmp_inp_transform[1],\n )\n X_test_final[:, ind] = self._untransform_data(\n data=X_test_final[:, ind],\n reduce=tmp_inp_transform[0],\n divide=tmp_inp_transform[1],\n )\n\n self.output_models[k] = {\n \"model_save_dirs\": [\n self.model_name + \"_\" + str(k) + \"_\" + str(j + 1)\n for j in range(cv_fold)\n ],\n \"Final train MAE\": train_acc.mean().tolist(),\n \"Final validation MAE\": val_acc.mean().tolist(),\n \"Final test MAE\": test_acc.mean().tolist(),\n \"data_transformation_dict\": self.data_transformation_dict,\n \"X variable names\": self.input_names,\n \"X_train\": X_train_final.tolist(),\n \"y_train_real\": y_train_real_l.mean(axis=0).tolist(),\n \"y_train_pred_average\": y_train_pred_l.mean(axis=0).tolist(),\n \"X_test\": X_test_final.tolist(),\n \"y_test_real\": y_test_real_l.mean(axis=0).tolist(),\n \"y_test_pred_average\": y_test_pred_l.mean(axis=0).tolist(),\n }\n\n if verbose:\n print(\n \"\\n <-- Finished training of BNN model on objective: {} -->\\n\"\n \" -- Final Train MAE: {:4f}, Final Val MAE: {:4f}, Final Test MAE: {:4f} --\\n\"\n \" -- Model saved at: {} --\\n\".format(\n k,\n train_acc.mean(),\n val_acc.mean(),\n test_acc.mean(),\n model_save_dir,\n )\n )\n\n self._save_model()\n\n if verbose:\n print(\"<---- End training of BNN regressor ---->\\n\")\n\n # =======================================================================\n\n def validate_model(\n self, dataset=None, parity_plots=False, get_pred=False, kwargs={}\n ):\n self.output_models = self._load_model(self.model_name)\n\n self._model.freeze_() # freeze the model, in order to predict using only their weight distribution means\n self._model.eval() # set to evaluation mode (may be redundant)\n\n val_dict = {}\n lst_parity_plots = None\n if parity_plots:\n lst_parity_plots = []\n\n if dataset is not None:\n for i, (k, v) in enumerate(self.output_models.items()):\n model_load_dirs = v[\"model_save_dirs\"]\n self.data_transformation_dict = v[\"data_transformation_dict\"]\n out_transform = self.data_transformation_dict[k]\n\n X_val = self._data_preprocess(\n inference=True, infer_dataset=dataset, validate=True\n )\n X_val = torch.tensor(X_val).float()\n y_val = torch.tensor(dataset[(k, 
\"DATA\")].to_numpy()).float()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n prediction_l = []\n for m in model_load_dirs:\n model_load_dir = osp.join(self.save_path, m + \"_BNN_model.pt\")\n self._model.load_state_dict(\n torch.load(model_load_dir, map_location=torch.device(device))\n )\n data = X_val.to(device)\n predictions = self._model(data).detach()\n predictions = self._untransform_data(\n data=predictions,\n reduce=out_transform[0],\n divide=out_transform[1],\n )\n prediction_l.append(predictions)\n prediction_l = torch.tensor(prediction_l)\n predictions = prediction_l.mean(axis=0)\n val_dict[k] = {\n \"MAE\": (predictions - y_val).abs().mean().item(),\n \"RMSE\": ((((predictions - y_val) ** 2).mean()) ** (1 / 2)).item(),\n \"r2\": r2_score(y_val, predictions)\n if y_val.shape[0] > 1\n else \"Too few data points to calculate r2.\",\n }\n\n if parity_plots:\n parity_plot = self.create_parity_plot(\n datasets_pred=[predictions],\n datasets_real=[y_val],\n kwargs=kwargs,\n )\n lst_parity_plots.append(parity_plot)\n else:\n for i, (k, v) in enumerate(self.output_models.items()):\n y_train_real, y_train_pred, y_test_real, y_test_pred = (\n torch.tensor(v[\"y_train_real\"]).float(),\n torch.tensor(v[\"y_train_pred_average\"]).float(),\n torch.tensor(v[\"y_test_real\"]).float(),\n torch.tensor(v[\"y_test_pred_average\"]).float(),\n )\n val_dict[k] = {\n \"Train\": {\n \"MAE\": (y_train_real - y_train_pred).abs().mean().item(),\n \"RMSE\": (\n (((y_train_real - y_train_pred) ** 2).mean()) ** (1 / 2)\n ).item(),\n \"r2\": r2_score(y_train_real, y_train_pred)\n if y_train_pred.shape[0] > 1\n else \"Too few data points to calculate r2.\",\n },\n \"Test\": {\n \"MAE\": (y_test_real - y_test_pred).abs().mean().item(),\n \"RMSE\": (\n (((y_test_real - y_test_pred) ** 2).mean()) ** (1 / 2)\n ).item(),\n \"r2\": r2_score(y_test_real, y_test_pred)\n if y_test_pred.shape[0] > 1\n else \"Too few data points to calculate r2.\",\n },\n }\n if parity_plots:\n parity_plot = self.create_parity_plot(\n datasets_pred=[y_train_pred, y_test_pred],\n datasets_real=[y_train_real, y_test_real],\n kwargs=kwargs,\n )\n lst_parity_plots.append(parity_plot)\n if get_pred:\n return predictions\n return val_dict, lst_parity_plots\n\n # =======================================================================\n\n def infer_model(self, dataset):\n\n self.output_models = self._load_model(self.model_name)\n\n self._model.eval() # set to evaluation mode (may be redundant)\n self._model.freeze_() # freeze the model, in order to predict using only their weight distribution means\n\n infer_dict = {}\n for i, (k, v) in enumerate(self.output_models.items()):\n model_load_dirs = v[\"model_save_dirs\"]\n self.data_transformation_dict = v[\"data_transformation_dict\"]\n out_transform = self.data_transformation_dict[k]\n\n X_infer = self._data_preprocess(inference=True, infer_dataset=dataset)\n X_infer = torch.tensor(X_infer).float()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n prediction_l = []\n for m in model_load_dirs:\n model_load_dir = osp.join(self.save_path, m + \"_BNN_model.pt\")\n self._model.load_state_dict(\n torch.load(model_load_dir, map_location=torch.device(device))\n )\n data = X_infer.to(device)\n predictions = self._model(data).item()\n predictions = self._untransform_data(\n data=predictions, reduce=out_transform[0], divide=out_transform[1]\n )\n prediction_l.append(predictions)\n prediction_l = torch.tensor(prediction_l)\n predictions = 
prediction_l.mean(axis=0).item()\n infer_dict[k] = predictions\n\n return infer_dict\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.MSELoss", "torch.nn.functional.dropout", "torch.load", "torch.tensor", "torch.device", "torch.cuda.is_available", "sklearn.metrics.r2_score", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.utils.data.TensorDataset", "torch.cat" ] ]
johnnytheboii/TensorFuzz_2.0
[ "d1d7ae7de26067c2a1c223dbef6d897752aa8f71" ]
[ "examples/quantize/quantized_fuzzer.py" ]
[ "# Copyright 2018 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fuzz a neural network to find disagreements between normal and quantized.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom lib import fuzz_utils\nfrom lib.corpus import InputCorpus\nfrom lib.corpus import seed_corpus_from_numpy_arrays\nfrom lib.coverage_functions import raw_logit_coverage_function\nfrom lib.coverage_functions import neuron_coverage_function\nfrom lib.coverage_functions import neuron_boundary_coverage_function\nfrom lib.coverage_functions import top_k_neuron_coverage_function\nfrom lib.fuzzer import Fuzzer\nfrom lib.mutation_functions import do_basic_mutations\nfrom lib.sample_functions import recent_sample_function\nfrom lib.sample_functions import uniform_sample_function\nimport time\n\n\ntf.flags.DEFINE_string(\n \"checkpoint_dir\", None, \"Dir containing checkpoints of model to fuzz.\"\n)\ntf.flags.DEFINE_string(\n \"output_path\", None, \"Where to write the satisfying output.\"\n)\ntf.flags.DEFINE_integer(\n \"total_inputs_to_fuzz\", 100, \"Loops over the whole corpus.\"\n)\ntf.flags.DEFINE_integer(\n \"mutations_per_corpus_item\", 100, \"Number of times to mutate corpus item.\"\n)\ntf.flags.DEFINE_float(\n \"perturbation_constraint\", None, \"Constraint on norm of perturbations.\"\n)\ntf.flags.DEFINE_float(\n \"ann_threshold\",\n 1.0,\n \"Distance below which we consider something new coverage.\",\n)\ntf.flags.DEFINE_boolean(\n \"random_seed_corpus\", False, \"Whether to choose a random seed corpus.\"\n)\nFLAGS = tf.flags.FLAGS\n\n\ndef metadata_function(metadata_batches):\n \"\"\"Gets the metadata.\"\"\"\n logit_32_batch = metadata_batches[0]\n logit_16_batch = metadata_batches[1]\n metadata_list = []\n for idx in range(logit_16_batch.shape[0]):\n metadata_list.append((logit_32_batch[idx], logit_16_batch[idx]))\n return metadata_list\n\n\ndef objective_function(corpus_element):\n \"\"\"Checks if the element is misclassified.\"\"\"\n logits_32 = corpus_element.metadata[0]\n logits_16 = corpus_element.metadata[1]\n prediction_16 = np.argmax(logits_16)\n prediction_32 = np.argmax(logits_32)\n if prediction_16 == prediction_32:\n return False\n return True\n\n\n# pylint: disable=too-many-locals\ndef main(_):\n \"\"\"Constructs the fuzzer and fuzzes.\"\"\"\n\n # Log more\n tf.logging.set_verbosity(tf.logging.INFO)\n\n coverage_function = top_k_neuron_coverage_function\n image, label = fuzz_utils.basic_mnist_input_corpus(\n choose_randomly=FLAGS.random_seed_corpus\n )\n numpy_arrays = [[image, label]]\n image_copy = image[:]\n\n with tf.Session() as sess:\n\n overall_start_time = time.time()\n tensor_map = fuzz_utils.get_tensors_from_checkpoint(\n sess, FLAGS.checkpoint_dir\n )\n\n fetch_function = fuzz_utils.build_fetch_function(sess, tensor_map)\n\n size = FLAGS.mutations_per_corpus_item\n\n def mutation_function(elt):\n \"\"\"Mutates the element in question.\"\"\"\n 
return do_basic_mutations(elt, size, FLAGS.perturbation_constraint)\n\n seed_corpus = seed_corpus_from_numpy_arrays(\n numpy_arrays, coverage_function, metadata_function, fetch_function\n )\n corpus = InputCorpus(\n seed_corpus, uniform_sample_function, FLAGS.ann_threshold, \"kdtree\"\n )\n fuzzer = Fuzzer(\n corpus,\n coverage_function,\n metadata_function,\n objective_function,\n mutation_function,\n fetch_function,\n )\n result, fetch_time = fuzzer.loop(FLAGS.total_inputs_to_fuzz)\n \n overall_end_time = time.time()\n print(\"Overall time is \" + str(overall_end_time-overall_start_time))\n print(\"Fetch time is \" + str(fetch_time))\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.flags.DEFINE_integer", "tensorflow.flags.DEFINE_float", "tensorflow.app.run", "tensorflow.logging.set_verbosity", "numpy.argmax", "tensorflow.flags.DEFINE_boolean", "tensorflow.Session", "tensorflow.flags.DEFINE_string" ] ]
C-bowman/inference-tools
[ "499c3c23f1b3817b4cabde21ba45f2e2c6b95f77" ]
[ "inference/priors.py" ]
[ "\"\"\"\n.. moduleauthor:: Chris Bowman <[email protected]>\n\"\"\"\nfrom typing import Union, Iterable\n\nfrom numpy import array, log, pi, zeros, concatenate, float64, where\nfrom numpy.random import normal, exponential, uniform\nfrom itertools import chain\n\n\nclass JointPrior(object):\n \"\"\"\n A class which combines multiple prior distribution objects into a single\n joint-prior distribution object.\n\n :param components: \\\n A list of prior distribution objects (e.g. GaussianPrior, ExponentialPrior)\n which will be combined into a single joint-prior object.\n\n :param int n_variables: \\\n The total number of model variables.\n \"\"\"\n\n def __init__(self, components, n_variables):\n if not all(isinstance(c, BasePrior) for c in components):\n raise TypeError(\n \"\"\"\n All objects contained in the 'components' argument must be instances\n of a subclass of BasePrior (e.g. GaussianPrior, UniformPrior)\n \"\"\"\n )\n\n # Combine any prior components which are of the same type\n self.components = []\n for cls in [GaussianPrior, ExponentialPrior, UniformPrior]:\n L = [c for c in components if isinstance(c, cls)]\n if len(L) == 1:\n self.components.extend(L)\n elif len(L) > 1:\n self.components.append(cls.combine(L))\n\n # check that no variable appears more than once across all prior components\n self.prior_variables = []\n for var in chain(*[c.variables for c in self.components]):\n if var in self.prior_variables:\n raise ValueError(\n f\"Variable index '{var}' appears more than once in prior components\"\n )\n self.prior_variables.append(var)\n\n if len(self.prior_variables) != n_variables:\n raise ValueError(\n f\"\"\"\n The total number of variables specified across the various prior\n components ({len(self.prior_variables)}) does not match the number specified in\n the 'n_variables' argument ({n_variables}).\n \"\"\"\n )\n\n if not all(0 <= i < n_variables for i in self.prior_variables):\n raise ValueError(\n \"\"\"\n All variable indices given to the prior components must have values\n in the range [0, n_variables-1].\n \"\"\"\n )\n\n self.n_variables = n_variables\n\n all_bounds = chain(*[c.bounds for c in self.components])\n all_inds = chain(*[c.variables for c in self.components])\n both = sorted(\n [(b, i) for b, i in zip(all_bounds, all_inds)], key=lambda x: x[1]\n )\n self.bounds = [v[0] for v in both]\n\n def __call__(self, theta):\n \"\"\"\n Returns the joint-prior log-probability value, calculated as the sum\n of the log-probabilities from each prior component for the provided\n set of model parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The prior log-probability value.\n \"\"\"\n return sum(c(theta) for c in self.components)\n\n def gradient(self, theta):\n \"\"\"\n Returns the gradient of the prior log-probability with respect to the model\n parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The gradient of the prior log-probability with respect to the model parameters.\n \"\"\"\n grad = zeros(self.n_variables)\n for c in self.components:\n grad[c.variables] = c.gradient(theta)\n return grad\n\n def sample(self):\n \"\"\"\n Draws a sample from the prior.\n\n :returns: \\\n A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n \"\"\"\n sample = zeros(self.n_variables)\n for c in self.components:\n sample[c.variables] = c.sample()\n return sample\n\n\nclass BasePrior(object):\n @staticmethod\n def check_variables(variable_inds: Union[int, 
Iterable[int]], n_vars: int):\n if not isinstance(variable_inds, (int, Iterable)):\n raise TypeError(\"'variable_inds' must be an integer or list of integers\")\n\n if isinstance(variable_inds, int):\n variable_inds = [variable_inds]\n\n if not all(isinstance(p, int) for p in variable_inds):\n raise TypeError(\"'variable_inds' must be an integer or list of integers\")\n\n if n_vars != len(variable_inds):\n raise ValueError(\n \"\"\"\n The total number of variables specified via the 'variable_indices' argument is\n inconsistent with the number specified by the other arguments.\n \"\"\"\n )\n\n if len(variable_inds) != len(set(variable_inds)):\n raise ValueError(\n \"\"\"\n All integers given via the 'variable_indices' must be unique.\n Two or more of the given integers are duplicates.\n \"\"\"\n )\n\n return variable_inds\n\n\nclass GaussianPrior(BasePrior):\n \"\"\"\n A class for generating a Gaussian prior for one or more of the model variables.\n\n :param mean: \\\n A list specifying the means of the Gaussian priors on each of the variables specified\n in the ``variable_indices`` argument.\n\n :param sigma: \\\n A list specifying the standard deviations of the Gaussian priors on each of the\n variables specified in the ``variable_indices`` argument.\n\n :param variable_indices: \\\n A list of integers specifying the indices of the variables to which the prior will apply.\n \"\"\"\n\n def __init__(self, mean, sigma, variable_indices):\n\n self.mean = array(mean, dtype=float64).squeeze()\n self.sigma = array(sigma, dtype=float64).squeeze()\n\n # if parameters were passed as floats, convert from 0D to 1D arrays\n if self.mean.ndim == 0:\n self.mean = self.mean.reshape([1])\n if self.sigma.ndim == 0:\n self.sigma = self.sigma.reshape([1])\n\n self.n_params = self.mean.size\n\n if self.mean.size != self.sigma.size:\n raise ValueError(\n \"mean and sigma arguments must have the same number of elements\"\n )\n\n if self.mean.ndim > 1 or self.sigma.ndim > 1:\n raise ValueError(\"mean and sigma arguments must be 1D arrays\")\n\n if not (self.sigma > 0.0).all():\n raise ValueError('All values of \"sigma\" must be greater than zero')\n\n self.variables = self.check_variables(variable_indices, self.n_params)\n\n # pre-calculate some quantities as an optimisation\n self.inv_sigma = 1.0 / self.sigma\n self.inv_sigma_sqr = self.inv_sigma ** 2\n self.normalisation = -log(self.sigma).sum() - 0.5 * log(2 * pi) * self.n_params\n self.bounds = [(None, None)] * self.n_params\n\n def __call__(self, theta):\n \"\"\"\n Returns the prior log-probability value for the provided set of model parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The prior log-probability value.\n \"\"\"\n z = (self.mean - theta[self.variables]) * self.inv_sigma\n return -0.5 * (z ** 2).sum() + self.normalisation\n\n def gradient(self, theta):\n \"\"\"\n Returns the gradient of the prior log-probability with respect to the model\n parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The gradient of the prior log-probability with respect to the model parameters.\n \"\"\"\n return (self.mean - theta[self.variables]) * self.inv_sigma_sqr\n\n def sample(self):\n \"\"\"\n Draws a sample from the prior.\n\n :returns: \\\n A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n \"\"\"\n return normal(loc=self.mean, scale=self.sigma)\n\n @classmethod\n def combine(cls, priors):\n if not all(isinstance(p, cls) for p in priors):\n 
raise ValueError(f\"All prior objects being combined must be of type {cls}\")\n\n        variables = []\n        for p in priors:\n            variables.extend(p.variables)\n\n        means = concatenate([p.mean for p in priors])\n        sigmas = concatenate([p.sigma for p in priors])\n\n        return cls(mean=means, sigma=sigmas, variable_indices=variables)\n\n\nclass ExponentialPrior(BasePrior):\n    \"\"\"\n    A class for generating an exponential prior for one or more of the model variables.\n\n    :param beta: \\\n        A list specifying the 'beta' parameter value of the exponential priors on each of the\n        variables specified in the ``variable_indices`` argument.\n\n    :param variable_indices: \\\n        A list of integers specifying the indices of the variables to which the prior will apply.\n    \"\"\"\n\n    def __init__(self, beta, variable_indices):\n\n        self.beta = array(beta, dtype=float64).squeeze()\n        if self.beta.ndim == 0:\n            self.beta = self.beta.reshape([1])\n        self.n_params = self.beta.size\n\n        if self.beta.ndim > 1:\n            raise ValueError(\"beta argument must be a 1D array\")\n\n        if not (self.beta > 0.0).all():\n            raise ValueError('All values of \"beta\" must be greater than zero')\n\n        self.variables = self.check_variables(variable_indices, self.n_params)\n\n        # pre-calculate some quantities as an optimisation\n        self.lam = 1.0 / self.beta\n        self.normalisation = log(self.lam).sum()\n        self.zeros = zeros(self.n_params)\n        self.bounds = [(0.0, None)] * self.n_params\n\n    def __call__(self, theta):\n        \"\"\"\n        Returns the prior log-probability value for the provided set of model parameters.\n\n        :param theta: \\\n            The model parameters as a 1D ``numpy.ndarray``.\n\n        :returns: \\\n            The prior log-probability value.\n        \"\"\"\n        if (theta[self.variables] < 0.0).any():\n            return -1e100\n        return -(self.lam * theta[self.variables]).sum() + self.normalisation\n\n    def gradient(self, theta):\n        \"\"\"\n        Returns the gradient of the prior log-probability with respect to the model\n        parameters.\n\n        :param theta: \\\n            The model parameters as a 1D ``numpy.ndarray``.\n\n        :returns: \\\n            The gradient of the prior log-probability with respect to the model parameters.\n        \"\"\"\n        return where(theta[self.variables] >= 0.0, -self.lam, self.zeros)\n\n    def sample(self):\n        \"\"\"\n        Draws a sample from the prior.\n\n        :returns: \\\n            A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n        \"\"\"\n        return exponential(scale=self.beta)\n\n    @classmethod\n    def combine(cls, priors):\n        if not all(isinstance(p, cls) for p in priors):\n            raise ValueError(f\"All prior objects being combined must be of type {cls}\")\n\n        variables = []\n        for p in priors:\n            variables.extend(p.variables)\n\n        betas = concatenate([p.beta for p in priors])\n        return cls(beta=betas, variable_indices=variables)\n\n\nclass UniformPrior(BasePrior):\n    \"\"\"\n    A class for generating a uniform prior for one or more of the model variables.\n\n    :param lower: \\\n        A list specifying the lower bound of the uniform priors on each of the variables\n        specified in the ``variable_indices`` argument.\n\n    :param upper: \\\n        A list specifying the upper bound of the uniform priors on each of the variables\n        specified in the ``variable_indices`` argument.\n\n    :param variable_indices: \\\n        A list of integers specifying the indices of the variables to which the prior will apply.\n    \"\"\"\n\n    def __init__(self, lower, upper, variable_indices):\n        self.lower = array(lower).squeeze()\n        self.upper = array(upper).squeeze()\n\n        # if parameters were passed as floats, convert from 0D to 1D arrays\n        self.lower = self.lower.reshape([1]) if self.lower.ndim == 
0 else self.lower\n self.upper = self.upper.reshape([1]) if self.upper.ndim == 0 else self.upper\n\n self.n_params = self.lower.size\n self.grad = zeros(self.n_params)\n\n if self.lower.size != self.upper.size:\n raise ValueError(\n \"\"\"'lower' and 'upper' arguments must have the same number of elements\"\"\"\n )\n\n if self.lower.ndim > 1 or self.upper.ndim > 1:\n raise ValueError(\"'lower' and 'upper' arguments must be 1D arrays\")\n\n if (self.upper <= self.lower).any():\n raise ValueError(\n \"All values in 'lower' must be less than the corresponding values in 'upper'\"\n )\n\n self.variables = self.check_variables(variable_indices, self.n_params)\n\n # pre-calculate some quantities as an optimisation\n self.normalisation = -log(self.upper - self.lower).sum()\n self.bounds = [(lo, up) for lo, up in zip(self.lower, self.upper)]\n\n def __call__(self, theta):\n \"\"\"\n Returns the prior log-probability value for the provided set of model parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The prior log-probability value.\n \"\"\"\n t = theta[self.variables]\n inside = (self.lower <= t) & (t <= self.upper)\n if inside.all():\n return self.normalisation\n return -1e100\n\n def gradient(self, theta):\n \"\"\"\n Returns the gradient of the prior log-probability with respect to the model\n parameters.\n\n :param theta: \\\n The model parameters as a 1D ``numpy.ndarray``.\n\n :returns: \\\n The gradient of the prior log-probability with respect to the model parameters.\n \"\"\"\n return self.grad\n\n def sample(self):\n \"\"\"\n Draws a sample from the prior.\n\n :returns: \\\n A single sample from the prior distribution as a 1D ``numpy.ndarray``.\n \"\"\"\n return uniform(low=self.lower, high=self.upper)\n\n @classmethod\n def combine(cls, priors):\n if not all(isinstance(p, cls) for p in priors):\n raise ValueError(f\"All prior objects being combined must be of type {cls}\")\n\n variables = []\n for p in priors:\n variables.extend(p.variables)\n\n lower = concatenate([p.lower for p in priors])\n upper = concatenate([p.upper for p in priors])\n\n return cls(lower=lower, upper=upper, variable_indices=variables)\n" ]
[ [ "numpy.random.uniform", "numpy.zeros", "numpy.where", "numpy.log", "numpy.random.exponential", "numpy.random.normal", "numpy.concatenate", "numpy.array" ] ]
mschwoer/alphapept
[ "446b3c8b2a20619a74ff872c24a01fed8b99a20a" ]
[ "alphapept/ext/bruker/timsdata.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Python wrapper for timsdata.dll\"\"\"\n\nimport numpy as np\nimport sqlite3\nimport os, sys\nfrom ctypes import *\n\nif sys.platform[:5] == \"win32\":\n libname = \"timsdata.dll\"\nelif sys.platform[:5] == \"linux\":\n libname = \"libtimsdata.so\"\nelse:\n raise Exception(\"Unsupported platform.\")\n\n\npath = os.path.dirname(os.path.abspath(__file__))\nlibname = os.path.join(path, libname)\n\ndll = cdll.LoadLibrary(libname)\ndll.tims_open.argtypes = [ c_char_p, c_uint32 ]\ndll.tims_open.restype = c_uint64\ndll.tims_close.argtypes = [ c_uint64 ]\ndll.tims_close.restype = None\ndll.tims_get_last_error_string.argtypes = [ c_char_p, c_uint32 ]\ndll.tims_get_last_error_string.restype = c_uint32\ndll.tims_has_recalibrated_state.argtypes = [ c_uint64 ]\ndll.tims_has_recalibrated_state.restype = c_uint32\ndll.tims_read_scans_v2.argtypes = [ c_uint64, c_int64, c_uint32, c_uint32, c_void_p, c_uint32 ]\ndll.tims_read_scans_v2.restype = c_uint32\nMSMS_SPECTRUM_FUNCTOR = CFUNCTYPE(None, c_int64, c_uint32, POINTER(c_double), POINTER(c_float))\ndll.tims_read_pasef_msms.argtypes = [ c_uint64, POINTER(c_int64), c_uint32, MSMS_SPECTRUM_FUNCTOR ]\ndll.tims_read_pasef_msms.restype = c_uint32\ndll.tims_read_pasef_msms_for_frame.argtypes = [ c_uint64, c_int64, MSMS_SPECTRUM_FUNCTOR ]\ndll.tims_read_pasef_msms_for_frame.restype = c_uint32\nMSMS_PROFILE_SPECTRUM_FUNCTOR = CFUNCTYPE(None, c_int64, c_uint32, POINTER(c_int32))\nif sys.platform[:5] == \"win32\":\n dll.tims_read_pasef_profile_msms.argtypes = [ c_uint64, POINTER(c_int64), c_uint32, MSMS_PROFILE_SPECTRUM_FUNCTOR ]\n dll.tims_read_pasef_profile_msms.restype = c_uint32\n dll.tims_read_pasef_profile_msms_for_frame.argtypes = [ c_uint64, c_int64, MSMS_PROFILE_SPECTRUM_FUNCTOR ]\n dll.tims_read_pasef_profile_msms_for_frame.restype = c_uint32\n\nconvfunc_argtypes = [ c_uint64, c_int64, POINTER(c_double), POINTER(c_double), c_uint32 ]\n\ndll.tims_index_to_mz.argtypes = convfunc_argtypes\ndll.tims_index_to_mz.restype = c_uint32\ndll.tims_mz_to_index.argtypes = convfunc_argtypes\ndll.tims_mz_to_index.restype = c_uint32\n\ndll.tims_scannum_to_oneoverk0.argtypes = convfunc_argtypes\ndll.tims_scannum_to_oneoverk0.restype = c_uint32\ndll.tims_oneoverk0_to_scannum.argtypes = convfunc_argtypes\ndll.tims_oneoverk0_to_scannum.restype = c_uint32\n\ndll.tims_scannum_to_voltage.argtypes = convfunc_argtypes\ndll.tims_scannum_to_voltage.restype = c_uint32\ndll.tims_voltage_to_scannum.argtypes = convfunc_argtypes\ndll.tims_voltage_to_scannum.restype = c_uint32\n\nif sys.platform[:5] == \"win32\":\n dll.tims_oneoverk0_to_ccs_for_mz.argtypes = [c_double, c_int32, c_double]\n dll.tims_oneoverk0_to_ccs_for_mz.restype = c_double\n\n dll.tims_ccs_to_oneoverk0_for_mz.argtypes = [c_double, c_int32, c_double]\n dll.tims_ccs_to_oneoverk0_for_mz.restype = c_double\n\ndef throwLastTimsDataError (dll_handle):\n \"\"\"Throw last TimsData error string as an exception.\"\"\"\n\n len = dll_handle.tims_get_last_error_string(None, 0)\n buf = create_string_buffer(len)\n dll_handle.tims_get_last_error_string(buf, len)\n raise RuntimeError(buf.value)\n\n# Decodes a properties BLOB of type 12 (array of strings = concatenation of\n# zero-terminated UTF-8 strings). (The BLOB object returned by an SQLite query can be\n# directly put into this function.) 
\\returns a list of unicode strings.\ndef decodeArrayOfStrings (blob):\n if blob is None:\n return None # property not set\n\n if len(blob) == 0:\n return [] # empty list\n\n blob = bytearray(blob)\n if blob[-1] != 0:\n raise ValueError(\"Illegal BLOB contents.\") # trailing nonsense\n\n if sys.version_info.major == 2:\n return unicode(str(blob), 'utf-8').split('\\0')[:-1]\n if sys.version_info.major == 3:\n return str(blob, 'utf-8').split('\\0')[:-1]\n\n\n# Convert 1/K0 to CCS for a given charge and mz\ndef oneOverK0ToCCSforMz(ook0, charge, mz):\n return dll.tims_oneoverk0_to_ccs_for_mz(ook0, charge, mz)\n\n# Convert CCS to 1/K0 for a given charge and mz\ndef ccsToOneOverK0ToCCSforMz(ccs, charge, mz):\n return dll.tims_ccs_to_oneoverk0_for_mz(ccs, charge, mz)\n\n\nclass TimsData:\n\n def __init__ (self, analysis_directory, use_recalibrated_state=False):\n\n if sys.version_info.major == 2:\n if not isinstance(analysis_directory, unicode):\n raise ValueError(\"analysis_directory must be a Unicode string.\")\n if sys.version_info.major == 3:\n if not isinstance(analysis_directory, str):\n raise ValueError(\"analysis_directory must be a string.\")\n\n self.dll = dll\n\n self.handle = self.dll.tims_open(\n analysis_directory.encode('utf-8'),\n 1 if use_recalibrated_state else 0 )\n if self.handle == 0:\n throwLastTimsDataError(self.dll)\n\n self.conn = sqlite3.connect(os.path.join(analysis_directory, \"analysis.tdf\"))\n\n self.initial_frame_buffer_size = 128 # may grow in readScans()\n\n def __del__ (self):\n if hasattr(self, 'handle'):\n self.dll.tims_close(self.handle)\n\n def __callConversionFunc (self, frame_id, input_data, func):\n\n if type(input_data) is np.ndarray and input_data.dtype == np.float64:\n # already \"native\" format understood by DLL -> avoid extra copy\n in_array = input_data\n else:\n # convert data to format understood by DLL:\n in_array = np.array(input_data, dtype=np.float64)\n\n cnt = len(in_array)\n out = np.empty(shape=cnt, dtype=np.float64)\n success = func(self.handle, frame_id,\n in_array.ctypes.data_as(POINTER(c_double)),\n out.ctypes.data_as(POINTER(c_double)),\n cnt)\n\n if success == 0:\n throwLastTimsDataError(self.dll)\n\n return out\n\n def indexToMz (self, frame_id, indices):\n return self.__callConversionFunc(frame_id, indices, self.dll.tims_index_to_mz)\n\n def mzToIndex (self, frame_id, mzs):\n return self.__callConversionFunc(frame_id, mzs, self.dll.tims_mz_to_index)\n\n def scanNumToOneOverK0 (self, frame_id, scan_nums):\n return self.__callConversionFunc(frame_id, scan_nums, self.dll.tims_scannum_to_oneoverk0)\n\n def oneOverK0ToScanNum (self, frame_id, mobilities):\n return self.__callConversionFunc(frame_id, mobilities, self.dll.tims_oneoverk0_to_scannum)\n\n def scanNumToVoltage (self, frame_id, scan_nums):\n return self.__callConversionFunc(frame_id, scan_nums, self.dll.tims_scannum_to_voltage)\n\n def voltageToScanNum (self, frame_id, voltages):\n return self.__callConversionFunc(frame_id, voltages, self.dll.tims_voltage_to_scannum)\n\n\n # Output: list of tuples (indices, intensities)\n def readScans (self, frame_id, scan_begin, scan_end):\n\n # buffer-growing loop\n while True:\n cnt = int(self.initial_frame_buffer_size) # necessary cast to run with python 3.5\n buf = np.empty(shape=cnt, dtype=np.uint32)\n len = 4 * cnt\n\n required_len = self.dll.tims_read_scans_v2(self.handle, frame_id, scan_begin, scan_end,\n buf.ctypes.data_as(POINTER(c_uint32)),\n len)\n if required_len == 0:\n throwLastTimsDataError(self.dll)\n\n if required_len > 
len:\n if required_len > 16777216:\n # arbitrary limit for now...\n raise RuntimeError(\"Maximum expected frame size exceeded.\")\n self.initial_frame_buffer_size = required_len / 4 + 1 # grow buffer\n else:\n break\n\n result = []\n d = scan_end - scan_begin\n for i in range(scan_begin, scan_end):\n npeaks = buf[i-scan_begin]\n indices = buf[d : d+npeaks]\n d += npeaks\n intensities = buf[d : d+npeaks]\n d += npeaks\n result.append((indices,intensities))\n\n return result\n\n # read some peak-picked MS/MS spectra for a given list of precursors; returns a dict mapping\n # 'precursor_id' to a pair of arrays (mz_values, area_values).\n def readPasefMsMs (self, precursor_list):\n precursors_for_dll = np.array(precursor_list, dtype=np.int64)\n\n result = {}\n\n @MSMS_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_peaks, mz_values, area_values):\n result[precursor_id] = (mz_values[0:num_peaks], area_values[0:num_peaks])\n\n rc = self.dll.tims_read_pasef_msms(self.handle,\n precursors_for_dll.ctypes.data_as(POINTER(c_int64)),\n len(precursor_list),\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n\n\t\t# read peak-picked MS/MS spectra for a given frame; returns a dict mapping\n # 'precursor_id' to a pair of arrays (mz_values, area_values).\n def readPasefMsMsForFrame (self, frame_id):\n result = {}\n\n @MSMS_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_peaks, mz_values, area_values):\n result[precursor_id] = (mz_values[0:num_peaks], area_values[0:num_peaks])\n\n rc = self.dll.tims_read_pasef_msms_for_frame(self.handle,\n frame_id,\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n\n\t\t# read some \"quasi profile\" MS/MS spectra for a given list of precursors; returns a dict mapping\n # 'precursor_id' to the profil arrays (intensity_values).\n def readPasefProfileMsMs (self, precursor_list):\n precursors_for_dll = np.array(precursor_list, dtype=np.int64)\n\n result = {}\n\n @MSMS_PROFILE_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_points, intensity_values):\n result[precursor_id] = intensity_values[0:num_points]\n\n rc = self.dll.tims_read_pasef_profile_msms(self.handle,\n precursors_for_dll.ctypes.data_as(POINTER(c_int64)),\n len(precursor_list),\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n\n # read \"quasi profile\" MS/MS spectra for a given frame; returns a dict mapping\n # 'precursor_id' to the profil arrays (intensity_values).\n def readPasefProfileMsMsForFrame (self, frame_id):\n result = {}\n\n @MSMS_PROFILE_SPECTRUM_FUNCTOR\n def callback_for_dll(precursor_id, num_points, intensity_values):\n result[precursor_id] = intensity_values[0:num_points]\n\n rc = self.dll.tims_read_pasef_profile_msms_for_frame(self.handle,\n frame_id,\n callback_for_dll)\n\n if rc == 0:\n throwLastTimsDataError(self.dll)\n\n return result\n" ]
[ [ "numpy.array", "numpy.empty" ] ]
mahanswaray/simpletransformers
[ "44a97d689b6bd19495e698ae918e67c80828559e" ]
[ "simpletransformers/classification/multi_label_classification_model.py" ]
[ "import logging\nimport random\nimport warnings\nfrom multiprocessing import cpu_count\n\nimport numpy as np\nimport torch\nfrom transformers import (\n WEIGHTS_NAME,\n AlbertConfig,\n AlbertTokenizer,\n BertConfig,\n BertTokenizer,\n DistilBertConfig,\n DistilBertTokenizer,\n ElectraConfig,\n ElectraTokenizer,\n FlaubertConfig,\n FlaubertTokenizer,\n RobertaConfig,\n RobertaTokenizer,\n XLMConfig,\n XLMRobertaConfig,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetTokenizer,\n)\n\nfrom simpletransformers.classification import ClassificationModel\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.config.model_args import MultiLabelClassificationArgs\nfrom simpletransformers.custom_models.models import (\n AlbertForMultiLabelSequenceClassification,\n BertForMultiLabelSequenceClassification,\n DistilBertForMultiLabelSequenceClassification,\n ElectraForMultiLabelSequenceClassification,\n FlaubertForMultiLabelSequenceClassification,\n RobertaForMultiLabelSequenceClassification,\n XLMForMultiLabelSequenceClassification,\n XLMRobertaForMultiLabelSequenceClassification,\n XLNetForMultiLabelSequenceClassification,\n)\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiLabelClassificationModel(ClassificationModel):\n def __init__(\n self,\n model_type,\n model_name,\n num_labels=None,\n pos_weight=None,\n args=None,\n use_cuda=True,\n cuda_device=-1,\n **kwargs,\n ):\n\n \"\"\"\n Initializes a MultiLabelClassification model.\n\n Args:\n model_type: The type of model (bert, roberta)\n model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).\n num_labels (optional): The number of labels or classes in the dataset.\n pos_weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. 
Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\"\n\n MODEL_CLASSES = {\n \"bert\": (BertConfig, BertForMultiLabelSequenceClassification, BertTokenizer,),\n \"roberta\": (RobertaConfig, RobertaForMultiLabelSequenceClassification, RobertaTokenizer,),\n \"xlnet\": (XLNetConfig, XLNetForMultiLabelSequenceClassification, XLNetTokenizer,),\n \"xlm\": (XLMConfig, XLMForMultiLabelSequenceClassification, XLMTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForMultiLabelSequenceClassification, DistilBertTokenizer,),\n \"albert\": (AlbertConfig, AlbertForMultiLabelSequenceClassification, AlbertTokenizer,),\n \"flaubert\": (FlaubertConfig, FlaubertForMultiLabelSequenceClassification, FlaubertTokenizer,),\n \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForMultiLabelSequenceClassification, XLMRobertaTokenizer,),\n \"electra\": (ElectraConfig, ElectraForMultiLabelSequenceClassification, ElectraTokenizer),\n }\n\n self.args = self._load_model_args(model_name)\n\n if isinstance(args, dict):\n self.args.update_from_dict(args)\n elif isinstance(args, MultiLabelClassificationArgs):\n self.args = args\n\n if \"sweep_config\" in kwargs:\n sweep_config = kwargs.pop(\"sweep_config\")\n sweep_values = {key: value[\"value\"] for key, value in sweep_config.as_dict().items() if key != \"_wandb\"}\n self.args.update_from_dict(sweep_values)\n\n if self.args.manual_seed:\n random.seed(self.args.manual_seed)\n np.random.seed(self.args.manual_seed)\n torch.manual_seed(self.args.manual_seed)\n if self.args.n_gpu > 0:\n torch.cuda.manual_seed_all(self.args.manual_seed)\n\n if not use_cuda:\n self.args.fp16 = False\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n if num_labels:\n self.config = config_class.from_pretrained(model_name, num_labels=num_labels, **self.args.config)\n self.num_labels = num_labels\n else:\n self.config = config_class.from_pretrained(model_name, **self.args.config)\n self.num_labels = self.config.num_labels\n self.pos_weight = pos_weight\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \" Make sure CUDA is available or set use_cuda=False.\"\n )\n else:\n self.device = \"cpu\"\n\n if self.pos_weight:\n self.model = model_class.from_pretrained(\n model_name, config=self.config, pos_weight=torch.Tensor(self.pos_weight).to(self.device), **kwargs\n )\n else:\n self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)\n\n self.results = {}\n\n self.tokenizer = tokenizer_class.from_pretrained(model_name, do_lower_case=self.args.do_lower_case, **kwargs)\n\n self.args.model_name = model_name\n self.args.model_type = model_type\n\n if self.args.wandb_project and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. 
Wandb disabled.\")\n self.args.wandb_project = None\n\n def _load_model_args(self, input_dir):\n args = MultiLabelClassificationArgs()\n args.load(input_dir)\n return args\n\n def train_model(\n self,\n train_df,\n multi_label=True,\n eval_df=None,\n output_dir=None,\n show_running_loss=True,\n args=None,\n verbose=True,\n **kwargs,\n ):\n return super().train_model(\n train_df,\n multi_label=multi_label,\n eval_df=eval_df,\n output_dir=output_dir,\n show_running_loss=show_running_loss,\n verbose=True,\n args=args,\n **kwargs,\n )\n\n def eval_model(self, eval_df, multi_label=True, output_dir=None, verbose=False, silent=False, **kwargs):\n return super().eval_model(\n eval_df, output_dir=output_dir, multi_label=multi_label, verbose=verbose, silent=silent, **kwargs\n )\n\n def evaluate(self, eval_df, output_dir, multi_label=True, prefix=\"\", verbose=True, silent=False, **kwargs):\n return super().evaluate(\n eval_df, output_dir, multi_label=multi_label, prefix=prefix, verbose=verbose, silent=silent, **kwargs\n )\n\n def load_and_cache_examples(\n self, examples, evaluate=False, no_cache=False, multi_label=True, verbose=True, silent=False\n ):\n return super().load_and_cache_examples(\n examples, evaluate=evaluate, no_cache=no_cache, multi_label=multi_label, verbose=verbose, silent=silent\n )\n\n def compute_metrics(self, preds, labels, eval_examples, multi_label=True, **kwargs):\n return super().compute_metrics(preds, labels, eval_examples, multi_label=multi_label, **kwargs)\n\n def predict(self, to_predict, multi_label=True):\n return super().predict(to_predict, multi_label=multi_label)\n" ]
[ [ "torch.cuda.manual_seed_all", "torch.manual_seed", "numpy.random.seed", "torch.cuda.is_available", "torch.device", "torch.Tensor" ] ]
Tudor67/Neural-Networks-Assignments
[ "7376e9d3b0059df2f2b21d56787c47d3c1ba6746" ]
[ "2018-2019/project/utils/evaluation.py" ]
[ "import config\nimport numpy as np\nimport tensorflow as tf\n\ndef get_tp_fp_fn(a, b):\n a = np.equal(a, 1)\n not_a = np.logical_not(a)\n b = np.equal(b, 1)\n not_b = np.logical_not(b)\n \n tp = np.logical_and(a, b).sum().astype(np.float64)\n fp = np.logical_and(a, not_b).sum().astype(np.float64)\n fn = np.logical_and(not_a, b).sum().astype(np.float64)\n \n return tp, fp, fn\n\ndef jaccard(a, b):\n tp, fp, fn = get_tp_fp_fn(a, b)\n \n jaccard_coef = None\n if tp + fp + fn == 0:\n jaccard_coef = 1.\n else:\n jaccard_coef = tp / (tp + fp + fn)\n \n return jaccard_coef\n\ndef dice(a, b):\n tp, fp, fn = get_tp_fp_fn(a, b)\n \n dice_coef = None\n if tp + fp + fn == 0:\n dice_coef = 1.\n else:\n dice_coef = (2 * tp) / (2 * tp + fp + fn)\n \n return dice_coef\n\ndef jaccard_and_dice(preds, gts, thr):\n jaccard_and_dice_res = np.zeros((len(preds), 2))\n \n for idx, (pred, gt) in enumerate(zip(preds, gts)):\n pred = (pred >= thr)\n \n jaccard_coef = jaccard(pred, gt)\n dice_coef = dice(pred, gt)\n \n jaccard_and_dice_res[idx] = (jaccard_coef, dice_coef)\n \n return jaccard_and_dice_res\n\n# tensorflow implementation (with thr)\ndef tf_get_tp_fp_fn(a_in, b_in):\n a = tf.greater_equal(a_in, config.PRED_THR)\n not_a = tf.logical_not(a)\n b = tf.greater_equal(b_in, config.PRED_THR)\n not_b = tf.logical_not(b)\n \n tp_and = tf.logical_and(a, b)\n tp_count = tf.count_nonzero(tp_and)\n tp = tf.cast(tp_count, tf.float64)\n \n fp_and = tf.logical_and(a, not_b)\n fp_count = tf.count_nonzero(fp_and)\n fp = tf.cast(fp_count, tf.float64)\n \n fn_and = tf.logical_and(not_a, b)\n fn_count = tf.count_nonzero(fn_and)\n fn = tf.cast(fn_count, tf.float64)\n \n return tp, fp, fn\n\ndef tf_jaccard(a, b):\n tp, fp, fn = tf_get_tp_fp_fn(a, b)\n jaccard_coef = tf.cond(tf.equal(tp + fp + fn, 0),\n lambda: tf.constant(1, tf.float64),\n lambda: tp / (tp + fp + fn))\n return jaccard_coef\n\ndef tf_dice(a, b):\n tp, fp, fn = tf_get_tp_fp_fn(a, b)\n dice_coef = tf.cond(tf.equal(tp + fp + fn, 0),\n lambda: tf.constant(1, tf.float64),\n lambda: (2 * tp) / (2 * tp + fp + fn))\n return dice_coef" ]
[ [ "tensorflow.greater_equal", "tensorflow.equal", "tensorflow.logical_not", "numpy.equal", "numpy.logical_and", "tensorflow.logical_and", "numpy.logical_not", "tensorflow.cast", "tensorflow.constant", "tensorflow.count_nonzero" ] ]
belkhir-nacim/generative_model_toolbox
[ "573e69979a77030004afe2df216893f556225454" ]
[ "generative_models_toolbox/vqvae2/sample.py" ]
[ "import argparse\nimport os\n\nimport torch\nimport torchvision.utils\nfrom tqdm import tqdm\n\nfrom .vqvae import VQVAE\nfrom .pixelsnail import PixelSNAIL\n\n\[email protected]_grad()\ndef sample_model(model, device, batch, size, temperature, condition=None):\n row = torch.zeros(batch, *size, dtype=torch.int64).to(device)\n cache = {}\n\n for i in tqdm(range(size[0])):\n for j in range(size[1]):\n out, cache = model(row[:, : i + 1, :], condition=condition, cache=cache)\n prob = torch.softmax(out[:, :, i, j] / temperature, 1)\n sample = torch.multinomial(prob, 1).squeeze(-1)\n row[:, i, j] = sample\n return row\n\n\ndef load_model(model: str, checkpoint: str, device):\n ckpt = torch.load(checkpoint)\n\n if 'args' in ckpt:\n args = ckpt['args']\n\n if model == 'vqvae':\n model = VQVAE()\n\n elif model == 'pixelsnail_top':\n model = PixelSNAIL(\n [32, 32],\n 512,\n args.channel,\n 5,\n 4,\n args.n_res_block,\n args.n_res_channel,\n dropout=args.dropout,\n n_out_res_block=args.n_out_res_block,\n )\n\n elif model == 'pixelsnail_bottom':\n model = PixelSNAIL(\n [64, 64],\n 512,\n args.channel,\n 5,\n 4,\n args.n_res_block,\n args.n_res_channel,\n attention=False,\n dropout=args.dropout,\n n_cond_res_block=args.n_cond_res_block,\n cond_res_channel=args.n_res_channel,\n )\n\n if 'model' in ckpt:\n ckpt = ckpt['model']\n\n model.load_state_dict(ckpt)\n model = model.to(device)\n model.eval()\n\n return model\n\n\ndef main():\n device = 'cuda'\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch', type=int, default=8)\n parser.add_argument('--vqvae', type=str)\n parser.add_argument('--top', type=str)\n parser.add_argument('--bottom', type=str)\n parser.add_argument('--temp', type=float, default=1.0)\n parser.add_argument('filename', type=str)\n\n args = parser.parse_args()\n\n model_vqvae = load_model('vqvae', args.vqvae, device)\n model_top = load_model('pixelsnail_top', args.top, device)\n model_bottom = load_model('pixelsnail_bottom', args.bottom, device)\n\n top_sample = sample_model(model_top, device, args.batch, [32, 32], args.temp)\n bottom_sample = sample_model(\n model_bottom, device, args.batch, [64, 64], args.temp, condition=top_sample\n )\n\n decoded_sample = model_vqvae.decode_code(top_sample, bottom_sample)\n decoded_sample = decoded_sample.clamp(-1, 1)\n\n torchvision.utils.save_image(decoded_sample, args.filename, normalize=True, range=(-1, 1))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load", "torch.no_grad", "torch.multinomial", "torch.zeros", "torch.softmax" ] ]
tatsushi-ikeda/pyheom
[ "d069fcf791959942b7a0357cda349d9976e06313" ]
[ "pyheom/pyheom.py" ]
[ "# \n# LibHEOM: Copyright (c) Tatsushi Ikeda\n# This library is distributed under BSD 3-Clause License.\n# See LINCENSE.txt for licence.\n# ------------------------------------------------------------------------\n\nimport enum\nimport sys\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse\nimport importlib\n\npylibheom = importlib.import_module(\"pylibheom\")\nfrom pyheom.noise_decomposition import *\n\nversion = getattr(pylibheom, 'version')()\n__version__ = version\n\nunit = enum.Enum('unit',\n '''dimensionless\n femtosecond\n picosecond\n wavenumber\n electronvolt''')\n\nhbar__J_s = 1.05457180013e-34\nUNIT_ENERGY_VALUE__J = {\n unit.wavenumber: 1.98644582441459e-23, # (299792458*100*6.62607004081e-34)\n unit.electronvolt: 1.602176620898e-19,\n};\nUNIT_TIME_VALUE__S = {\n unit.femtosecond: 1.0e-15,\n unit.picosecond: 1.0e-12,\n}\n\nunits = {'energy':unit.dimensionless,\n 'time': unit.dimensionless}\n\ndef calc_unit():\n if (units['energy'] == unit.dimensionless or units['time'] == unit.dimensionless):\n if (units['energy'] == unit.dimensionless and units['time'] == unit.dimensionless):\n result = 1.0\n else:\n print('[Error] Unit mismatch error: Both unit_energy and unit_time should be dimensionless.', file=sys.stderr)\n sys.exit(1)\n else:\n result = (UNIT_ENERGY_VALUE__J[units['energy']]\n *UNIT_TIME_VALUE__S[units['time']]\n /hbar__J_s)\n return result\n\n\ndef get_coo_matrix(matrix):\n impl_class_name = \"coo_matrix\"\n if matrix.dtype == np.complex64:\n ipml_class_name += \"_c\"\n elif matrix.dtype == np.complex128:\n impl_class_name += \"_z\"\n else:\n print('[Error] Unsupported matrix type: {}.'.format(matrix.dtype),\n file=sys.stderr)\n sys.exit(1)\n coo = sp.sparse.coo_matrix(matrix)\n impl_class = getattr(pylibheom, impl_class_name)\n return impl_class(\n coo.shape[0],\n coo.shape[1],\n coo.nnz,\n coo.row,\n coo.col,\n coo.data)\n \nclass heom():\n def __init__(self,\n H,\n noises,\n max_tier,\n matrix_type='sparse',\n hrchy_connection='loop',\n hrchy_filter=None,\n gpu_device=None,\n callback=lambda lidx, est: None,\n callback_interval=1024,\n unrolling=False):\n self.n_state = H.shape[0]\n \n impl_class_name = 'heom_z'\n\n if matrix_type == 'dense':\n impl_class_name += 'd'\n elif matrix_type == 'sparse':\n impl_class_name += 's'\n else:\n print('[Error] Unknown internal matrix type: {}.'.format(\n matrix_type))\n sys.exit(1)\n \n impl_class_name += 'l'\n\n if hrchy_connection == 'loop':\n impl_class_name += 'l'\n elif hrchy_connection == 'hierarchical-Liouville':\n impl_class_name += 'h'\n else:\n print('[Error] Unknown hrchy_connection: {}.'.format(\n hrchy_connection))\n sys.exit(1)\n\n if unrolling and self.n_state in [2, 3]:\n impl_class_name += '_{}'.format(self.n_state)\n \n if (not gpu_device is None):\n if getattr(pylibheom, 'support_gpu_parallelization'):\n impl_class_name += '_gpu'\n else:\n print('[Error] gpu parallelization is not supported.')\n print(' specified gpu device: {}.'.format(gpu_device))\n sys.exit(1)\n\n self.impl = getattr(pylibheom, impl_class_name)()\n \n if (not gpu_device is None):\n self.impl.set_device_number(gpu_device)\n \n self.impl.set_hamiltonian(get_coo_matrix(H.astype(np.complex128)))\n\n n_noise = len(noises)\n self.impl.alloc_noises(n_noise)\n \n self.noises = []\n \n for u in range(n_noise):\n gamma = noises[u][\"C\"][\"gamma\"].astype(np.complex128)\n phi_0 = noises[u][\"C\"][\"phi_0\"].astype(np.complex128)\n sigma = noises[u][\"C\"][\"sigma\"].astype(np.complex128)\n s = noises[u][\"C\"][\"s\"].astype(np.complex128)\n 
a       = noises[u][\"C\"][\"a\"].astype(np.complex128)\n            S_delta = complex(noises[u][\"C\"][\"S_delta\"])\n            self.noises.append(type(\"noise\", (object,),\n                                    dict(gamma=gamma,\n                                         phi_0=phi_0,\n                                         sigma_s=s.T@sigma,\n                                         sigma_a=a.T@sigma,\n                                         S_delta=S_delta)))\n            self.impl.set_noise(u,\n                                get_coo_matrix(noises[u][\"V\"].astype(np.complex128)),\n                                get_coo_matrix(gamma),\n                                phi_0,\n                                sigma,\n                                get_coo_matrix(s),\n                                S_delta,\n                                get_coo_matrix(a))\n\n        if hrchy_filter:\n            self.hrchy_filter = lambda index, depth, lk: hrchy_filter(index, depth, lk, self.noises)\n        else:\n            self.hrchy_filter = lambda index, depth, lk, noises: True\n\n        self.impl.linearize()\n        self.n_hrchy \\\n            = self.impl.alloc_hrchy(max_tier,\n                                    callback,\n                                    callback_interval,\n                                    self.hrchy_filter,\n                                    False if hrchy_filter is None else True)\n        self.rho_h = np.zeros((self.n_state, self.n_state, self.n_hrchy),\n                              dtype=np.complex128, order='F')\n        \n        self.impl.init_aux_vars()\n        \n    def construct_commutator(self,\n                             x, coef_l, coef_r,\n                             callback=lambda lidx, est: None,\n                             callback_interval=1024):\n        x_coo = sp.sparse.coo_matrix(x)\n        self.impl.construct_commutator(x_coo.shape[0],\n                                       x_coo.shape[1],\n                                       x_coo.nnz,\n                                       x_coo.row,\n                                       x_coo.col,\n                                       x_coo.data.astype(np.complex128),\n                                       coef_l,\n                                       coef_r,\n                                       callback,\n                                       callback_interval)\n\n    def apply_commutator(self):\n        self.impl.apply_commutator(self.rho_h.ravel(order='F'))\n\n    def set_rho(self, rho):\n        self.rho_h[:,:,0] = rho[:,:]\n\n    def get_rho(self):\n        return np.copy(self.rho_h[:,:,0])\n\n    def set_rho_h(self, rho_h):\n        self.rho_h[:,:,:] = rho_h[:,:,:]\n\n    def get_rho_h(self):\n        return np.copy(self.rho_h[:,:,:])\n\n    def calc_diff(self, rho_h):\n        drho_h_dt = np.zeros_like(rho_h)\n        self.impl.calc_diff(drho_h_dt.ravel(order='F'),\n                            rho_h.ravel(order='F'),\n                            1, 0)\n        return drho_h_dt\n\n    def get_diff_func(self):\n        return lambda t, rho_h: self.calc_diff(rho_h)\n\n    def solve(self, dt__unit, count,\n              callback=lambda t, rho: None,\n              callback_interval=1):\n        self.impl.solve(self.rho_h.ravel(order='F'),\n                        dt__unit, dt__unit*calc_unit(),\n                        callback_interval, count//callback_interval,\n                        lambda t: callback(t, self.rho_h[:,:,0]))\n\n\nclass redfield():\n    def __init__(self,\n                 H,\n                 noises,\n                 matrix_type='sparse',\n                 operator_space='Liouville',\n                 gpu_device=None,\n                 callback=lambda lidx: None,\n                 callback_interval=1024,\n                 unrolling=False,\n                 secular=False,\n                 H_c=None):\n        self.n_state = H.shape[0]\n        \n        impl_class_name = 'redfield_z'\n\n        if matrix_type == 'dense':\n            impl_class_name += 'd'\n        elif matrix_type == 'sparse':\n            impl_class_name += 's'\n        else:\n            print('[Error] Unknown internal matrix type: {}.'.format(\n                matrix_type))\n            sys.exit(1)\n\n        if operator_space == 'Hilbert':\n            impl_class_name += 'h'\n        elif operator_space == 'Liouville':\n            impl_class_name += 'l'\n        else:\n            print('[Error] Unknown internal operator space: {}.'.format(\n                operator_space))\n            sys.exit(1)\n        \n        if unrolling and self.n_state in [2, 3]:\n            impl_class_name += '_{}'.format(self.n_state)\n        \n        if (not gpu_device is None):\n            if getattr(pylibheom, 'support_gpu_parallelization'):\n                impl_class_name += '_gpu'\n            else:\n                print('[Error] gpu parallelization is not supported.')\n                print('        specified gpu device: {}.'.format(gpu_device))\n                sys.exit(1)\n        \n        self.impl = getattr(pylibheom, impl_class_name)()\n        \n        if (not gpu_device is None):\n            self.impl.set_device_number(gpu_device)\n        \n        E, self.Z = np.linalg.eig(H)\n        self.impl.set_hamiltonian(get_coo_matrix(np.diag(E).astype(np.complex128)))\n        if H_c is None:\n            H_c = np.zeros_like(H)\n        \n        
self.impl.set_redfield_options(get_coo_matrix(self.Z.T.conj()@H_c@(self.Z).astype(np.complex128)),\n secular)\n\n n_noise = len(noises)\n self.impl.alloc_noises(n_noise)\n for u in range(n_noise):\n V = get_coo_matrix((self.Z.T.conj())@noises[u][\"V\"]@(self.Z).astype(np.complex128))\n if \"func\" in noises[u][\"C\"]:\n self.impl.set_noise_func(u, V, noises[u][\"C\"][\"func\"])\n else: \n gamma = noises[u][\"C\"][\"gamma\"]\n phi_0 = noises[u][\"C\"][\"phi_0\"]\n sigma = noises[u][\"C\"][\"sigma\"]\n s = noises[u][\"C\"][\"s\"]\n a = noises[u][\"C\"][\"a\"]\n S_delta = noises[u][\"C\"][\"S_delta\"]\n self.impl.set_noise(u,\n V,\n get_coo_matrix(gamma.astype(np.complex128)),\n phi_0.astype(np.complex128),\n sigma.astype(np.complex128),\n get_coo_matrix(s.astype(np.complex128)),\n complex(S_delta),\n get_coo_matrix(a.astype(np.complex128)))\n \n \n self.rho = np.zeros((self.n_state, self.n_state),\n dtype=np.complex128,\n order='F')\n \n self.impl.init_aux_vars()\n \n def construct_commutator(self,\n x, coef_l, coef_r,\n callback=lambda lidx: None,\n callback_interval=1024):\n x_coo = sp.sparse.coo_matrix((self.Z.T.conj())@x@(self.Z))\n self.impl.construct_commutator(x_coo.shape[0],\n x_coo.shape[1],\n x_coo.nnz,\n x_coo.row,\n x_coo.col,\n x_coo.data.astype(np.complex128),\n coef_l,\n coef_r,\n callback,\n callback_interval)\n\n def apply_commutator(self):\n self.impl.apply_commutator(self.rho.ravel(order='F'))\n \n def set_rho(self, rho):\n self.rho[:,:] = (self.Z.T.conj())@rho[:,:]@(self.Z)\n\n def get_rho(self):\n return np.copy((self.Z)@self.rho[:,:]@(self.Z.T.conj()))\n\n def calc_diff(self, rho):\n drho_dt = np.zeros_like(rho)\n self.impl.calc_diff(drho_dt.ravel(order='F'),\n ((self.Z.T.conj())@rho.reshape((self.n_state, self.n_state), order='F')@(self.Z)).ravel(order='F'),\n 1, 0)\n return ((self.Z)@drho_dt.reshape((self.n_state, self.n_state), order='F')@(self.Z.T.conj())).ravel(order='F')\n \n def get_diff_func(self):\n return lambda t, rho: self.calc_diff(rho)\n\n def solve(self, dt__unit, count,\n callback=lambda t, rho: None,\n callback_interval=1):\n self.impl.solve(self.rho.ravel(order='F'),\n dt__unit, dt__unit*calc_unit(),\n callback_interval, count//callback_interval,\n lambda t: callback(t, (self.Z)@self.rho[:,:]@(self.Z.T.conj())))\n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "numpy.diag", "numpy.copy", "scipy.sparse.coo_matrix", "numpy.linalg.eig" ] ]
marcusinthesky/Word2Risk
[ "0212718369f04607a1b06c009df9e6cee29fe103" ]
[ "scraper/News/spiders/biznews.py" ]
[ "import scrapy\nimport pandas as pd\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass biznewsSpider(scrapy.Spider):\n name = \"biznews\"\n \n def __init__(self, *a, **kw):\n super(biznewsSpider, self).__init__(*a, **kw)\n path = os.path.join(os.path.expanduser(\"~\"),\"Documents\",\"NMRQL\",\"Scraper\",\"News\",\"companies.csv\")\n self.companies = pd.read_csv(path).date.tolist()\n self.next_tag = 'html body#gsr.srp.tbo.vasq div#main div#cnt.big div.mw div#rcnt div.col div#center_col div div#foot span#xjs div#navcnt table#nav tbody tr td.b.navend a#pnnext.pn span::text'\n self.site = \"www.biznews.com\"\n\n def start_requests(self):\n for company in self.companies:\n self.pages = 1\n \n while True: \n l = f'https://www.bing.com/search?q=site%3a+{self.site}+\"{company.replace(\" \", \"+\")}\"&rf=1&qpvt=site%3a+{self.site}+\"+{company.replace(\" \", \"+\")}+\"&lf=&first={self.pages}0'\n r = requests.get(l)\n soup = BeautifulSoup(r.text, 'html.parser')\n pages_list = [int(i.text) for i in soup.find_all('a', attrs='sb_bp') if str.isnumeric(i.text)]\n \n if self.pages in pages_list:\n self.pages += 1\n yield scrapy.Request(l, callback=self.get_links_parse, meta={'company':company})\n else:\n break\n \n def get_links_parse(self, response):\n company = response.meta['company']\n for url in response.css(f'a[href^=\"https://{self.site}\"]::attr(href)').extract(): \n yield scrapy.Request(url, callback=self.yield_text_parse, meta={'company':company, 'url': url})\n\n def yield_text_parse(self, response):\n company = response.meta['company']\n url = response.meta['url']\n #title = response.css('div.article_header h2::text').extract_first()\n date = response.css('meta[property$=\"time\"]::attr(content)').extract_first()\n text = ' '.join(response.css('div.entry-content p::text').extract())\n \n yield {\n 'source': url,\n 'company': company,\n #'title': title,\n 'date':date,\n 'text': text\n }" ]
[ [ "pandas.read_csv" ] ]
esgomezm/deepcell-tf
[ "6693c9ed7e76793561e6c2281437acaf3e4fa441" ]
[ "deepcell/layers/location_test.py" ]
[ "# Copyright 2016-2019 The Van Valen Lab at the California Institute of\n# Technology (Caltech), with support from the Paul Allen Family Foundation,\n# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.\n# All rights reserved.\n#\n# Licensed under a modified Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE\n#\n# The Work provided may be used for non-commercial academic purposes only.\n# For any other use of the Work, including commercial use, please contact:\n# [email protected]\n#\n# Neither the name of Caltech nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the location layers\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.platform import test\n\nfrom deepcell.utils import testing_utils\nfrom deepcell import layers\n\n\n@keras_parameterized.run_all_keras_modes\nclass LocationTest(keras_parameterized.TestCase):\n\n def test_location_2d(self):\n testing_utils.layer_test(\n layers.Location2D,\n kwargs={'in_shape': (5, 6, 4),\n 'data_format': 'channels_last'},\n custom_objects={'Location2D': layers.Location2D},\n input_shape=(3, 5, 6, 4))\n testing_utils.layer_test(\n layers.Location2D,\n kwargs={'in_shape': (4, 5, 6),\n 'data_format': 'channels_first'},\n custom_objects={'Location2D': layers.Location2D},\n input_shape=(3, 4, 5, 6))\n\n def test_location_3d(self):\n testing_utils.layer_test(\n layers.Location3D,\n kwargs={'in_shape': (11, 12, 10, 4),\n 'data_format': 'channels_last'},\n custom_objects={'Location3D': layers.Location3D},\n input_shape=(3, 11, 12, 10, 4))\n testing_utils.layer_test(\n layers.Location3D,\n kwargs={'in_shape': (4, 11, 12, 10),\n 'data_format': 'channels_first'},\n custom_objects={'Location3D': layers.Location3D},\n input_shape=(3, 4, 11, 12, 10))\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.platform.test.main" ] ]
gesiscss/wikiwho_demo
[ "1549a63dc9714c1a813a77dcad481a69cd28dfcd" ]
[ "visualization/conflicts_listener.py" ]
[ "import pandas as pd\nimport numpy as np\nimport plotly\nfrom plotly import graph_objs\n\n\nclass ConflictsListener():\n\n def __init__(self, df):\n\n # time diff to seconds\n #df['diff_secs'] = df['time_diff'].dt.total_seconds()\n\n # conflict time diff to seconds \n #df['diff_secs_confl'] = np.nan\n #df['diff_secs_confl'] = df.loc[~df['conflict'].isnull(), 'diff_secs']\n\n self.df = df\n self.df_plotted = None\n\n def listen(self, _range, granularity, black, red):\n df = self.df\n\n df = df[(df.year_month.dt.date >= _range[0]) &\n (df.year_month.dt.date <= _range[1])]\n\n # calculate the aggreated values\n df = df.groupby(pd.Grouper(\n key='year_month', freq=granularity[0])).agg({'conflicts': ['sum'],\n 'elegibles': ['sum'],\n 'revisions': ['sum'],\n 'conflict': ['count', 'sum'],\n 'total': ['sum'],\n 'total_surv_48h': ['sum'],\n 'total_persistent': ['sum'],\n 'total_stopword_count': ['sum']}).reset_index()\n\n df.loc[df[('conflict', 'count')] == 0, ('conflict', 'sum')] = np.nan\n #df.loc[df[('conflicts', 'count')] == 0, ('diff_secs', 'sum')] = np.nan\n\n self.traces = []\n self.is_norm_scale = True\n df = self.__add_trace(df, black, 'rgba(0, 0, 0, 1)')\n df = self.__add_trace(df, red, 'rgba(255, 0, 0, .8)')\n\n #np.all(np.array([len(sc.x) == 1 for sc in self.traces]))\n\n _range = None\n if self.is_norm_scale:\n _range = [0, 1]\n\n # if red != 'None':\n # data.append(graph_objs.Scatter(\n # x=list(df['rev_time']), y=list(df[red]),\n # name=red,\n # marker=dict(color='rgba(255, 0, 0, .8)')))\n\n # if blue != 'None':\n # data.append(graph_objs.Scatter(\n # x=list(df['rev_time']), y=list(df[blue]),\n # name=blue,\n # marker=dict(color='rgba(0, 128, 43, 1)')))\n\n # if green != 'None':\n # data.append(graph_objs.Scatter(\n # x=list(df['rev_time']), y=list(df[green]),\n # name=green,\n # marker=dict(color='rgba(0, 153, 255, .8)')))\n\n layout = graph_objs.Layout(hovermode='closest',\n xaxis=dict(title=granularity, ticklen=5,\n zeroline=True, gridwidth=2),\n yaxis=dict(\n ticklen=5, gridwidth=2, range=_range),\n legend=dict(x=0.5, y=1.2),\n showlegend=True, barmode='group')\n\n self.df_plotted = df\n\n plotly.offline.init_notebook_mode(connected=True)\n plotly.offline.iplot({\"data\": self.traces, \"layout\": layout})\n\n def __add_trace(self, df, metric, color):\n sel = df.index\n if metric == 'None':\n return df\n elif metric == 'Conflict Score':\n df['conflict_score'] = df[\n ('conflict', 'sum')] / df[('elegibles', 'sum')]\n sel = ~df['conflict_score'].isnull()\n y = df.loc[sel, 'conflict_score']\n self.is_norm_scale = False\n\n elif metric == 'Conflict Ratio':\n df['conflict_ratio'] = df[\n ('conflicts', 'sum')] / df[('elegibles', 'sum')]\n sel = ~(df['conflict_ratio'].isnull() | (df[('conflict', 'count')] == 0))\n y = df.loc[sel, 'conflict_ratio']\n\n elif metric == 'Absolute Conflict Score':\n df['absolute_conflict_score'] = df[('conflict', 'sum')]\n sel = ~df['absolute_conflict_score'].isnull() \n y = df.loc[sel, 'absolute_conflict_score']\n self.is_norm_scale = False\n\n elif metric == 'Number of Conflicts':\n df['conflict_n'] = df[('conflicts', 'sum')]\n sel = df['conflict_n'] != 0\n y = df.loc[sel, 'conflict_n']\n self.is_norm_scale = False\n\n elif metric == 'Total':\n df['total_n'] = df[('total', 'sum')]\n sel = df['total_n'] != 0\n y = df.loc[sel, 'total_n']\n self.is_norm_scale = False\n \n elif metric == 'Total_surv_48h':\n df['total_surv_48h_n'] = df[('total_surv_48h', 'sum')]\n sel = df['total_surv_48h_n'] != 0\n y = df.loc[sel, 'total_surv_48h_n']\n self.is_norm_scale = 
False\n\n elif metric == 'Total_persistent':\n df['total_persistent_n'] = df[('total_persistent', 'sum')]\n sel = df['total_persistent_n'] != 0\n y = df.loc[sel, 'total_persistent_n']\n self.is_norm_scale = False\n \n elif metric == 'Total_stopword_count':\n df['total_stopword_count_n'] = df[('total_stopword_count', 'sum')]\n sel = df['total_stopword_count_n'] != 0\n y = df.loc[sel, 'total_stopword_count_n']\n self.is_norm_scale = False\n\n elif metric == 'Total Elegible Actions':\n df['elegibles_n'] = df[('elegibles', 'sum')]\n sel = df['elegibles_n'] != 0\n y = df.loc[sel, 'elegibles_n']\n self.is_norm_scale = False\n\n self.traces.append(\n graph_objs.Scatter(\n x=df.loc[sel,'year_month'], y=y,\n name=metric,\n marker=dict(color=color))\n )\n\n return df\n" ]
[ [ "pandas.Grouper" ] ]
symphony233/gbnns_dim_red
[ "2403411600a60ad4365aba3d78a81da144a456b7" ]
[ "train.py" ]
[ "from __future__ import division\nimport argparse\nimport numpy as np\nimport torch\n\nfrom dim_red.triplet import train_triplet\nfrom dim_red.angular import train_angular\n\nfrom dim_red.support_func import sanitize\nfrom dim_red.data import load_dataset\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n def aa(*args, **kwargs):\n group.add_argument(*args, **kwargs)\n\n group = parser.add_argument_group('dataset options')\n aa(\"--database\", default=\"sift\")\n aa(\"--method\", type=str, default=\"triplet\")\n\n group = parser.add_argument_group('Model hyperparameters')\n aa(\"--dout\", type=int, default=16,\n help=\"output dimension\")\n aa(\"--dint\", type=int, default=1024)\n group = parser.add_argument_group('Computation params')\n aa(\"--seed\", type=int, default=1234)\n aa(\"--device\", choices=[\"cuda\", \"cpu\", \"auto\"], default=\"auto\")\n aa(\"--val_freq\", type=int, default=10,\n help=\"frequency of validation calls\")\n aa(\"--optim\", type=str, default=\"sgd\")\n aa(\"--print_results\", type=int, default=0)\n aa(\"--save\", type=int, default=0)\n aa(\"--full\", type=int, default=0)\n aa(\"--val_freq_search\", type=int, default=5,\n help=\"frequency of validation calls\")\n aa(\"--save_knn_1k\", type=int, default=0)\n aa(\"--save_optimal\", type=int, default=0)\n aa(\"--batch_size\", type=int, default=64)\n aa(\"--epochs\", type=int, default=40)\n aa(\"--lr_schedule\", type=str, default=\"0.1,0.1,0.05,0.01\")\n aa(\"--momentum\", type=float, default=0.9)\n\n args = parser.parse_args()\n\n if args.device == \"auto\":\n args.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n print(args)\n\n results_file_name = \"/home/shekhale/results/dim_red/\" + args.database + \"/train_results_\" + args.method + \".txt\"\n if args.print_results > 0:\n with open(results_file_name, \"a\") as rfile:\n rfile.write(\"\\n\\n\")\n rfile.write(\"START TRAINING \\n\")\n\n print (\"load dataset %s\" % args.database)\n (_, xb, xq, _) = load_dataset(args.database, args.device, calc_gt=False, mnt=True)\n\n base_size = xb.shape[0]\n threshold = int(base_size * 0.01)\n perm = np.random.permutation(base_size)\n xv = xb[perm[:threshold]]\n if args.full:\n xt = xb\n else:\n xt = xb[perm[threshold:]]\n\n print(xb.shape, xt.shape, xv.shape, xq.shape)\n\n xt = sanitize(xt)\n xv = sanitize(xv)\n xb = sanitize(xb)\n xq = sanitize(xq)\n\n if args.method == \"triplet\":\n train_triplet(xb, xt, xv, xq, args, results_file_name)\n elif args.method == \"angular\":\n train_angular(xb, xt, xv, xq, args, results_file_name, perm)\n else:\n print(\"Select an available method\")" ]
[ [ "numpy.random.permutation", "torch.manual_seed", "torch.cuda.is_available", "numpy.random.seed" ] ]
tkShir/EC521-Group6-Novel-Steganographic-Scheme
[ "d01ff5b625d5ef85790451fa62e5c33f15f06f0d" ]
[ "novel_stego_protocol/textgenrnn/textgenrnn.py" ]
[ "import json\nimport re\n\nimport numpy as np\nimport tensorflow as tf\nimport tqdm\nfrom pkg_resources import resource_filename\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow import config as config\nfrom tensorflow.compat.v1.keras.backend import set_session\nfrom tensorflow.keras.callbacks import LearningRateScheduler\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.preprocessing.text import Tokenizer, text_to_word_sequence\n\nfrom .model import textgenrnn_model\nfrom .model_training import generate_sequences_from_texts\nfrom .utils import (\n generate_after_epoch,\n save_model_weights,\n textgenrnn_encode_sequence,\n textgenrnn_generate,\n textgenrnn_generate2,\n textgenrnn_texts_from_file,\n textgenrnn_texts_from_file_context,\n)\n\n\nclass textgenrnn:\n META_TOKEN = '<s>'\n config = {\n 'rnn_layers': 2,\n 'rnn_size': 128,\n 'rnn_bidirectional': False,\n 'max_length': 40,\n 'max_words': 10000,\n 'dim_embeddings': 100,\n 'word_level': False,\n 'single_text': False\n }\n default_config = config.copy()\n\n def __init__(self, weights_path=None,\n vocab_path=None,\n config_path=None,\n name=\"textgenrnn\",\n allow_growth=None):\n\n if weights_path is None:\n weights_path = resource_filename(__name__,\n 'textgenrnn_weights.hdf5')\n\n if vocab_path is None:\n vocab_path = resource_filename(__name__,\n 'textgenrnn_vocab.json')\n\n if allow_growth is not None:\n c = tf.ConfigProto()\n c.gpu_options.allow_growth = True\n set_session(tf.Session(config=c))\n\n if config_path is not None:\n with open(config_path, 'r',\n encoding='utf8', errors='ignore') as json_file:\n self.config = json.load(json_file)\n\n self.config.update({'name': name})\n self.default_config.update({'name': name})\n\n with open(vocab_path, 'r',\n encoding='utf8', errors='ignore') as json_file:\n self.vocab = json.load(json_file)\n\n self.tokenizer = Tokenizer(filters='', lower=False, char_level=True)\n self.tokenizer.word_index = self.vocab\n self.num_classes = len(self.vocab) + 1\n self.model = textgenrnn_model(self.num_classes,\n cfg=self.config,\n weights_path=weights_path)\n self.indices_char = dict((self.vocab[c], c) for c in self.vocab)\n\n def generate(self, n=1, return_as_list=False, prefix=None,\n temperature=[1.0, 0.5, 0.2, 0.2], ciphertext=b\"\",\n max_gen_length=300, interactive=False,\n top_n=3, progress=True):\n gen_texts = []\n iterable = tqdm.trange(n) if progress and n > 1 else range(n)\n for _ in iterable:\n gen_text, _ = textgenrnn_generate(self.model,\n self.vocab,\n self.indices_char,\n temperature,\n self.config['max_length'],\n self.META_TOKEN,\n self.config['word_level'],\n self.config.get(\n 'single_text', False),\n max_gen_length,\n interactive,\n top_n,\n prefix, ciphertext)\n if not return_as_list:\n # print(\"{}\\n\".format(gen_text))\n return (gen_text)\n gen_texts.append(gen_text)\n\n if return_as_list:\n return gen_texts\n\n def generate2(self, n=1, return_as_list=False, prefix=None,\n temperature=[1.0, 0.5, 0.2, 0.2], stegotext=b\"\",\n max_gen_length=300, interactive=False,\n top_n=3, progress=True):\n # print(stegotext)\n gen_texts = []\n iterable = tqdm.trange(n) if progress and n > 1 else range(n)\n for _ in iterable:\n gen_text, _ = textgenrnn_generate2(self.model,\n self.vocab,\n self.indices_char,\n temperature,\n self.config['max_length'],\n self.META_TOKEN,\n 
self.config['word_level'],\n self.config.get(\n 'single_text', False),\n max_gen_length,\n interactive,\n top_n,\n prefix, stegotext)\n if not return_as_list:\n # print(\"{}\\n\".format(gen_text))\n return (gen_text)\n gen_texts.append(gen_text)\n\n if return_as_list:\n return gen_texts\n\n def generate_samples(self, n=3, temperatures=[0.2, 0.5, 1.0], **kwargs):\n for temperature in temperatures:\n print('#' * 20 + '\\nTemperature: {}\\n'.format(temperature) +\n '#' * 20)\n self.generate(n, temperature=temperature, progress=False, **kwargs)\n\n def train_on_texts(self, texts, context_labels=None,\n batch_size=128,\n num_epochs=50,\n verbose=1,\n new_model=False,\n gen_epochs=1,\n train_size=1.0,\n max_gen_length=300,\n validation=True,\n dropout=0.0,\n via_new_model=False,\n save_epochs=0,\n multi_gpu=False,\n **kwargs):\n\n if new_model and not via_new_model:\n self.train_new_model(texts,\n context_labels=context_labels,\n num_epochs=num_epochs,\n gen_epochs=gen_epochs,\n train_size=train_size,\n batch_size=batch_size,\n dropout=dropout,\n validation=validation,\n save_epochs=save_epochs,\n multi_gpu=multi_gpu,\n **kwargs)\n return\n\n if context_labels:\n context_labels = LabelBinarizer().fit_transform(context_labels)\n\n if self.config['word_level']:\n # If training word level, must add spaces around each\n # punctuation. https://stackoverflow.com/a/3645946/9314418\n punct = '!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\\\n\\\\t\\'‘’“”’–—…'\n for i in range(len(texts)):\n texts[i] = re.sub('([{}])'.format(punct), r' \\1 ', texts[i])\n texts[i] = re.sub(' {2,}', ' ', texts[i])\n texts = [text_to_word_sequence(text, filters='') for text in texts]\n\n # calculate all combinations of text indices + token indices\n indices_list = [np.meshgrid(np.array(i), np.arange(\n len(text) + 1)) for i, text in enumerate(texts)]\n # indices_list = np.block(indices_list) # this hangs when indices_list is large enough\n # FIX BEGIN ------\n indices_list_o = np.block(indices_list[0])\n for i in range(len(indices_list) - 1):\n tmp = np.block(indices_list[i + 1])\n indices_list_o = np.concatenate([indices_list_o, tmp])\n indices_list = indices_list_o\n # FIX END ------\n\n # If a single text, there will be 2 extra indices, so remove them\n # Also remove first sequences which use padding\n if self.config['single_text']:\n indices_list = indices_list[self.config['max_length']:-2, :]\n\n indices_mask = np.random.rand(indices_list.shape[0]) < train_size\n\n if multi_gpu:\n num_gpus = len(config.get_visible_devices('GPU'))\n batch_size = batch_size * num_gpus\n\n gen_val = None\n val_steps = None\n if train_size < 1.0 and validation:\n indices_list_val = indices_list[~indices_mask, :]\n gen_val = generate_sequences_from_texts(\n texts, indices_list_val, self, context_labels, batch_size)\n val_steps = max(\n int(np.floor(indices_list_val.shape[0] / batch_size)), 1)\n\n indices_list = indices_list[indices_mask, :]\n\n num_tokens = indices_list.shape[0]\n assert num_tokens >= batch_size, \"Fewer tokens than batch_size.\"\n\n level = 'word' if self.config['word_level'] else 'character'\n print(\"Training on {:,} {} sequences.\".format(num_tokens, level))\n\n steps_per_epoch = max(int(np.floor(num_tokens / batch_size)), 1)\n\n gen = generate_sequences_from_texts(\n texts, indices_list, self, context_labels, batch_size)\n\n base_lr = 4e-3\n\n # scheduler function must be defined inline.\n def lr_linear_decay(epoch):\n return (base_lr * (1 - (epoch / num_epochs)))\n\n '''\n FIXME\n This part is a bit messy as we need to initialize 
the model within\n strategy.scope() when using multi-GPU. Can probably be cleaned up a bit.\n '''\n\n if context_labels is not None:\n if new_model:\n weights_path = None\n else:\n weights_path = \"{}_weights.hdf5\".format(self.config['name'])\n self.save(weights_path)\n\n if multi_gpu:\n from tensorflow import distribute as distribute\n strategy = distribute.MirroredStrategy()\n with strategy.scope():\n parallel_model = textgenrnn_model(self.num_classes,\n dropout=dropout,\n cfg=self.config,\n context_size=context_labels.shape[1],\n weights_path=weights_path)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=4e-3))\n model_t = parallel_model\n print(\"Training on {} GPUs.\".format(num_gpus))\n else:\n model_t = self.model\n else:\n if multi_gpu:\n from tensorflow import distribute as distribute\n if new_model:\n weights_path = None\n else:\n weights_path = \"{}_weights.hdf5\".format(self.config['name'])\n\n strategy = distribute.MirroredStrategy()\n with strategy.scope():\n # Do not locate model/merge on CPU since sample sizes are small.\n parallel_model = textgenrnn_model(self.num_classes,\n cfg=self.config,\n weights_path=weights_path)\n parallel_model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=4e-3))\n\n model_t = parallel_model\n print(\"Training on {} GPUs.\".format(num_gpus))\n else:\n model_t = self.model\n\n model_t.fit(gen, steps_per_epoch=steps_per_epoch,\n epochs=num_epochs,\n callbacks=[\n LearningRateScheduler(\n lr_linear_decay),\n generate_after_epoch(\n self, gen_epochs,\n max_gen_length),\n save_model_weights(\n self, num_epochs,\n save_epochs)],\n verbose=verbose,\n max_queue_size=10,\n validation_data=gen_val,\n validation_steps=val_steps\n )\n\n # Keep the text-only version of the model if using context labels\n if context_labels is not None:\n self.model = Model(inputs=self.model.input[0],\n outputs=self.model.output[1])\n\n def train_new_model(self, texts, context_labels=None, num_epochs=50,\n gen_epochs=1, batch_size=128, dropout=0.0,\n train_size=1.0,\n validation=True, save_epochs=0,\n multi_gpu=False, **kwargs):\n self.config = self.default_config.copy()\n self.config.update(**kwargs)\n\n print(\"Training new model w/ {}-layer, {}-cell {}LSTMs\".format(\n self.config['rnn_layers'], self.config['rnn_size'],\n 'Bidirectional ' if self.config['rnn_bidirectional'] else ''\n ))\n\n # Create text vocabulary for new texts\n # if word-level, lowercase; if char-level, uppercase\n self.tokenizer = Tokenizer(filters='',\n lower=self.config['word_level'],\n char_level=(not self.config['word_level']))\n self.tokenizer.fit_on_texts(texts)\n\n # Limit vocab to max_words\n max_words = self.config['max_words']\n self.tokenizer.word_index = {k: v for (\n k, v) in self.tokenizer.word_index.items() if v <= max_words}\n\n if not self.config.get('single_text', False):\n self.tokenizer.word_index[self.META_TOKEN] = len(\n self.tokenizer.word_index) + 1\n self.vocab = self.tokenizer.word_index\n self.num_classes = len(self.vocab) + 1\n self.indices_char = dict((self.vocab[c], c) for c in self.vocab)\n\n # Create a new, blank model w/ given params\n self.model = textgenrnn_model(self.num_classes,\n dropout=dropout,\n cfg=self.config)\n\n # Save the files needed to recreate the model\n with open('{}_vocab.json'.format(self.config['name']),\n 'w', encoding='utf8') as outfile:\n json.dump(self.tokenizer.word_index, outfile, ensure_ascii=False)\n\n with open('{}_config.json'.format(self.config['name']),\n 'w', encoding='utf8') as outfile:\n 
json.dump(self.config, outfile, ensure_ascii=False)\n\n        self.train_on_texts(texts, new_model=True,\n                            via_new_model=True,\n                            context_labels=context_labels,\n                            num_epochs=num_epochs,\n                            gen_epochs=gen_epochs,\n                            train_size=train_size,\n                            batch_size=batch_size,\n                            dropout=dropout,\n                            validation=validation,\n                            save_epochs=save_epochs,\n                            multi_gpu=multi_gpu,\n                            **kwargs)\n\n    def save(self, weights_path=\"textgenrnn_weights_saved.hdf5\"):\n        self.model.save_weights(weights_path)\n\n    def load(self, weights_path):\n        self.model = textgenrnn_model(self.num_classes,\n                                      cfg=self.config,\n                                      weights_path=weights_path)\n\n    def reset(self):\n        self.config = self.default_config.copy()\n        self.__init__(name=self.config['name'])\n\n    def train_from_file(self, file_path, header=True, delim=\"\\n\",\n                        new_model=False, context=None,\n                        is_csv=False, **kwargs):\n\n        context_labels = None\n        if context:\n            texts, context_labels = textgenrnn_texts_from_file_context(\n                file_path)\n        else:\n            texts = textgenrnn_texts_from_file(file_path, header,\n                                               delim, is_csv)\n\n        print(\"{:,} texts collected.\".format(len(texts)))\n        if new_model:\n            self.train_new_model(\n                texts, context_labels=context_labels, **kwargs)\n        else:\n            self.train_on_texts(texts, context_labels=context_labels, **kwargs)\n\n    def train_from_largetext_file(self, file_path, new_model=True, **kwargs):\n        with open(file_path, 'r', encoding='utf8', errors='ignore') as f:\n            texts = [f.read()]\n\n        if new_model:\n            self.train_new_model(\n                texts, single_text=True, **kwargs)\n        else:\n            self.train_on_texts(texts, single_text=True, **kwargs)\n\n    def generate_to_file(self, destination_path, **kwargs):\n        texts = self.generate(return_as_list=True, **kwargs)\n        with open(destination_path, 'w', encoding=\"utf-8\") as f:\n            for text in texts:\n                f.write(\"{}\\n\".format(text))\n\n    def encode_text_vectors(self, texts, pca_dims=50, tsne_dims=None,\n                            tsne_seed=None, return_pca=False,\n                            return_tsne=False):\n\n        # if a single text, force it into a list:\n        if isinstance(texts, str):\n            texts = [texts]\n\n        vector_output = Model(inputs=self.model.input,\n                              outputs=self.model.get_layer('attention').output)\n        encoded_vectors = []\n        maxlen = self.config['max_length']\n        for text in texts:\n            if self.config['word_level']:\n                text = text_to_word_sequence(text, filters='')\n            text_aug = [self.META_TOKEN] + list(text[0:maxlen])\n            encoded_text = textgenrnn_encode_sequence(text_aug, self.vocab,\n                                                      maxlen)\n            encoded_vector = vector_output.predict(encoded_text)\n            encoded_vectors.append(encoded_vector)\n\n        encoded_vectors = np.squeeze(np.array(encoded_vectors), axis=1)\n        if pca_dims is not None:\n            assert len(texts) > 1, \"Must use more than 1 text for PCA\"\n            pca = PCA(pca_dims)\n            encoded_vectors = pca.fit_transform(encoded_vectors)\n\n        if tsne_dims is not None:\n            tsne = TSNE(tsne_dims, random_state=tsne_seed)\n            encoded_vectors = tsne.fit_transform(encoded_vectors)\n\n        return_objects = encoded_vectors\n        if return_pca or return_tsne:\n            return_objects = [return_objects]\n        if return_pca:\n            return_objects.append(pca)\n        if return_tsne:\n            return_objects.append(tsne)\n\n        return return_objects\n\n    def similarity(self, text, texts, use_pca=True):\n        text_encoded = self.encode_text_vectors(text, pca_dims=None)\n        if use_pca:\n            texts_encoded, pca = self.encode_text_vectors(texts,\n                                                          return_pca=True)\n            text_encoded = pca.transform(text_encoded)\n        else:\n            texts_encoded = self.encode_text_vectors(texts, pca_dims=None)\n\n        cos_similarity = cosine_similarity(text_encoded, texts_encoded)[0]\n        text_sim_pairs = list(zip(texts, cos_similarity))\n        
text_sim_pairs = sorted(text_sim_pairs, key=lambda x: -x[1])\n return text_sim_pairs\n" ]
[ [ "sklearn.preprocessing.LabelBinarizer", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.models.Model", "tensorflow.keras.preprocessing.text.text_to_word_sequence", "tensorflow.distribute.MirroredStrategy", "tensorflow.keras.preprocessing.text.Tokenizer", "numpy.block", "tensorflow.config.get_visible_devices", "sklearn.manifold.TSNE", "numpy.random.rand", "sklearn.metrics.pairwise.cosine_similarity", "tensorflow.Session", "tensorflow.ConfigProto", "sklearn.decomposition.PCA", "tensorflow.config.copy", "numpy.floor", "numpy.array", "numpy.concatenate" ] ]
malisit/onnx-tensorflow
[ "3eb41dc923f350ca533f1024f602a842dd55de45" ]
[ "onnx_tf/handlers/backend/sequence_erase.py" ]
[ "import tensorflow as tf\n\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\n\n\n@onnx_op(\"SequenceErase\")\nclass SequenceErase(BackendHandler):\n\n @classmethod\n def chk_pos_in_bounds(cls, input_seq, pos):\n \"\"\"\n Check the position is in-bounds with respect to the sequence.\n Accepted range for 'position' is in [-n, n - 1], where n is the\n number of tensors in 'input_sequence'.\n\n :param input_seq: input sequence\n :param pos: position of the output tensor\n\n :return: True if position is in-bounds \n \"\"\"\n seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]\n\n cond1 = tf.greater_equal(pos, tf.negative(seq_length))\n cond2 = tf.less_equal(pos, seq_length - 1)\n\n # pos >= -n and pos < n\n return tf.reduce_all(tf.logical_and(cond1, cond2))\n\n @classmethod\n def version_11(cls, node, **kwargs):\n tensor_dict = kwargs[\"tensor_dict\"]\n input_sequence = tensor_dict[node.inputs[0]]\n seq_length = tf.shape(input_sequence.to_sparse())[0]\n position = tensor_dict[node.inputs[1]] if len(\n node.inputs) == 2 else seq_length - 1\n\n # check whether position is in-bounds and assert if not\n result = cls.chk_pos_in_bounds(input_sequence, position)\n assert_pos = tf.Assert(tf.equal(result, True), [result])\n\n with tf.control_dependencies([assert_pos]):\n s1 = input_sequence[:position]\n s2 = input_sequence[position + 1:]\n return [tf.concat([s1, s2], axis=0)]\n" ]
[ [ "tensorflow.negative", "tensorflow.equal", "tensorflow.logical_and", "tensorflow.concat", "tensorflow.less_equal", "tensorflow.control_dependencies" ] ]
pangtao22/quasistatic_simulator
[ "7c6f99cc7237dd922f6eb0b54c580303e86b5223" ]
[ "examples/planar_hand_ball/run_planar_hand.py" ]
[ "import os\nimport numpy as np\n\nfrom pydrake.all import PiecewisePolynomial\n\nfrom examples.setup_simulations import (\n run_quasistatic_sim)\nfrom qsim.parser import QuasistaticParser, QuasistaticSystemBackend\nfrom qsim.model_paths import models_dir\nfrom qsim.simulator import GradientMode\n\n\n#%% sim setup\nq_model_path = os.path.join(models_dir, 'q_sys', 'planar_hand_ball.yml')\n\nh = 0.1\nT = int(round(2 / h)) # num of time steps to simulate forward.\nduration = T * h\n\n# model instance names.\nrobot_l_name = \"arm_left\"\nrobot_r_name = \"arm_right\"\nobject_name = \"sphere\"\n\n# trajectory and initial conditions.\nnq_a = 2\nqa_l_knots = np.zeros((2, nq_a))\nqa_l_knots[0] = [-np.pi / 4, -np.pi / 4]\nq_robot_l_traj = PiecewisePolynomial.ZeroOrderHold(\n [0, T * h], qa_l_knots.T)\n\nqa_r_knots = np.zeros((2, nq_a))\nqa_r_knots[0] = [np.pi / 4, np.pi / 4]\nq_robot_r_traj = PiecewisePolynomial.ZeroOrderHold(\n [0, T * h], qa_r_knots.T)\n\nq_a_traj_dict_str = {robot_l_name: q_robot_l_traj,\n robot_r_name: q_robot_r_traj}\n\nq_u0 = np.array([0, 0.5, 0])\n\nq0_dict_str = {object_name: q_u0,\n robot_l_name: qa_l_knots[0],\n robot_r_name: qa_r_knots[0]}\n\n\n#%% run sim.\nif __name__ == \"__main__\":\n q_parser = QuasistaticParser(q_model_path)\n q_parser.set_sim_params(is_quasi_dynamic=True, gravity=np.array([0, 0, -10.]))\n\n loggers_dict_quasistatic_str, q_sys = run_quasistatic_sim(\n q_parser=q_parser,\n h=h,\n backend=QuasistaticSystemBackend.PYTHON,\n q_a_traj_dict_str=q_a_traj_dict_str,\n q0_dict_str=q0_dict_str,\n is_visualizing=True,\n real_time_rate=1.0)\n\n#%% look into the plant.\n plant = q_sys.plant\n for model in q_sys.q_sim.models_all:\n print(model, plant.GetModelInstanceName(model),\n q_sys.q_sim.velocity_indices[model])\n\n#%% derivatives.\n q_sim = q_sys.q_sim\n name_to_model_dict = q_sim.get_model_instance_name_to_index_map()\n idx_l = name_to_model_dict[robot_l_name]\n idx_r = name_to_model_dict[robot_r_name]\n idx_o = name_to_model_dict[object_name]\n q_dict = {idx_o: [0, 0.316, 0],\n idx_l: [-0.775, -0.785],\n idx_r: [0.775, 0.785]}\n\n # numerical gradient\n dfdu_numerical = q_sim.calc_dfdu_numerical(\n q_dict=q_dict, qa_cmd_dict=q_dict, du=1e-3, h=h)\n\n # analytical gradient\n q_sim.update_mbp_positions(q_dict)\n tau_ext_dict = q_sim.calc_tau_ext([])\n q_sim.step(q_a_cmd_dict=q_dict, tau_ext_dict=tau_ext_dict, h=h,\n mode=\"qp_mp\", gradient_mode=GradientMode.kBOnly,\n grad_from_active_constraints=True)\n dfdu_active = q_sim.get_Dq_nextDqa_cmd()\n\n\n#%% index for tau_a.\n indices = []\n for model in q_sys.q_sim.models_actuated:\n indices += q_sys.q_sim.velocity_indices[model].tolist()\n indices.sort()\n indices_map = {j: i for i, j in enumerate(indices)}\n\n#%% construct q and v vectors of MBP from log.\n logger_qu = loggers_dict_quasistatic_str[object_name]\n q_log = np.zeros((T, plant.num_positions()))\n v_log = np.zeros((T, plant.num_velocities()))\n tau_a_log = np.zeros((T - 1, plant.num_actuated_dofs()))\n\n for name, logger in loggers_dict_quasistatic_str.items():\n model = name_to_model_dict[name]\n for i, j in enumerate(q_sys.q_sim.velocity_indices[model]):\n q_log[:, j] = logger.data().T[:, i]\n\n v_log[1:, :] = (q_log[1:, :] - q_log[:-1, :]) / h\n\n for name in robot_stiffness_dict.keys():\n model = name_to_model_dict[name]\n logger_qa = loggers_dict_quasistatic_str[name]\n idx_v = q_sys.q_sim.velocity_indices[model]\n idx_tau_a = [indices_map[i] for i in idx_v]\n for l in range(T - 1):\n qa_l = logger_qa.data().T[l]\n qa_l1_cmd = 
q_a_traj_dict_str[name].value((l + 1) * h).squeeze()\n tau_a_log[l][idx_tau_a] = Kp * (qa_l1_cmd - qa_l)\n\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
Paul-St-Young/eried
[ "63dbcab435a1fbe65b3b727a6c4b743497f60862" ]
[ "examples/02_min-h4/eri/diff_evals.py" ]
[ "#!/usr/bin/env python3\nimport yaml\nimport numpy as np\n\ndef read_evals(fyml):\n with open(fyml, 'r') as f:\n evd = yaml.safe_load(f)\n elist = evd['evals']\n return np.array(elist)\n\ndef main():\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('nup', type=int)\n parser.add_argument('ndn', type=int)\n parser.add_argument('--lam', type=float, default=1)\n parser.add_argument('--e2e', type=float, default=1)\n parser.add_argument('--tol', type=float, default=1e-12)\n parser.add_argument('--verbose', action='store_true')\n args = parser.parse_args()\n nup = args.nup\n ndn = args.ndn\n\n prefix = 'evals-l%f-e%f-nup%d-ndn%d' % (args.lam, args.e2e, nup, ndn)\n fyml0 = '../fci/%s.yml' % prefix\n fyml1 = '../eri/%s.yml' % prefix\n\n e0 = read_evals(fyml0)\n e1 = read_evals(fyml1)\n de = e1-e0\n sel = abs(de) > args.tol\n idx = np.where(sel)[0]\n print(idx)\n print(de[sel])\n\nif __name__ == '__main__':\n main() # set no global variable\n" ]
[ [ "numpy.array", "numpy.where" ] ]
TheRockStarDBA/sqlmlutils
[ "956bdd72638a649f0e613f100fbb81c900dcb65e" ]
[ "Python/sqlmlutils/sqlpythonexecutor.py" ]
[ "# Copyright(c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT license.\r\n\r\nfrom typing import Callable\r\nimport dill\r\nfrom pandas import DataFrame\r\n\r\nfrom .connectioninfo import ConnectionInfo\r\nfrom .sqlqueryexecutor import execute_query, execute_raw_query\r\nfrom .sqlbuilder import SpeesBuilder, SpeesBuilderFromFunction, StoredProcedureBuilder, \\\r\n ExecuteStoredProcedureBuilder, DropStoredProcedureBuilder\r\nfrom .sqlbuilder import StoredProcedureBuilderFromFunction, RETURN_COLUMN_NAME\r\n\r\n\r\nclass SQLPythonExecutor:\r\n\r\n def __init__(self, connection_info: ConnectionInfo):\r\n self._connection_info = connection_info\r\n\r\n def execute_function_in_sql(self,\r\n func: Callable, *args,\r\n input_data_query: str = \"\",\r\n **kwargs):\r\n \"\"\"Execute a function in SQL Server.\r\n\r\n :param func: function to execute_function_in_sql. NOTE: This function is shipped to SQL as text.\r\n Functions should be self contained and import statements should be inline.\r\n :param args: positional args to pass to function to execute_function_in_sql.\r\n :param input_data_query: sql query to fill the first argument of the function. The argument gets the result of\r\n the query as a pandas DataFrame (uses the @input_data_1 parameter in sp_execute_external_script)\r\n :param kwargs: keyword arguments to pass to function to execute_function_in_sql.\r\n :return: value returned by func\r\n\r\n >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n >>>\r\n >>> def foo(val1, val2):\r\n >>> import math\r\n >>> print(val1)\r\n >>> return [math.cos(val2), math.cos(val2)]\r\n >>>\r\n >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AirlineTestDB\"))\r\n >>> ret = sqlpy.execute_function_in_sql(foo, val1=\"blah\", val2=5)\r\n blah\r\n >>> print(ret)\r\n [0.28366218546322625, 0.28366218546322625]\r\n \"\"\"\r\n rows = execute_query(SpeesBuilderFromFunction(func, input_data_query, *args, **kwargs), self._connection_info)\r\n return self._get_results(rows)\r\n\r\n def execute_script_in_sql(self,\r\n path_to_script: str,\r\n input_data_query: str = \"\"):\r\n \"\"\"Execute a script in SQL Server.\r\n\r\n :param path_to_script: file path to Python script to execute.\r\n :param input_data_query: sql query to fill InputDataSet global variable with.\r\n (@input_data_1 parameter in sp_execute_external_script)\r\n :return: None\r\n\r\n \"\"\"\r\n try:\r\n with open(path_to_script, 'r') as script_file:\r\n content = script_file.read()\r\n print(\"File does exist, using \" + path_to_script)\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"File does not exist!\")\r\n execute_query(SpeesBuilder(content, input_data_query=input_data_query), connection=self._connection_info)\r\n\r\n def execute_sql_query(self,\r\n sql_query: str,\r\n params = ()):\r\n \"\"\"Execute a sql query in SQL Server.\r\n\r\n :param sql_query: the sql query to execute in the server\r\n :return: table returned by the sql_query\r\n \"\"\"\r\n rows = execute_raw_query(conn=self._connection_info, query=sql_query, params=params)\r\n df = DataFrame(rows)\r\n\r\n # _mssql's execute_query() returns duplicate keys for indexing, we remove them because they are extraneous\r\n for i in range(len(df.columns)):\r\n try:\r\n del df[i]\r\n except KeyError:\r\n pass\r\n\r\n return df\r\n\r\n def create_sproc_from_function(self, name: str, func: Callable,\r\n input_params: dict = None, output_params: dict = None):\r\n \"\"\"Create a SQL Server stored procedure based on a Python 
function.\r\n        NOTE: Type annotations are needed either in the function definition or in the input_params dictionary\r\n        WARNING: Output parameters can be used when creating the stored procedure, but Stored Procedures with\r\n        output parameters other than a single DataFrame cannot be executed with sqlmlutils\r\n\r\n        :param name: name of stored procedure.\r\n        :param func: function used to define stored procedure. parameters to the function are used to define parameters\r\n        to the stored procedure. type annotations of the parameters are used to infer SQL types of parameters to the\r\n        stored procedure. currently supported type annotations are \"str\", \"int\", \"float\", and \"DataFrame\".\r\n        :param input_params: optional dictionary of type annotations for each argument to func;\r\n        if func has type annotations this is not necessary. If both are provided, they must match\r\n        :param output_params: optional dictionary of type annotations for each output parameter\r\n        :return: True if creation succeeded\r\n\r\n        >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n        >>>\r\n        >>> def foo(val1: int, val2: str):\r\n        >>> from pandas import DataFrame\r\n        >>> print(val2)\r\n        >>> df = DataFrame()\r\n        >>> df[\"col1\"] = [val1, val1, val1]\r\n        >>> return df\r\n        >>>\r\n        >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AutoRegressTestDB\"))\r\n        >>> sqlpy.create_sproc_from_function(\"MyStoredProcedure\", foo)\r\n        >>>\r\n        >>> # You can execute_function_in_sql the procedure in the usual way from sql: exec MyStoredProcedure 5, 'bar'\r\n        >>> # You can also call the stored procedure from Python\r\n        >>> ret = sqlpy.execute_sproc(name=\"MyStoredProcedure\", val1=5, val2=\"bar\")\r\n        >>> sqlpy.drop_sproc(name=\"MyStoredProcedure\")\r\n\r\n        \"\"\"\r\n        if input_params is None:\r\n            input_params = {}\r\n        if output_params is None:\r\n            output_params = {}\r\n        # Save the stored procedure in database\r\n        execute_query(StoredProcedureBuilderFromFunction(name, func,\r\n                                                         input_params, output_params), self._connection_info)\r\n        return True\r\n\r\n    def create_sproc_from_script(self, name: str, path_to_script: str,\r\n                                 input_params: dict = None, output_params: dict = None):\r\n        \"\"\"Create a SQL Server stored procedure based on a Python script\r\n\r\n        :param name: name of stored procedure.\r\n        :param path_to_script: file path to Python script to create a sproc from.\r\n        :param input_params: optional dictionary of type annotations for inputs in the script\r\n        :param output_params: optional dictionary of type annotations for each output variable\r\n        :return: True if creation succeeded\r\n\r\n        >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n        >>>\r\n        >>>\r\n        >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AutoRegressTestDB\"))\r\n        >>> sqlpy.create_sproc_from_script(name=\"script_sproc\", path_to_script=\"path/to/script\")\r\n        >>>\r\n        >>> # This will execute the script in sql; with no inputs or outputs it will just run and return nothing\r\n        >>> sqlpy.execute_sproc(name=\"script_sproc\")\r\n        >>> sqlpy.drop_sproc(name=\"script_sproc\")\r\n\r\n        \"\"\"\r\n        if input_params is None:\r\n            input_params = {}\r\n        if output_params is None:\r\n            output_params = {}\r\n        # Save the stored procedure in database\r\n        try:\r\n            with open(path_to_script, 'r') as script_file:\r\n                content = script_file.read()\r\n            print(\"File does exist, using \" + path_to_script)\r\n        except FileNotFoundError:\r\n            raise FileNotFoundError(\"File does not exist!\")\r\n\r\n        
execute_query(StoredProcedureBuilder(name, content,\r\n input_params, output_params), self._connection_info)\r\n return True\r\n\r\n def check_sproc(self, name: str) -> bool:\r\n \"\"\"Check to see if a SQL Server stored procedure exists in the database.\r\n\r\n >>> from sqlmlutils import ConnectionInfo, SQLPythonExecutor\r\n >>>\r\n >>> sqlpy = SQLPythonExecutor(ConnectionInfo(\"localhost\", database=\"AutoRegressTestDB\"))\r\n >>> if sqlpy.check_sproc(\"MyStoredProcedure\"):\r\n >>> print(\"MyStoredProcedure exists\")\r\n >>> else:\r\n >>> print(\"MyStoredProcedure does not exist\")\r\n\r\n :param name: name of stored procedure.\r\n :return: boolean whether the Stored Procedure exists in the database\r\n \"\"\"\r\n check_query = \"SELECT OBJECT_ID (%s, N'P')\"\r\n rows = execute_raw_query(conn=self._connection_info, query=check_query, params=name)\r\n return rows[0][0] is not None\r\n\r\n def execute_sproc(self, name: str, **kwargs) -> DataFrame:\r\n \"\"\"Call a stored procedure on a SQL Server database.\r\n WARNING: Output parameters can be used when creating the stored procedure, but Stored Procedures with\r\n output parameters other than a single DataFrame cannot be executed with sqlmlutils\r\n\r\n :param name: name of stored procedure.\r\n :param kwargs: keyword arguments to pass to stored procedure\r\n :return: DataFrame representing the output data set of the stored procedure (or empty)\r\n \"\"\"\r\n return DataFrame(execute_query(ExecuteStoredProcedureBuilder(name, **kwargs), self._connection_info))\r\n\r\n def drop_sproc(self, name: str):\r\n \"\"\"Drop a SQL Server stored procedure if it exists.\r\n\r\n :param name: name of stored procedure.\r\n :return: None\r\n \"\"\"\r\n if self.check_sproc(name):\r\n execute_query(DropStoredProcedureBuilder(name), self._connection_info)\r\n\r\n @staticmethod\r\n def _get_results(rows):\r\n hexstring = rows[0][RETURN_COLUMN_NAME]\r\n return dill.loads(bytes.fromhex(hexstring))\r\n" ]
[ [ "pandas.DataFrame" ] ]
xadupre/keras-onnx
[ "17559f987ecce7ec40ab8a36a9596eb950f9b332" ]
[ "keras2onnx/ke2onnx/lstm.py" ]
[ "###############################################################################\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\nimport numbers\nimport numpy as np\nfrom collections.abc import Iterable\nfrom ..common import cvtfunc, name_func\nfrom ..common.onnx_ops import (\n apply_concat,\n apply_gather,\n apply_reshape,\n apply_shape,\n apply_split,\n apply_squeeze,\n apply_unsqueeze,\n apply_transpose,\n OnnxOperatorBuilder\n)\nfrom ..proto import onnx_proto, keras\nfrom . import simplernn\nfrom onnx import numpy_helper\n\nLSTM = keras.layers.LSTM\nTensorProto = onnx_proto.TensorProto\n\n\ndef convert_ifco_to_iofc(tensor_ifco):\n \"\"\"Returns a tensor in input (i), output (o), forget (f), cell (c) ordering. The\n Keras ordering is ifco, while the ONNX ordering is iofc.\n \"\"\"\n splits = np.split(tensor_ifco, 4)\n return np.concatenate((splits[0], splits[3], splits[1], splits[2]))\n\n\ndef extract_params(op, hidden_size, input_size):\n \"\"\"Returns a tuple of the LSTM parameters, and converts them into the format for ONNX.\n \"\"\"\n params = op.get_weights()\n\n # Keras: [W_x, W_h, b] each in I F C O\n # ONNX: W[iofc] I O F C\n W_x = convert_ifco_to_iofc(params[0].T).reshape(4, hidden_size, input_size)\n W_h = convert_ifco_to_iofc(params[1].T).reshape(4, hidden_size, hidden_size)\n\n b = None\n if op.use_bias:\n b = np.zeros((8, hidden_size), dtype=np.float32)\n b[:4] = convert_ifco_to_iofc(params[2]).reshape(4, hidden_size)\n\n return W_x, W_h, b\n\n\ndef build_parameters(scope, operator, container, bidirectional=False):\n \"\"\"Returns the parameter initialization values after extracting them from the LSTM layer.\n \"\"\"\n op = operator.raw_operator\n _, seq_length, input_size = simplernn.extract_input_shape(op)\n\n _name = name_func(scope, operator)\n\n tensor_w = _name('W')\n tensor_r = _name('R')\n tensor_b = ''\n\n if bidirectional:\n forward_layer = op.forward_layer\n backward_layer = op.backward_layer\n hidden_size = forward_layer.units\n\n W_x, W_h, b = extract_params(forward_layer, hidden_size, input_size)\n W_x_back, W_h_back, b_back = extract_params(backward_layer, hidden_size, input_size)\n\n W = np.concatenate([W_x, W_x_back]).flatten()\n W_shape = [2, 4 * hidden_size, input_size]\n\n R = np.concatenate([W_h, W_h_back]).flatten()\n R_shape = [2, 4 * hidden_size, hidden_size]\n\n if (b is None and b_back is not None) or (b is not None and b_back is None):\n raise ValueError('Bidirectional bias must be enabled (or disabled) for both forward '\n 'and backward layers.')\n\n if b is not None:\n B = np.concatenate([b, b_back]).flatten()\n B_shape = [2, 8 * hidden_size]\n\n else:\n hidden_size = op.units\n\n W_x, W_h, b = extract_params(op, hidden_size, input_size)\n\n W = W_x.flatten()\n W_shape = [1, 4 * hidden_size, input_size]\n\n R = W_h.flatten()\n R_shape = [1, 4 * hidden_size, hidden_size]\n\n if b is not None:\n B = b.flatten()\n B_shape = [1, 8 * hidden_size]\n\n # Create initializers\n container.add_initializer(tensor_w, TensorProto.FLOAT, W_shape, W)\n container.add_initializer(tensor_r, TensorProto.FLOAT, R_shape, R)\n\n if b is not None:\n tensor_b = _name('B')\n container.add_initializer(tensor_b, TensorProto.FLOAT, B_shape, B)\n\n return tensor_w, tensor_r, tensor_b\n\n\ndef build_initial_states(scope, operator, container, bidirectional=False):\n \"\"\"Builds the 
initial hidden and cell states for the LSTM layer.\n \"\"\"\n _name = name_func(scope, operator)\n\n initial_h = simplernn.build_initial_states(scope, operator, container, bidirectional)\n\n # Determine if the cell states are set\n has_c = (\n (len(operator.inputs) > 1 and not bidirectional) or\n (len(operator.inputs) > 3 and bidirectional)\n )\n if not has_c:\n return initial_h, ''\n\n op = operator.raw_operator\n initial_c = _name('initial_c')\n\n if bidirectional:\n forward_layer = op.forward_layer\n hidden_size = forward_layer.units\n desired_shape = [1, -1, hidden_size]\n\n # Combine the forward and backward_layers\n forward_h = _name('initial_c_forward')\n backward_h = _name('initial_c_backward')\n apply_reshape(scope, operator.inputs[2].full_name, forward_h, container, desired_shape=desired_shape)\n apply_reshape(scope, operator.inputs[4].full_name, backward_h, container, desired_shape=desired_shape)\n\n apply_concat(scope, [forward_h, backward_h], initial_c, container)\n\n else:\n # Unsqueeze dim 0 to represent num_directions\n input_c = operator.inputs[2].full_name\n apply_unsqueeze(scope, input_c, initial_c, container, axes=[0])\n\n return initial_h, initial_c\n\n\ndef build_attributes(scope, operator, container, bidirectional=False):\n \"\"\"Returns a dictionary of attributes for the LSTM layer.\n \"\"\"\n op = operator.raw_operator\n\n attrs = {}\n\n if bidirectional:\n forward_layer = op.forward_layer\n backward_layer = op.backward_layer\n\n attrs['direction'] = 'bidirectional'\n attrs['hidden_size'] = forward_layer.units\n attrs.update(simplernn.extract_activations([\n forward_layer.recurrent_activation,\n forward_layer.activation,\n forward_layer.activation,\n backward_layer.recurrent_activation,\n backward_layer.activation,\n backward_layer.activation,\n ]))\n\n else:\n attrs['direction'] = 'reverse' if op.go_backwards else 'forward'\n attrs['hidden_size'] = op.units\n attrs.update(simplernn.extract_activations([\n op.recurrent_activation,\n op.activation,\n op.activation,\n ]))\n return attrs\n\n\ndef build_output(scope, operator, container, output_names, direction='forward'):\n \"\"\"Builds the output operators for the LSTM layer.\n \"\"\"\n bidirectional = True if direction == 'bidirectional' else False\n\n if bidirectional:\n return simplernn.build_output(scope, operator, container, output_names[:-1], bidirectional)\n\n lstm_y, lstm_h, lstm_c = output_names\n\n op = operator.raw_operator\n output_seq = op.return_sequences\n _, seq_length, input_size = simplernn.extract_input_shape(op)\n\n _name = name_func(scope, operator)\n\n output_name = operator.outputs[0].full_name\n\n time_major = simplernn.is_time_major(op, bidirectional)\n # Create output-adjusting operators\n if output_seq:\n # Squeeze the num_direction dim as we know its size is 1 for\n # lstm(forward/reverse).\n is_reverse = True if direction == 'reverse' else False\n lstm_out = output_name if time_major else _name('y_squeezed')\n squeeze_out = lstm_out if not is_reverse else _name('y_squeezed')\n apply_squeeze(scope, lstm_y, squeeze_out, container, axes=[1])\n\n if time_major:\n if is_reverse:\n reverse_sequence(scope, container, lstm_out, output_name, name=_name('reverse_seq'), axes=[0])\n\n else:\n # Onnx LSTM produces time major output. 
Add a transpose operator to\n # make it batch_major, if the keras op was not time_major.\n # This transforms [ S, B, I] -> [ B, S, I ] where B is\n # batch_size and S is seq_len.\n perm = [1, 0, 2]\n transpose_out = output_name if not is_reverse else _name('transpose')\n apply_transpose(scope, squeeze_out, transpose_out, container, perm=perm)\n if is_reverse:\n reverse_sequence(scope, container, transpose_out, output_name, name=_name('reverse_seq'), axes=[1])\n\n else:\n apply_squeeze(scope, lstm_h, output_name, container, axes=[0])\n\n\ndef reverse_sequence(scope, container, input_name, output_name, name, axes):\n oopb = OnnxOperatorBuilder(container, scope)\n rv2_in_names = [input_name]\n apply_shape(scope, input_name, input_name + '_shape', container)\n rv2_node_name = name\n inputs = rv2_in_names\n\n axis = axes[0]\n batch_axis = 1 if axis != 1 else 0\n\n const_batch = numpy_helper.from_array(np.array([batch_axis], dtype=np.int64), rv2_node_name + '_const_batch')\n container.add_initializer_from_tensor(const_batch)\n const_axis = numpy_helper.from_array(np.array([axis], dtype=np.int64), rv2_node_name + '_const_axis')\n container.add_initializer_from_tensor(const_axis)\n\n apply_gather(scope, [input_name + '_shape', const_batch.name], rv2_node_name + '_gather_batch', container)\n apply_gather(scope, [input_name + '_shape', const_axis.name], rv2_node_name + '_gather_axis', container)\n seq_array = oopb.add_node('Expand', [rv2_node_name + '_gather_axis', rv2_node_name + '_gather_batch'],\n rv2_node_name + '_expand')\n inputs.append(seq_array)\n\n res_seq_node = oopb.add_node('ReverseSequence', inputs, name=rv2_node_name + '_rev_seq', batch_axis=batch_axis,\n time_axis=axis, op_version=10)\n\n oopb.apply_op_with_output('apply_identity', [res_seq_node], [output_name],\n name=rv2_node_name + '_Identity')\n\n\ndef build_output_states(scope, operator, container, output_names, bidirectional=False):\n \"\"\"Builds the output hidden states for the LSTM layer.\n \"\"\"\n _, lstm_h, lstm_c = output_names\n op = operator.raw_operator\n\n if bidirectional:\n forward_layer = op.forward_layer\n output_state = forward_layer.return_state\n\n if not output_state:\n return\n\n # Split lstm_h and lstm_c into forward and backward components\n squeeze_names = []\n output_names = [o.full_name for o in operator.outputs[1:]]\n name_map = {lstm_h: output_names[::2], lstm_c: output_names[1::2]}\n\n for state_name, outputs in name_map.items():\n split_names = ['{}_{}'.format(state_name, d) for d in ('forward', 'backward')]\n\n apply_split(scope, state_name, split_names, container)\n squeeze_names.extend(list(zip(split_names, outputs)))\n\n for split_name, output_name in squeeze_names:\n apply_squeeze(scope, split_name, output_name, container)\n\n else:\n output_state = op.return_state\n\n if not output_state:\n return\n\n output_h = operator.outputs[1].full_name\n output_c = operator.outputs[2].full_name\n apply_squeeze(scope, lstm_h, output_h, container)\n apply_squeeze(scope, lstm_c, output_c, container)\n\n\ndef _calculate_keras_lstm_output_shapes(operator):\n op = operator.raw_operator\n if isinstance(op.output_shape[0], Iterable):\n operator.outputs[0].type.shape = list(i if isinstance(i, numbers.Integral) else None\n for i in op.output_shape[0])\n else:\n operator.outputs[0].type.shape = list(i if isinstance(i, numbers.Integral) else None for i in op.output_shape)\n\n\n@cvtfunc(shape_infer=_calculate_keras_lstm_output_shapes)\ndef convert_keras_lstm(scope, operator, container, bidirectional=False):\n op = 
operator.raw_operator\n _name = name_func(scope, operator)\n\n if bidirectional:\n output_seq = op.forward_layer.return_sequences\n else:\n output_seq = op.return_sequences\n\n time_major = simplernn.is_time_major(op, bidirectional)\n\n # Inputs\n lstm_x = operator.inputs[0].full_name\n if not time_major:\n # If the keras op was not time_major, we add a transpose op to make the\n # input time_major as ONNX lstm expects time_major input.\n # Transform [ B, S, I ] -> [ S, B, I] where B is batch_size and S is\n # seq_len.\n lstm_x = _name('X')\n apply_transpose(scope, operator.inputs[0].full_name, lstm_x, container, perm=[1, 0, 2])\n\n tensor_w, tensor_r, tensor_b = build_parameters(scope, operator, container, bidirectional)\n sequence_lengths = simplernn.build_sequence_lengths(scope, operator, container)\n initial_h, initial_c = build_initial_states(scope, operator, container, bidirectional)\n\n input_names = [\n lstm_x,\n tensor_w,\n tensor_r,\n tensor_b,\n sequence_lengths,\n initial_h,\n initial_c,\n '', # P (optional) : No peep hole in Keras.\n ]\n\n # Attributes\n attrs = build_attributes(scope, operator, container, bidirectional)\n\n # Outputs\n output_names = [_name('Y'), _name('Y_h'), _name('Y_c')]\n\n oopb = OnnxOperatorBuilder(container, scope)\n oopb.apply_op_with_output('apply_lstm',\n input_names,\n output_names,\n name=op.name,\n output_seq=output_seq,\n **attrs)\n\n build_output(scope, operator, container, output_names, attrs['direction'])\n build_output_states(scope, operator, container, output_names, bidirectional)\n" ]
[ [ "numpy.array", "numpy.concatenate", "numpy.zeros", "numpy.split" ] ]
fwitte/chp_orc
[ "509abf4faf2a5d08ef8311a0f2a8c75e1bbba95e" ]
[ "Optimization/app.py" ]
[ "# %%\n\n\nfrom CoolProp.CoolProp import PropsSI\nimport pygmo as pg\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom orc import ORC_without_ihe, CHPORC\nfrom tespy.components import HeatExchanger, Merge, Pump, Sink, Source, Splitter\nfrom tespy.components.heat_exchangers.condenser import Condenser\nfrom tespy.connections import Bus, Connection, Ref\nfrom tespy.networks import Network\nfrom opt import MultivariateOptimizationProblem\nimport json\nimport sys\nimport os\n\ndef variant_4(baseplant):\n\n # district heating system\n dh_return_temperature = 60\n dh_feed_temperature = 40\n dh_pressure = 5\n\n # components\n geo_splitter = Splitter(\"geo splitter\")\n geo_merge = Merge(\"geo merge\")\n\n # pump for district heating system?\n dh_source = Source(\"dh return\")\n dh_sink = Sink(\"dh feed\")\n dh_heat_exchanger = HeatExchanger(\"dh heat exchanger\")\n\n baseplant.nw.del_conns(*baseplant.nw.get_conn([\"22\", \"27\"]))\n\n c22 = Connection(baseplant.nw.get_comp(\"evaporator\"), \"out1\", geo_splitter, \"in1\", label=\"22\")\n\n # district heating\n c23 = Connection(geo_splitter, \"out1\", dh_heat_exchanger, \"in1\", label=\"23\")\n c24 = Connection(dh_heat_exchanger, \"out1\", geo_merge, \"in1\", label=\"24\")\n\n # orc\n c25 = Connection(geo_splitter, \"out2\", baseplant.nw.get_comp(\"preheater\"), \"in1\", label=\"25\")\n c26 = Connection(baseplant.nw.get_comp(\"preheater\"), \"out1\", geo_merge, \"in2\", label=\"26\")\n\n c27 = Connection(\n geo_merge, \"out1\", baseplant.nw.get_comp(\"geo re-injection\"), \"in1\", label=\"27\"\n )\n baseplant.nw.add_conns(c22, c23, c24, c25, c26, c27)\n\n # district heating\n c31 = Connection(dh_source, \"out1\", dh_heat_exchanger, \"in2\", label=\"31\")\n c32 = Connection(dh_heat_exchanger, \"out2\", dh_sink, \"in1\", label=\"32\")\n\n baseplant.nw.add_conns(c31, c32)\n\n # no pr1 required, parallel to preheater\n dh_heat_exchanger.set_attr(pr2=0.98)\n c31.set_attr(\n fluid={baseplant.working_fluid: 0, \"water\": 1}, T=dh_feed_temperature, p=dh_pressure\n )\n c32.set_attr(T=dh_return_temperature)\n\n # reinjection temperature specification\n c26.set_attr(T=70)\n c24.set_attr(T=70)\n\n # solve the network\n baseplant.nw.solve(\"design\")\n baseplant.nw.print_results()\n\n\ndef variant_3(nw):\n\n # district heating system\n dh_return_temperature = 60\n dh_feed_temperature = 40\n dh_pressure = 5\n\n # components\n geo_splitter = Splitter(\"geo splitter\")\n geo_merge = Merge(\"geo merge\")\n\n # pump for district heating system?\n dh_source = Source(\"dh return\")\n dh_sink = Sink(\"dh feed\")\n dh_heat_exchanger1 = HeatExchanger(\"dh heat exchanger 1\")\n dh_heat_exchanger2 = HeatExchanger(\"dh heat exchanger 2\")\n\n nw.del_conns(*nw.get_conn([\"21\", \"27\"]))\n\n c21_0 = Connection(\n nw.get_comp(\"geo source\"), \"out1\", geo_splitter, \"in1\", label=\"21_0\"\n )\n c21_1 = Connection(\n geo_splitter, \"out1\", nw.get_comp(\"evaporator\"), \"in1\", label=\"21_1\"\n )\n c23 = Connection(geo_splitter, \"out2\", dh_heat_exchanger2, \"in1\", label=\"23\")\n\n # district heating\n c24 = Connection(dh_heat_exchanger2, \"out1\", geo_merge, \"in1\", label=\"24\")\n c25 = Connection(\n nw.get_comp(\"preheater\"), \"out1\", dh_heat_exchanger1, \"in1\", label=\"25\"\n )\n c26 = Connection(dh_heat_exchanger1, \"out1\", geo_merge, \"in2\", label=\"26\")\n\n c27 = Connection(\n geo_merge, \"out1\", nw.get_comp(\"geo re-injection\"), \"in1\", label=\"27\"\n )\n nw.add_conns(c21_0, c21_1, c23, c24, c25, c26, c27)\n\n # district heating\n c31 = 
Connection(dh_source, \"out1\", dh_heat_exchanger1, \"in2\", label=\"31\")\n c32 = Connection(dh_heat_exchanger1, \"out2\", dh_heat_exchanger2, \"in2\", label=\"32\")\n c33 = Connection(dh_heat_exchanger2, \"out2\", dh_sink, \"in1\", label=\"33\")\n\n nw.add_conns(c31, c32, c33)\n\n dh_heat_exchanger1.set_attr(pr1=0.98, pr2=0.98)\n # no pr1 required, parallel to ORC/dh_heat_exchanger1\n dh_heat_exchanger2.set_attr(pr2=0.98)\n c21_0.set_attr(fluid={working_fluid: 0, \"water\": 1}, T=100, p=25, m=10)\n c31.set_attr(\n fluid={working_fluid: 0, \"water\": 1}, T=dh_feed_temperature, p=dh_pressure\n )\n c32.set_attr(T=(dh_feed_temperature + dh_return_temperature) / 2)\n c33.set_attr(T=dh_return_temperature)\n\n # reinjection temperature specification\n c26.set_attr(T=70)\n c24.set_attr(T=70)\n\n # solve the network\n nw.solve(\"design\")\n\n P = []\n Q = []\n T_range = [42, 44, 46, 48, 50, 52, 54, 56, 58]\n for T in T_range:\n c32.set_attr(T=T)\n nw.solve(\"design\")\n P += [abs(nw.get_comp(\"turbine\").P.val)]\n Q += [abs(dh_heat_exchanger1.Q.val + dh_heat_exchanger2.Q.val)]\n\n fig, ax = plt.subplots(2, 1)\n ax[0].plot(T_range, P)\n ax[0].grid()\n ax[0].set_ylabel(\"Turbine power\")\n ax[1].plot(T_range, Q)\n ax[1].grid()\n ax[1].set_xlabel(\"Temperature between heat exchangers\")\n ax[1].set_ylabel(\"District heating system heat\")\n fig.savefig(working_fluid + \".png\")\n plt.close()\n\n# create base plant and supply functionalities\nplant = CHPORC(\"R134a\")\n# modify the plant structure\nvariant_4(plant)\n# solve mode with specified parameters\nplant.nw.print_results()\n\n\n# make a trivial test:\n# -(un)specifiy some boundary conditions\n# -set some connection and component variables\n# -set a lower limit constraint\n\nwith open(sys.argv[1], 'r') as f:\n input_data = json.load(f)\n f.close()\n\nboundary_conditions = input_data['boundary_conditions']\nvariables = input_data['variables']\nconstraints = input_data['constraints']\nobjective = input_data['objective']\n\nplant.set_params(**boundary_conditions)\n\nnum_gen = input_data['num_gen']\nnum_ind = input_data['num_ind']\n# this should be outside of the optimitzation class\n\noptimize = MultivariateOptimizationProblem(plant, variables, constraints, objective)\n\n# this must be outside of\nalgo = pg.ihs(gen=num_gen)\noptimize.run(algo, num_ind, num_gen)\n\nprint(optimize.individuals)\n\npath = input_data['scenario_name'] + '/'\n\nif not os.path.isdir(path):\n os.mkdir(path)\n\noptimize.individuals.to_csv(input_data['scenario_name'] + '/result.csv')\n# write optimization instance data to json file for postprocessing\n\nvariables_labels = {}\nfor obj, data in optimize.variables.items():\n for label, params in data.items():\n for param in params:\n variables_labels[obj + '-' + label + '-' + param] = param + ' at ' + obj + ' ' + label\n\n\nwith open(input_data['scenario_name'] + '/problem.json', 'w') as f:\n output = {\n key + \"_list\": optimize.__dict__[key + \"_list\"]\n for key in [\"constraint\", \"variable\", \"objective\"]\n }\n output.update(variables_labels)\n f.write(json.dumps(output))\n f.close()" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.subplots" ] ]
willxujun/tensorflow
[ "5c31a9c4a8aa94d2f41c60880bb3ca699c23328c" ]
[ "tensorflow/compiler/aot/ex2/make_graph.py" ]
[ "import argparse\nimport os\nimport sys\n\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nFLAGS = None\n\ndef mlp(_):\n # Parameters\n learning_rate = 0.1\n num_steps = 500\n batch_size = 128\n display_step = 100\n # Network Parameters\n n_hidden_1 = 256 # 1st layer number of neurons\n n_hidden_2 = 256 # 2nd layer number of neurons\n num_input = 784 # MNIST data input (img shape: 28*28)\n num_classes = 10 # MNIST total classes (0-9 digits)\n # tf Graph input\n X = tf.placeholder(\"float\", [None, num_input])\n Y = tf.placeholder(\"float\", [None, num_classes])\n # Store layers weight & bias\n weights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))\n }\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n }\n # Create model\n def neural_net(x):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer\n # Construct model\n logits = neural_net(X)\n prediction = tf.nn.softmax(logits)\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op)\n\n # Evaluate model\n correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Initialize the variables (i.e. 
assign their default value)\n init = tf.global_variables_initializer()\n # Start training\n with tf.Session() as sess:\n # Run the initializer\n sess.run(init)\n for step in range(1, num_steps+1):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n print(\"Optimization Finished!\")\n # Calculate accuracy for MNIST test images\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={X: mnist.test.images,\n Y: mnist.test.labels}))\n\ndef write_graph(build_graph, out_dir):\n \"\"\"Build a graph using build_graph and write it out.\"\"\"\n g = ops.Graph()\n with g.as_default():\n build_graph(out_dir)\n filename = os.path.join(out_dir, 'test_graph_%s.pb' % build_graph.__name__)\n with open(filename, 'wb') as f:\n f.write(g.as_graph_def().SerializeToString())\n\ndef main(_):\n mlp(0)\n\n # launch the default graph\n sess = tf.Session()\n\n writer = tf.summary.FileWriter('vis', sess.graph)\n\n write_graph(mlp, FLAGS.out_dir)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument(\n '--out_dir',\n type=str,\n default='',\n help='Output directory for graphs, checkpoints and savers.')\n FLAGS, unparsed = parser.parse_known_args()\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.placeholder", "tensorflow.python.platform.app.run", "tensorflow.python.framework.ops.Graph", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.global_variables_initializer", "tensorflow.random_normal", "tensorflow.train.AdamOptimizer", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.matmul", "tensorflow.cast", "tensorflow.Session", "tensorflow.argmax", "tensorflow.summary.FileWriter", "tensorflow.nn.softmax" ] ]
thefullstackninja/effective_data_visualization_using_plotly_express
[ "043225b9a4e2333709df19be64475d8ed003daa3" ]
[ "pie_charts/basic_pie_chart_tips_by_gender.py" ]
[ "### Case study Distribution of tips by gender\n\nimport pandas as pd\nimport plotly.express as px\n\n\ndf = pd.read_csv(\"../data/tips.csv\")\n\nplot = px.pie(\n data_frame=df,\n values='tip',\n names='sex',\n title=\"Case study Distribution of tips by gender\"\n \n)\n\nplot.show()" ]
[ [ "pandas.read_csv" ] ]
yasudakn/hmr
[ "6b7a9a4d1a312c0f93140d4d4752ab2d100a4ce3" ]
[ "src/tf_smpl/batch_lbs.py" ]
[ "\"\"\" Util functions for SMPL\n@@batch_skew\n@@batch_rodrigues\n@@batch_lrotmin\n@@batch_global_rigid_transformation\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef batch_skew(vec, batch_size=None):\n \"\"\"\n vec is N x 3, batch_size is int\n\n returns N x 3 x 3. Skew_sym version of each matrix.\n \"\"\"\n with tf.name_scope(\"batch_skew\", values=[vec]):\n if batch_size is None:\n batch_size = vec.shape.as_list()[0]\n col_inds = tf.constant([1, 2, 3, 5, 6, 7])\n indices = tf.reshape(\n tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,\n [-1, 1])\n updates = tf.reshape(\n tf.stack(\n [\n -vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],\n vec[:, 0]\n ],\n axis=1), [-1])\n out_shape = [batch_size * 9]\n res = tf.scatter_nd(indices, updates, out_shape)\n res = tf.reshape(res, [batch_size, 3, 3])\n\n return res\n\n\ndef batch_rodrigues(theta, name=None):\n \"\"\"\n Theta is N x 3\n \"\"\"\n with tf.name_scope(name, \"batch_rodrigues\", [theta]):\n batch_size = theta.shape.as_list()[0]\n\n # angle = tf.norm(theta, axis=1)\n # r = tf.expand_dims(tf.div(theta, tf.expand_dims(angle + 1e-8, -1)), -1)\n # angle = tf.expand_dims(tf.norm(theta, axis=1) + 1e-8, -1)\n angle = tf.expand_dims(tf.norm(theta + 1e-8, axis=1), -1)\n r = tf.expand_dims(tf.div(theta, angle), -1)\n\n angle = tf.expand_dims(angle, -1)\n cos = tf.cos(angle)\n sin = tf.sin(angle)\n\n outer = tf.matmul(r, r, transpose_b=True, name=\"outer\")\n\n eyes = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])\n R = cos * eyes + (1 - cos) * outer + sin * batch_skew(\n r, batch_size=batch_size)\n return R\n\n\ndef batch_lrotmin(theta, name=None):\n \"\"\" NOTE: not used bc I want to reuse R and this is simple.\n Output of this is used to compute joint-to-pose blend shape mapping.\n Equation 9 in SMPL paper.\n\n\n Args:\n pose: `Tensor`, N x 72 vector holding the axis-angle rep of K joints.\n This includes the global rotation so K=24\n\n Returns\n diff_vec : `Tensor`: N x 207 rotation matrix of 23=(K-1) joints with identity subtracted.,\n \"\"\"\n with tf.name_scope(name, \"batch_lrotmin\", [theta]):\n with tf.name_scope(\"ignore_global\"):\n theta = theta[:, 3:]\n\n # N*23 x 3 x 3\n Rs = batch_rodrigues(tf.reshape(theta, [-1, 3]))\n lrotmin = tf.reshape(Rs - tf.eye(3), [-1, 207])\n\n return lrotmin\n\n\ndef batch_global_rigid_transformation(Rs, Js, parent, rotate_base=False):\n \"\"\"\n Computes absolute joint locations given pose.\n\n rotate_base: if True, rotates the global rotation by 90 deg in x axis.\n if False, this is the original SMPL coordinate.\n\n Args:\n Rs: N x 24 x 3 x 3 rotation vector of K joints\n Js: N x 24 x 3, joint locations before posing\n parent: 24 holding the parent id for each index\n\n Returns\n new_J : `Tensor`: N x 24 x 3 location of absolute joints\n A : `Tensor`: N x 24 4 x 4 relative joint transformations for LBS.\n \"\"\"\n with tf.name_scope(\"batch_forward_kinematics\", values=[Rs, Js]):\n N = Rs.shape[0].value\n if rotate_base:\n print('Flipping the SMPL coordinate frame!!!!')\n rot_x = tf.constant(\n [[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=Rs.dtype)\n rot_x = tf.reshape(tf.tile(rot_x, [N, 1]), [N, 3, 3])\n root_rotation = tf.matmul(Rs[:, 0, :, :], rot_x)\n else:\n root_rotation = Rs[:, 0, :, :]\n\n # Now Js is N x 24 x 3 x 1\n Js = tf.expand_dims(Js, -1)\n\n def make_A(R, t, name=None):\n # Rs is N x 3 x 3, ts is N x 3 x 1\n with tf.name_scope(name, \"Make_A\", 
[R, t]):\n R_homo = tf.pad(R, [[0, 0], [0, 1], [0, 0]])\n t_homo = tf.concat([t, tf.ones([N, 1, 1])], 1)\n return tf.concat([R_homo, t_homo], 2)\n\n A0 = make_A(root_rotation, Js[:, 0])\n results = [A0]\n for i in range(1, parent.shape[0]):\n j_here = Js[:, i] - Js[:, parent[i]]\n A_here = make_A(Rs[:, i], j_here)\n res_here = tf.matmul(\n results[parent[i]], A_here, name=\"propA%d\" % i)\n results.append(res_here)\n\n # N x 24 x 4 x 4\n results = tf.stack(results, axis=1)\n\n new_J = results[:, :, :3, 3]\n\n # --- Compute relative A: Skinning is based on\n # how much the bone moved (not the final location of the bone)\n # but (final_bone - init_bone)\n # ---\n Js_w0 = tf.concat([Js, tf.zeros([N, 24, 1, 1])], 2)\n init_bone = tf.matmul(results, Js_w0)\n # Pad to 4 x 4 by prepending an empty 4 x 3 block:\n init_bone = tf.pad(init_bone, [[0, 0], [0, 0], [0, 0], [3, 0]])\n A = results - init_bone\n\n return new_J, A\n" ]
[ [ "tensorflow.pad", "tensorflow.norm", "tensorflow.zeros", "tensorflow.stack", "tensorflow.reshape", "tensorflow.scatter_nd", "tensorflow.div", "tensorflow.tile", "tensorflow.range", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.eye", "tensorflow.matmul", "tensorflow.name_scope", "tensorflow.concat", "tensorflow.cos", "tensorflow.constant", "tensorflow.sin" ] ]
Parallel-in-Time/PararealF90
[ "a8318a79b92465a8a3cf775cc7fd096ff0494529" ]
[ "plot_solution.py" ]
[ "import sys\nsys.path.append('./scripts')\nfrom get_parameter import get_parameter\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\nnu, Nx, Ny, Nz, dt_fine, dt_coarse, Niter, Tend, do_io, be_verbose = get_parameter()\nsol = np.array([])\n\nfilename = \"q_final_fine.dat\"\nwith open(filename,'r') as fobj:\n while True:\n line = fobj.readline()\n if not line: break\n sol = np.append(sol, [float(line)])\n\nassert np.size(sol)==Nx*Ny*Nz, 'Length of solution does not match parameter... was probably generated with different setting.'\n\nsol.shape = ((Nx, Ny, Nz))\n\nx = np.linspace(0, 1, Nx)\ny = np.linspace(0, 1, Ny)\nxx, yy = np.meshgrid(x, y)\n\nfig = plt.figure(figsize=(8,8))\nax = fig.gca(projection='3d')\nax.view_init(elev=0., azim=-90.)\nsurf = ax.plot_surface(xx, yy, sol[:,:,0], rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)\nax.set_xlim(left = 0.0, right = 1.0)\nax.set_ylim(bottom = 0.0, top = 1.0)\n#ax.set_zlim(bottom = 0.0, top = 1.0)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.figure", "numpy.size", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.array", "numpy.meshgrid", "numpy.linspace", "matplotlib.pyplot.xlabel" ] ]
IgorHoholko/metrics
[ "5510ccd99eaec5ab8175bbd5e2ad9e66e82d10e4" ]
[ "torchmetrics/regression/psnr.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.psnr import _psnr_compute, _psnr_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass PSNR(Metric):\n r\"\"\"\n Computes `peak signal-to-noise ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_ (PSNR):\n\n .. math:: \\text{PSNR}(I, J) = 10 * \\log_{10} \\left(\\frac{\\max(I)^2}{\\text{MSE}(I, J)}\\right)\n\n Where :math:`\\text{MSE}` denotes the `mean-squared-error\n <https://en.wikipedia.org/wiki/Mean_squared_error>`_ function.\n\n Args:\n data_range:\n the range of the data. If None, it is determined from the data (max - min).\n The ``data_range`` must be given when ``dim`` is not None.\n base: a base of a logarithm to use (default: 10)\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'``: no reduction will be applied\n\n dim:\n Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is\n None meaning scores will be reduced across all dimensions and all batches.\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. default: None (which selects the entire world)\n\n Raises:\n ValueError:\n If ``dim`` is not ``None`` and ``data_range`` is not given.\n\n Example:\n >>> from torchmetrics import PSNR\n >>> psnr = PSNR()\n >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\n >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])\n >>> psnr(preds, target)\n tensor(2.5527)\n\n .. 
note::\n Half precision is only supported on GPU for this metric\n\n \"\"\"\n\n def __init__(\n self,\n data_range: Optional[float] = None,\n base: float = 10.0,\n reduction: str = 'elementwise_mean',\n dim: Optional[Union[int, Tuple[int, ...]]] = None,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n\n if dim is None and reduction != 'elementwise_mean':\n rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')\n\n if dim is None:\n self.add_state(\"sum_squared_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n else:\n self.add_state(\"sum_squared_error\", default=[])\n self.add_state(\"total\", default=[])\n\n if data_range is None:\n if dim is not None:\n # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to\n # calculate `data_range` in the future.\n raise ValueError(\"The `data_range` must be given when `dim` is not None.\")\n\n self.data_range = None\n self.add_state(\"min_target\", default=tensor(0.0), dist_reduce_fx=torch.min)\n self.add_state(\"max_target\", default=tensor(0.0), dist_reduce_fx=torch.max)\n else:\n self.add_state(\"data_range\", default=tensor(float(data_range)), dist_reduce_fx='mean')\n self.base = base\n self.reduction = reduction\n self.dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n def update(self, preds: Tensor, target: Tensor):\n \"\"\"\n Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)\n if self.dim is None:\n if self.data_range is None:\n # keep track of min and max target values\n self.min_target = min(target.min(), self.min_target)\n self.max_target = max(target.max(), self.max_target)\n\n self.sum_squared_error += sum_squared_error\n self.total += n_obs\n else:\n self.sum_squared_error.append(sum_squared_error)\n self.total.append(n_obs)\n\n def compute(self):\n \"\"\"\n Compute peak signal-to-noise ratio over state.\n \"\"\"\n if self.data_range is not None:\n data_range = self.data_range\n else:\n data_range = self.max_target - self.min_target\n\n if self.dim is None:\n sum_squared_error = self.sum_squared_error\n total = self.total\n else:\n sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])\n total = torch.cat([values.flatten() for values in self.total])\n return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)\n" ]
[ [ "torch.tensor" ] ]
baijianhua/pymath
[ "a96ebbd8c8ac646c436d8bf33cb01764a948255d" ]
[ "bak/coord.py" ]
[ "#用python绘制坐标\n#https://matplotlib.org/examples/axes_grid/demo_axisline_style.html\n#https://stackoverflow.com/questions/13430231/how-i-can-get-cartesian-coordinate-system-in-matplotlib\n#https://stackoverflow.com/questions/50798265/what-is-subplotzero-documentation-lacking\n\n# notice import as 和 from import有什么区别?\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom bak.basic_units import cm\n\nfig: Figure = plt.figure()\n# notice 得到并设置坐标坐标系\nax: Axes = fig.subplots()\nax.set_title('x axis spine at zero data coordinate')\nax.set_xlabel(\"Axes zero\")\nax.set_ylabel(\"Y\")\nax.axis()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.set_xlim(-3, 10)\nax.set_ylim(-3, 10)\n\n# todo 设定坐标轴的样式和刻度\nyAxis: Axes = ax.spines['left']\nyAxis.set_position(('data', 0))\nxAxis: Axes = ax.spines['bottom']\nxAxis.set_position(('data', 0))\n#xAxis.set_axisline_style(\"-|>\")\n\n# notice 设定x的范围\nx = np.arange(-1, 3, 0.01)\n# notice 绘制图形\nax.plot(x, 2*x, xunits=cm, yunits=cm)\n\nplt.show()\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.figure", "matplotlib.pyplot.show" ] ]
manaccac/sc2_bot
[ "3aa8b3711378b71fd0a44662cdd7148846e39530" ]
[ "bot/venv/lib/python3.7/site-packages/scipy/ndimage/_ni_docstrings.py" ]
[ "\"\"\"Docstring components common to several ndimage functions.\"\"\"\nfrom scipy._lib import doccer\n\n__all__ = ['docfiller']\n\n\n_input_doc = (\n\"\"\"input : array_like\n The input array.\"\"\")\n_axis_doc = (\n\"\"\"axis : int, optional\n The axis of `input` along which to calculate. Default is -1.\"\"\")\n_output_doc = (\n\"\"\"output : array or dtype, optional\n The array in which to place the output, or the dtype of the\n returned array. By default an array of the same dtype as input\n will be created.\"\"\")\n_size_foot_doc = (\n\"\"\"size : scalar or tuple, optional\n See footprint, below. Ignored if footprint is given.\nfootprint : array, optional\n Either `size` or `footprint` must be defined. `size` gives\n the shape that is taken from the input array, at every element\n position, to define the input to the filter function.\n `footprint` is a boolean array that specifies (implicitly) a\n shape, but also which of the elements within this shape will get\n passed to the filter function. Thus ``size=(n,m)`` is equivalent\n to ``footprint=np.ones((n,m))``. We adjust `size` to the number\n of dimensions of the input array, so that, if the input array is\n shape (10,10,10), and `size` is 2, then the actual size used is\n (2,2,2). When `footprint` is given, `size` is ignored.\"\"\")\n_mode_doc = (\n\"\"\"mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the input array is extended\n beyond its boundaries. Default is 'reflect'. Behavior for each valid\n value is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_mode_multiple_doc = (\n\"\"\"mode : str or sequence, optional\n The `mode` parameter determines how the input array is extended\n when the filter overlaps a border. By passing a sequence of modes\n with length equal to the number of dimensions of the input array,\n different modes can be specified along each axis. Default value is\n 'reflect'. The valid values and their behavior is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_cval_doc = (\n\"\"\"cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. 
Default\n is 0.0.\"\"\")\n_origin_doc = (\n\"\"\"origin : int, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right.\"\"\")\n_origin_multiple_doc = (\n\"\"\"origin : int or sequence, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right. By passing a sequence of origins with length equal to\n the number of dimensions of the input array, different shifts can\n be specified along each axis.\"\"\")\n_extra_arguments_doc = (\n\"\"\"extra_arguments : sequence, optional\n Sequence of extra positional arguments to pass to passed function.\"\"\")\n_extra_keywords_doc = (\n\"\"\"extra_keywords : dict, optional\n dict of extra keyword arguments to pass to passed function.\"\"\")\n_prefilter_doc = (\n\"\"\"prefilter : bool, optional\n Determines if the input array is prefiltered with `spline_filter`\n before interpolation. The default is True, which will create a\n temporary `float64` array of filtered values if `order > 1`. If\n setting this to False, the output will be slightly blurred if\n `order > 1`, unless the input is prefiltered, i.e. it is the result\n of calling `spline_filter` on the original input.\"\"\")\n\ndocdict = {\n 'input': _input_doc,\n 'axis': _axis_doc,\n 'output': _output_doc,\n 'size_foot': _size_foot_doc,\n 'mode': _mode_doc,\n 'mode_multiple': _mode_multiple_doc,\n 'cval': _cval_doc,\n 'origin': _origin_doc,\n 'origin_multiple': _origin_multiple_doc,\n 'extra_arguments': _extra_arguments_doc,\n 'extra_keywords': _extra_keywords_doc,\n 'prefilter': _prefilter_doc\n }\n\ndocfiller = doccer.filldoc(docdict)\n" ]
[ [ "scipy._lib.doccer.filldoc" ] ]
JanBrabec/UCI-ML-API
[ "59f6c680ac914df55e93e05545eb198887510943" ]
[ "UCI_ML_Functions.py" ]
[ "# Functions to read, analyze, and download from UCI ML portal\n\n# ==========================================\n# Function to read UCI ML datasets table\n# ==========================================\ndef read_dataset_table(\n url=\"https://archive.ics.uci.edu/ml/datasets.php\", msg_flag=True\n):\n \"\"\"\n Reads the table of datasets from the url: \"https://archive.ics.uci.edu/ml/datasets.php\" and process it further to clean and categorize\n \"\"\"\n import pandas as pd\n\n try:\n if msg_flag:\n print(\"Reading the dataset table from UCI ML repo...\")\n datasets = pd.read_html(url)\n if msg_flag:\n print(\"Finished reading the table!\")\n except:\n print(\"Could not read the table from UCI ML portal, Sorry!\")\n\n df = datasets[5] # Fifth entry of this table is the main datasets information\n df.columns = [\n \"Name\",\n \"Data Types\",\n \"Default Task\",\n \"Attribute Types\",\n \"Number of Instances\",\n \"Number of Attributes\",\n \"Year\",\n ]\n # Remove first row which contains table header\n df = df.iloc[1:, :]\n\n return df\n\n\n# ==============================================================================================\n# Function to remove entries with unknown number of samples and cleanly define task categories\n# ==============================================================================================\ndef clean_dataset_table(df, msg_flag=True):\n \"\"\"\n Accepts the raw dataset table (a DataFrame object) and returns a cleaned up version removing entries with unknown number of samples and attributes\n Also creates a 'Task' category column indicating the main machine learning task associated with the dataset\n \"\"\"\n import time\n import pandas as pd\n\n if msg_flag:\n print(\"Cleaning up the dataset table\", end=\"\")\n for i in range(11):\n time.sleep(0.2)\n print(\".\", end=\"\")\n print(\" \", end=\"\")\n print()\n print(\"Rationalizing the task categories\", end=\"\")\n for i in range(11):\n time.sleep(0.2)\n print(\".\", end=\"\")\n print(\" \", end=\"\")\n\n pd.set_option(\"mode.chained_assignment\", None)\n\n df_copy = df.copy()\n df_clean = df_copy.dropna(subset=[\"Number of Instances\"])\n df_clean[\"Number of Instances\"] = df_clean[\"Number of Instances\"].apply(int)\n\n def size_instances(n):\n if n <= 100:\n return \"Small\"\n elif n <= 1000:\n return \"Medium\"\n elif n <= 10000:\n return \"Large\"\n else:\n return \"Extra Large\"\n\n df_clean[\"Sample size\"] = df_clean[\"Number of Instances\"].apply(size_instances)\n\n def categorize_task(task):\n if len(task) > 1:\n tasks = task.split(\", \")\n else:\n tasks = list(task)\n\n if len(tasks) == 1 and tasks[0] == \"Classification\":\n return \"Classification\"\n elif \"Clustering\" in tasks:\n return \"Clustering\"\n elif \"Regression\" in tasks:\n return \"Regression\"\n elif \"Recommender-Systems\" in tasks:\n return \"Recommender Systems\"\n elif \"Causal-Discovery\" in tasks:\n return \"Causal Discovery\"\n else:\n return \"Other/Unknown\"\n\n df_clean[\"Default Task\"] = df_clean[\"Default Task\"].apply(str)\n df_clean[\"Default Task\"] = df_clean[\"Default Task\"].apply(categorize_task)\n\n if msg_flag:\n print(\"\\nFinished processing the table!\")\n\n return df_clean\n\n\n# ======================================================================================================\n# Function to build a local table (CSV file) with name, attributes, machine learning tasks, size, etc\n# ======================================================================================================\ndef 
build_local_table(filename=None, msg_flag=True):\n \"\"\"\n Reads through the UCI ML portal and builds a local table with information such as: \\\n name, size, ML task, data type\n filename: Optional filename that can be chosen by the user\n \"\"\"\n df_table = read_dataset_table(msg_flag=msg_flag)\n df_clean = clean_dataset_table(df_table, msg_flag=msg_flag)\n try:\n if filename != None:\n df_clean.to_csv(filename)\n else:\n df_clean.to_csv(\"UCI table.csv\")\n except:\n print(\n \"Sorry, could not create the CSV table. Please make sure to close an already opened file, \\\n or to have sufficient permission to write files in the current directory\"\n )\n\n\n# ==================================================================\n# Function to read the main page text and create list of datasets\n# ==================================================================\ndef build_dataset_list(url=\"https://archive.ics.uci.edu/ml/datasets\", msg_flag=True):\n \"\"\"\n Scrapes through the UCI ML datasets page and builds a list of all datasets.\n \"\"\"\n\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n # Read the HTML from the URL and pass on to BeautifulSoup\n url = url\n if msg_flag:\n print(\"Opening the file connection...\")\n try:\n uh = urllib.request.urlopen(url, context=ctx)\n # print(\"HTTP status\",uh.getcode())\n html = uh.read()\n # print(f\"Reading done. Total {len(html)} characters read.\")\n except:\n print(\"Could not open the UCI ML portal successfully. Sorry!\")\n return -1\n\n soup = BeautifulSoup(html, \"html5lib\")\n\n dataset_list = []\n lst = []\n\n for link in soup.find_all(\"a\"):\n lst.append(link.attrs)\n\n if msg_flag:\n print()\n print(\"Adding datasets to the list\", end=\"\")\n\n for i in range(11):\n time.sleep(0.3)\n print(\".\", end=\"\")\n print(\" \", end=\"\")\n\n for l in lst:\n a = l[\"href\"]\n if a.find(\"/\") != -1:\n x = a.split(\"/\")\n if len(x) == 2:\n dataset_list.append(x[1])\n\n dataset_list = list(set(dataset_list))\n dataset_list = sorted(dataset_list)\n\n if msg_flag:\n print(\"\\nFinished adding datasets to the list!\")\n\n return dataset_list\n\n\n# ======================================================================================\n# Function to create dictionary of datasets' name, description, and identifier string\n# ======================================================================================\ndef build_dataset_dictionary(\n url=\"https://archive.ics.uci.edu/ml/datasets.php?format=&task=&att=&area=&numAtt=&numIns=&type=&sort=nameUp&view=list\",\n msg_flag=True,\n):\n \"\"\"\n Scrapes through the UCI ML datasets page and builds a dictionary of all datasets with names and description.\n Also stores the unique identifier corresponding to the dataset.\n This identifier string is needed by the downloader function to download the data file. Generic name won't work.\n \"\"\"\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n import re\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n url = url\n if msg_flag:\n print(\"Opening the file connection...\")\n try:\n uh = urllib.request.urlopen(url, context=ctx)\n html = uh.read()\n except:\n print(\"Could not open the UCI ML portal successfully. 
Sorry!\")\n return -1\n\n soup = BeautifulSoup(html, \"html5lib\")\n\n lst = []\n for tag in soup.find_all(\"p\"):\n lst.append(tag.contents)\n\n i = 0\n description_dict = {}\n\n for l in lst:\n if len(l) > 2:\n if str(l[1]).find(\"datasets/\") != -1:\n string = str(l[1])\n s = re.search('\">.*</a>', string)\n x, y = s.span()\n name = string[x + 2 : y - 4]\n desc = l[2][2:]\n tmp_list = []\n description_dict[name] = tmp_list\n description_dict[name].append(desc)\n s = re.search('\".*\"', string)\n x, y = s.span()\n identifier = string[x + 10 : y - 1]\n description_dict[name].append(identifier)\n i += 1\n if msg_flag:\n if i % 10 == 0 and i != 0:\n print(f\"Record {i} processed!\")\n\n return description_dict\n\n\n# ===============================================================\n# Function to create a DataFrame with all information together\n# ===============================================================\ndef build_full_dataframe(msg_flag=False):\n \"\"\"\n Builds a DataFrame with all information together including the url link for downloading the data.\n \"\"\"\n import pandas as pd\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n i = 0\n d = build_dataset_dictionary(msg_flag=False)\n new_d = {}\n dataset_list = build_dataset_list(msg_flag=False)\n\n for k, v in d.items():\n a = extract_url_dataset(v[1], msg_flag=msg_flag)\n if a != None:\n desc = v[0]\n identifier = v[1]\n v[0] = k\n v[1] = desc\n v.append(identifier)\n v.append(a)\n new_d[k] = v\n i += 1\n if msg_flag:\n print(f\"Dataset processed:{k}\")\n else:\n desc = v[0]\n identifier = v[1]\n v[0] = k\n v[1] = desc\n v.append(identifier)\n v.append(\"URL not available\")\n new_d[k] = v\n if msg_flag:\n print(f\"Dataset processed:{k}\")\n if msg_flag:\n print(\"\\nTotal datasets analyzed: \", i)\n\n df_dataset = pd.DataFrame(data=new_d)\n df_dataset = df_dataset.T\n df_dataset.columns = [\"Name\", \"Abstract\", \"Identifier string\", \"Datapage URL\"]\n df_dataset.index.set_names([\"Dataset\"], inplace=True)\n\n return df_dataset\n\n\n# ================================================================================================\n# Function to build a local database (CSV file) with name and URL (of raw data page) information\n# ================================================================================================\ndef build_local_database(filename=None, msg_flag=True):\n \"\"\"\n Reads through the UCI ML portal and builds a local table with information such as: \\\n name, size, ML task, data type\n filename: Optional filename that can be chosen by the user\n \"\"\"\n df_local = build_full_dataframe(msg_flag=msg_flag)\n try:\n if filename != None:\n df_local.to_csv(filename)\n else:\n df_local.to_csv(\"UCI database.csv\")\n except:\n print(\n \"Sorry, could not create the CSV table. 
Please make sure to close an already opened file, \\\n or to have sufficient permission to write files in the current directory\"\n )\n\n\n# ===============================================================================\n# Function to extract abstract/description of a particular dataset by searching\n# ===============================================================================\ndef return_abstract(name, local_database=None, msg_flag=False):\n \"\"\"\n Returns one-liner description (and webpage link for further information) of a particular dataset by searching the given name.\n local_database: Name of the database (CSV file) stored locally i.e. in the same directory, which contains information about all the datasets on UCI ML repo.\n msg_flag: Controls verbosity\n \"\"\"\n\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n if msg_flag:\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n if msg_flag:\n print(\"Done!\")\n\n # Number of rows\n nrows = df.shape[0]\n found = 0\n abstracts = []\n for r in range(nrows):\n if name in df.iloc[r][\"Name\"]:\n found += 1\n abstracts.append(\n df.iloc[r][\"Name\"]\n + \": \"\n + df.iloc[r][\"Abstract\"]\n + \". For more info, visit this link: \"\n + \"https://archive.ics.uci.edu/ml/datasets/\"\n + df.iloc[r][\"Identifier string\"]\n )\n if found == 0:\n print(\"Could not find your search term.\")\n return None\n else:\n print(\n f\"Total {found} instances found including partial match of the search term. Here they are...\\n\"\n )\n for a in abstracts:\n print(a)\n print(\"=\" * 100)\n\n\n# =============================================\n# Function to print all dataset descriptions\n# =============================================\ndef describe_all_dataset(msg_flag=False):\n \"\"\"\n Calls the build_dictionary function and prints description of all datasets from that.\n \"\"\"\n\n dict1 = build_dataset_dictionary(msg_flag=msg_flag)\n\n for k, v in dict1.items():\n print(f\"{k}: {v[0]}\")\n print(\"=\" * 100)\n\n\n# =======================================\n# Function to print all dataset names\n# =======================================\ndef print_all_datasets_names(msg_flag=False):\n \"\"\"\n Calls the build_dictionary function and prints names of all datasets from that.\n \"\"\"\n\n dict1 = build_dataset_dictionary(msg_flag=msg_flag)\n\n for key in dict1.keys():\n print(key)\n print(\"-\" * 100)\n\n\n# ==========================================\n# Function for extracting dataset page url\n# ==========================================\ndef extract_url_dataset(dataset, msg_flag=False):\n \"\"\"\n Given a dataset identifier this function extracts the URL for the page where the actual raw data resides.\n \"\"\"\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import time\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n dataset_dict = {}\n baseurl = \"https://archive.ics.uci.edu/ml/datasets/\"\n url = baseurl + dataset\n\n try:\n uh = urllib.request.urlopen(url, context=ctx)\n html = uh.read().decode()\n soup = BeautifulSoup(html, \"html5lib\")\n if soup.text.find(\"does not appear to exist\") != -1:\n if msg_flag:\n print(f\"{dataset} not found\")\n return None\n else:\n for link in 
soup.find_all(\"a\"):\n if link.attrs[\"href\"].find(\"machine-learning-databases\") != -1:\n a = link.attrs[\"href\"]\n a = a[2:]\n dataurl = \"https://archive.ics.uci.edu/ml/\" + str(a)\n # print(dataurl)\n return str(dataurl)\n # dataurls.append(dataurl)\n\n # After finishing the for-loop with a-tags, the first dataurl is added to the dictionary\n # dataset_dict['dataurl']=dataurls[0]\n except:\n # print(\"Could not retrieve\")\n return None\n\n\n# ================================\n# File download helper function\n# ================================\ndef download_file(url, directory):\n \"\"\"\n Downloads a file from a given url into the given directory.\n \"\"\"\n import requests\n from pathlib import Path\n\n local_filename = Path(directory) / Path(url.split(\"/\")[-1])\n # NOTE the stream=True parameter\n r = requests.get(url, stream=True)\n try:\n with open(local_filename, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n except:\n print(\"Sorry could not write this particular file!\")\n # f.flush()\n\n\n# =====================================================\n# Function for downloading the data set from a page\n# =====================================================\ndef download_dataset_url(url, directory, msg_flag=False, download_flag=True):\n \"\"\"\n Download all the files from the links in the given url.\n msg_flag: Controls verbosity.\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose).\n \"\"\"\n\n import urllib.request, urllib.parse, urllib.error\n from bs4 import BeautifulSoup\n import ssl\n import os\n from pathlib import Path\n\n if url == \"URL not available\":\n return None\n\n cwd = os.getcwd()\n directory = directory.replace(\":\", \"-\")\n local_directory = Path(cwd) / Path(str(directory))\n if not os.path.exists(local_directory):\n try:\n os.makedirs(local_directory)\n except:\n print(f\"Cannot create directory: {directory}\")\n\n if download_flag:\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n uh = urllib.request.urlopen(url, context=ctx)\n html = uh.read().decode()\n soup = BeautifulSoup(html, \"html5lib\")\n\n links = []\n for link in soup.find_all(\"a\"):\n links.append(link.attrs[\"href\"])\n\n links_to_download = []\n\n if \"Index\" in links:\n idx = links.index(\"Index\")\n else:\n idx = len(links) - 2\n for i in range(idx + 1, len(links)):\n links_to_download.append(url + str(links[i]))\n\n for file_url in links_to_download:\n download_file(file_url, local_directory)\n\n if msg_flag:\n print(f\"Downloaded dataset from {url}\")\n\n\n# =================================================================================================\n# User API Function for downloading a given number of datasets and storing in a local directory\n# =================================================================================================\ndef download_datasets(num=10, local_database=None, msg_flag=True, download_flag=True):\n \"\"\"\n Downloads datasets and puts them in a local directory named after the dataset.\n By default downloads first 10 datasets only. User can choose the number of dataets to be downloaded.\n msg_flag: Controls verbosity.\n\tdownload_flag: Default is True. 
If set to False, only creates the directories but does not initiate download (for testing purpose).\n \"\"\"\n\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n if msg_flag:\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n if msg_flag:\n print(\"Done!\")\n\n if num < 1:\n print(\"Invalid entry for the number of datasets.\")\n else:\n for i in range(num):\n if msg_flag:\n print(f\"Downloading dataset(s) for: {df['Name'][i]}\")\n download_dataset_url(\n df[\"Datapage URL\"][i],\n df[\"Name\"][i],\n msg_flag=False,\n download_flag=download_flag,\n )\n print(\"\\nFinished downloading.\")\n\n\n# ============================================================================\n# User API function to download dataset by searching for a particular name\n# ============================================================================\ndef download_dataset_name(name, local_database=None, msg_flag=True, download_flag=True):\n \"\"\"\n Downloads a particular dataset by searching the given name.\n local_database: Name of the database (CSV file) stored locally i.e. in the same directory, which contains information about all the datasets on UCI ML repo.\n msg_flag: Controls verbosity\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose)\n \"\"\"\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n if msg_flag:\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n if msg_flag:\n print(\"Done!\")\n\n urls_to_download = {}\n\n for i in df.index.values:\n if name in i:\n urls_to_download[df.loc[i][\"Name\"]] = df.loc[i][\"Datapage URL\"]\n\n if len(urls_to_download) == 0:\n print(f'Search term \"{name}\" not found in the database. Nothing downloaded!')\n else:\n if len(urls_to_download) > 1:\n print(\n f\"{len(urls_to_download)} instances of search term found including partial match. Downloading datasets for all...\\n\"\n )\n\n for u in urls_to_download:\n if msg_flag:\n print(f\"Downloading dataset(s) for: {u}\")\n download_dataset_url(\n urls_to_download[u],\n directory=u,\n msg_flag=False,\n download_flag=download_flag,\n )\n\n print(\"\\nFinished downloading.\")\n\n\n# =========================================================\n# Function to download all datasets in a given dataframe\n# =========================================================\ndef download_all_from_dataframe(df, msg_flag=False, download_flag=True):\n \"\"\"\n Downloads all datasets which appear in the given dataframe.\n Assumes that the datapage URL information is in the dataframe.\n msg_flag: Controls verbosity\n download_flag: Default is True. 
If set to False, only creates the directories but does not initiate download (for testing purpose)\n \"\"\"\n\n nrows = df.shape[0]\n if download_flag == False:\n print(\"Not downloading anything, just creating empty directories.\\n\")\n for r in range(nrows):\n if msg_flag:\n print(f\"Downloading the dataset: {df.iloc[r]['Name']}\")\n download_dataset_url(\n df.iloc[r][\"Datapage URL\"], df.iloc[r][\"Name\"], download_flag=download_flag\n )\n\n\n# =======================================================\n# User API Function to download datasets based on size\n# =======================================================\ndef download_datasets_size(\n size=\"Small\",\n local_database=None,\n local_table=None,\n msg_flag=False,\n download_flag=True,\n):\n \"\"\"\n Downloads all datasets which satisfy the 'size' criteria.\n size: Size of the dataset which user wants to download. Could be any of the following: 'Small', 'Medium', 'Large', 'Extra Large'.\n local_database: Name of the database (CSV file) stored locally i.e. in the same directory, which contains name and URL information about all the datasets on UCI ML repo.\n local_table: Name of the database (CSV file) stored locally i.e. in the same directory, which contains features information about all the datasets on UCI ML repo i.e. number of samples, type of machine learning task to be performed with the dataset.\n msg_flag: Controls verbosity\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose)\n \"\"\"\n import pandas as pd\n\n assert type(size) == str\n assert str(size) in [\"Small\", \"Medium\", \"Large\", \"Extra Large\"]\n\n if local_database != None:\n local_df_flag = True\n df_local = pd.read_csv(local_database, index_col=\"Dataset\")\n df = df_local\n else:\n local_df_flag = False\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n print(\"Master database build done!\")\n\n if local_table != None:\n local_table_flag = True\n table_local = pd.read_csv(local_table)\n df_clean = clean_dataset_table(table_local, msg_flag=msg_flag)\n else:\n local_table_flag = False\n print(\n \"Local table not supplied.\\nBuilding the master table by reading from the website...\"\n )\n df_table = read_dataset_table(msg_flag=msg_flag)\n df_clean = clean_dataset_table(df_table, msg_flag=msg_flag)\n\n df_merged = df_clean.merge(df, on=\"Name\")\n df_filter = df_merged[df_merged[\"Sample size\"] == str(size)]\n\n download_all_from_dataframe(\n df_filter, msg_flag=msg_flag, download_flag=download_flag\n )\n\n\n# ===========================================================================\n# User API Function to download datasets based on the machine learning task\n# ===========================================================================\ndef download_datasets_task(\n task=\"Classification\",\n local_database=None,\n local_table=None,\n msg_flag=False,\n download_flag=True,\n):\n \"\"\"\n Downloads all datasets which satisfy the 'task' criteria.\n task: Machine learning task for which user wants to download the datasets. Could be any of the following:\n\t 'Classification',\n\t\t'Recommender Systems',\n\t\t'Regression',\n\t\t'Other/Unknown',\n\t\t'Clustering',\n\t\t'Causal Discovery'.\n\tlocal_database: Name of the database (CSV file) stored locally i.e. 
in the same directory, which contains name and URL information about all the datasets on UCI ML repo.\n\tlocal_table: Name of the database (CSV file) stored locally i.e. in the same directory, which contains features information about all the datasets on UCI ML repo i.e. number of samples, type of machine learning task to be performed with the dataset.\n\tmsg_flag: Controls verbosity\n download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose).\n \"\"\"\n import pandas as pd\n\n if local_database != None:\n local_df_flag = True\n df = pd.read_csv(local_database, index_col=\"Dataset\")\n else:\n local_df_flag = False\n print(\n \"Local database not supplied.\\nBuilding the master database by crawling the website...\"\n )\n df = build_full_dataframe(msg_flag=False)\n print(\"Master database build done!\")\n\n if local_table != None:\n local_table_flag = True\n df_clean = pd.read_csv(local_table)\n else:\n local_table_flag = False\n print(\n \"Local table not supplied.\\nBuilding the master table by reading from the website...\"\n )\n df_table = read_dataset_table(msg_flag=msg_flag)\n df_clean = clean_dataset_table(df_table, msg_flag=msg_flag)\n\n df_merged = df_clean.merge(df, on=\"Name\")\n df_filter = df_merged[df_merged[\"Default Task\"] == str(task)]\n\n download_all_from_dataframe(\n df_filter, msg_flag=msg_flag, download_flag=download_flag\n )\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.read_html", "pandas.set_option" ] ]
h-vetinari/triton
[ "d9dd97492f228020573b39a9cec14ee3b8776957" ]
[ "python/tutorials/03-matrix-multiplication.py" ]
[ "\"\"\"\nMatrix Multiplication\n======================\nIn this tutorial, you will write a 25-lines high-performance FP16 matrix multiplication\nkernel that achieves performance on par with cuBLAS.\nYou will specifically learn about:\n\n- Block-level matrix multiplications\n- Multi-dimensional pointer arithmetic\n- Program re-ordering for improved L2 cache hit rate\n- Automatic performance tuning\n\"\"\"\n\n# %%\n# Motivations\n# -------------\n# Matrix multiplications are a key building block of most modern high-performance computing systems.\n# They are notoriously hard to optimize, hence their implementation is generally done by\n# hardware vendors themselves as part of so-called \"kernel libraries\" (e.g., cuBLAS).\n# Unfortunately, these libraries are often proprietary and cannot be easily customized\n# to accomodate the needs of modern deep learning workloads (e.g., fused activation functions).\n# In this tutorial, you will learn how to implement efficient matrix multiplications by\n# yourself with Triton, in a way that is easy to customize and extend.\n#\n# Roughly speaking, the kernel that we will write will implement the following blocked\n# algorithm to multiply a (M, K) by a (K, N) matrix:\n#\n# .. code-block:: python\n#\n# # do in parallel\n# for m in range(0, M, BLOCK_SIZE_M):\n# # do in parallel\n# for n in range(0, N, BLOCK_SIZE_N):\n# acc = zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=float32)\n# for k in range(0, K, BLOCK_SIZE_K):\n# a = A[m : m+BLOCK_SIZE_M, k : k+BLOCK_SIZE_K]\n# b = B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]\n# acc += dot(a, b)\n# C[m : m+BLOCK_SIZE_M, n : n+BLOCK_SIZE_N] = acc;\n#\n# where each iteration of the doubly-nested for-loop is performed by a dedicated Triton program instance.\n\n# %%\n# Compute Kernel\n# ----------------\n#\n# The above algorithm is, actually, fairly straightforward to implement in Triton.\n# The main difficulty comes from the computation of the memory locations at which blocks\n# of :code:`A` and :code:`B` must be read in the inner loop. For that, we need\n# multi-dimensional pointer arithmetics.\n#\n# Pointer Arithmetics\n# ~~~~~~~~~~~~~~~~~~~~\n#\n# For a row-major 2D tensor :code:`X`, the memory location of :code:`X[i, j]` is given b\n# y :code:`&X[i, j] = X + i*stride_xi + j*stride_xj`.\n# Therefore, blocks of pointers for :code:`A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K]` and\n# :code:`B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]` can be defined in pseudo-code as:\n#\n# .. code-block:: python\n#\n# &A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K] = a_ptr + (m : m+BLOCK_SIZE_M)[:, None]*A.stride(0) + (k : k+BLOCK_SIZE_K)[None, :]*A.stride(1);\n# &B[k : k+BLOCK_SIZE_K, n:n+BLOCK_SIZE_N] = b_ptr + (k : k+BLOCK_SIZE_K)[:, None]*B.stride(0) + (n : n+BLOCK_SIZE_N)[None, :]*B.stride(1);\n#\n# Which means that pointers for blocks of A and B can be initialized (i.e., :code:`k=0`) in Triton as:\n#\n# .. code-block:: python\n#\n# offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n# offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n# offs_k = tl.arange(0, BLOCK_SIZE_K)\n# a_ptrs = a_ptr + (offs_am[:, None]*stride_am + offs_k [None, :]*stride_ak)\n# b_ptrs = b_ptr + (offs_k [:, None]*stride_bk + offs_bn[None, :]*stride_bn)\n#\n# And then updated in the inner loop as follows:\n#\n# .. 
code-block:: python\n#\n# pa += BLOCK_SIZE_K * stride_ak;\n# pb += BLOCK_SIZE_K * stride_bk;\n#\n#\n# L2 Cache Optimizations\n# ~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# As mentioned above, each program instance computes a :code:`[BLOCK_SIZE_M, BLOCK_SIZE_N]`\n# block of :code:`C`.\n# It is important to remember that the order in which these blocks are computed does\n# matter, since it affects the L2 cache hit rate of our program. And unfortunately, a\n# simple row-major ordering\n#\n# .. code-block:: Python\n#\n# pid = triton.program_id(0);\n# grid_m = (M + BLOCK_SIZE_M - 1) // BLOCK_SIZE_M;\n# grid_n = (N + BLOCK_SIZE_N - 1) // BLOCK_SIZE_N;\n# pid_m = pid / grid_n;\n# pid_n = pid % grid_n;\n#\n# is just not going to cut it.\n#\n# One possible solution is to launch blocks in an order that promotes data reuse.\n# This can be done by 'super-grouping' blocks in groups of :code:`GROUP_M` rows before\n# switching to the next column:\n#\n# .. code-block:: python\n#\n# # program ID\n# pid = tl.program_id(axis=0)\n# # number of program ids along the M axis\n# num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)\n# # number of program ids along the N axis\n# num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)\n# # number of programs in group\n# num_pid_in_group = GROUP_SIZE_M * num_pid_n\n# # id of the group this program is in\n# group_id = pid // num_pid_in_group\n# # row-id of the first program in the group\n# first_pid_m = group_id * GROUP_SIZE_M\n# # if `num_pid_m` isn't divisible by `GROUP_SIZE_M`, the last group is smaller\n# group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)\n# # *within groups*, programs are ordered in a column-major order\n# # row-id of the program in the *launch grid*\n# pid_m = first_pid_m + (pid % group_size_m)\n# # col-id of the program in the *launch grid*\n# pid_n = (pid % num_pid_in_group) // group_size_m\n#\n# For example, in the following matmul where each matrix is 9 blocks by 9 blocks,\n# we can see that if we compute the output in row-major ordering, we need to load 90\n# blocks into SRAM to compute the first 9 output blocks, but if we do it in grouped\n# ordering, we only need to load 54 blocks.\n#\n# .. 
image:: grouped_vs_row_major_ordering.png\n#\n# In practice, this can improve the performance of our matrix multiplication kernel by\n# more than 10\\% on some hardware architecture (e.g., 220 to 245 TFLOPS on A100).\n#\n\n# %%\n# Final Result\n# -------------\n#\n\nimport torch\n\nimport triton\nimport triton.language as tl\n\n# %\n# :code:`triton.jit`'ed functions can be auto-tuned by using the `triton.autotune`\n# decorator, which consumes:\n# - A list of :code:`triton.Config` objects that define different configurations of\n# meta-parameters (e.g., BLOCK_SIZE_M) and compilation options (e.g., num_warps) to try\n# - An autotuning *key* whose change in values will trigger evaluation of all the\n# provided configs\n\n\[email protected](\n configs=[\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n ],\n key=['M', 'N', 'K'],\n)\[email protected]\ndef matmul_kernel(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension. E.g. 
stride_am is how much to increase a_ptr\n # by to get the element one row down (A has M rows)\n stride_am, stride_ak,\n stride_bk, stride_bn,\n stride_cm, stride_cn,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE_M: tl.constexpr,\n ACTIVATION: tl.constexpr,\n):\n \"\"\"Kernel for computing the matmul C = A x B.\n A has shape (M, K), B has shape (K, N) and C has shape (M, N)\n \"\"\"\n # -----------------------------------------------------------\n # Map program ids `pid` to the block of C it should compute.\n # This is done in a grouped ordering to promote L2 data reuse\n # See above `L2 Cache Optimizations` section for details\n pid = tl.program_id(axis=0)\n num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)\n num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)\n num_pid_in_group = GROUP_SIZE_M * num_pid_n\n group_id = pid // num_pid_in_group\n first_pid_m = group_id * GROUP_SIZE_M\n group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)\n pid_m = first_pid_m + (pid % group_size_m)\n pid_n = (pid % num_pid_in_group) // group_size_m\n\n # ----------------------------------------------------------\n # Create pointers for the first blocks of A and B.\n # We will advance this pointer as we move in the K direction\n # and accumulate\n # a_ptrs is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers\n # b_ptrs is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers\n # see above `Pointer Arithmetics` section for details\n offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n offs_k = tl.arange(0, BLOCK_SIZE_K)\n a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)\n b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)\n\n # -----------------------------------------------------------\n # Iterate to compute a block of the C matrix\n # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block\n # of fp32 values for higher accuracy.\n # `accumulator` will be converted back to fp16 after the loop\n accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)\n for k in range(0, K, BLOCK_SIZE_K):\n # Note that for simplicity, we don't apply a mask here.\n # This means that if K is not a multiple of BLOCK_SIZE_K,\n # this will access out-of-bounds memory and produce an\n # error or (worse!) 
incorrect results.\n a = tl.load(a_ptrs)\n b = tl.load(b_ptrs)\n # We accumulate along the K dimension\n accumulator += tl.dot(a, b)\n # Advance the ptrs to the next K block\n a_ptrs += BLOCK_SIZE_K * stride_ak\n b_ptrs += BLOCK_SIZE_K * stride_bk\n # you can fuse arbitrary activation functions here\n # while the accumulator is still in FP32!\n if ACTIVATION:\n accumulator = ACTIVATION(accumulator)\n c = accumulator.to(tl.float16)\n\n # -----------------------------------------------------------\n # Write back the block of the output matrix C\n offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]\n c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)\n tl.store(c_ptrs, c, mask=c_mask)\n\n\n# we can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `_matmul`\[email protected]\ndef leaky_relu(x):\n return tl.where(x >= 0, x, 0.01 * x)\n\n\n# %%\n# We can now create a convenience wrapper function that only takes two input tensors\n# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel\n\n\ndef matmul(a, b, activation=None):\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n assert a.is_contiguous(), \"matrix A must be contiguous\"\n assert b.is_contiguous(), \"matrix B must be contiguous\"\n M, K = a.shape\n K, N = b.shape\n assert (\n K % 32 == 0\n ), \"We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_SIZE_K\"\n # allocates output\n c = torch.empty((M, N), device=a.device, dtype=a.dtype)\n # 1D launch kernel where each block gets its own program.\n grid = lambda META: (\n triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n matmul_kernel[grid](\n a, b, c,\n M, N, K,\n a.stride(0), a.stride(1),\n b.stride(0), b.stride(1),\n c.stride(0), c.stride(1),\n ACTIVATION=activation,\n )\n return c\n\n\n# %%\n# Unit Test\n# -----------\n#\n# We can test our custom matrix multiplication operation against a native torch implementation (i.e., cuBLAS)\n\ntorch.manual_seed(0)\na = torch.randn((512, 512), device='cuda', dtype=torch.float16)\nb = torch.randn((512, 512), device='cuda', dtype=torch.float16)\ntriton_output = matmul(a, b, activation=None)\ntorch_output = torch.matmul(a, b)\nprint(f\"triton_output={triton_output}\")\nprint(f\"torch_output={torch_output}\")\nif triton.testing.allclose(triton_output, torch_output):\n print(\"✅ Triton and Torch match\")\nelse:\n print(\"❌ Triton and Torch differ\")\n\n# %%\n# Benchmark\n# --------------\n#\n# Square Matrix Performance\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~\n# We can now compare the performance of our kernel against that of cuBLAS. 
Here we focus on square matrices, but feel free to arrange this script as you wish to benchmark any other matrix shape.\n\n\[email protected]_report(\n triton.testing.Benchmark(\n x_names=['M', 'N', 'K'], # argument names to use as an x-axis for the plot\n x_vals=[\n 128 * i for i in range(2, 33)\n ], # different possible values for `x_name`\n line_arg='provider', # argument name whose value corresponds to a different line in the plot\n # possible values for `line_arg``\n line_vals=['cublas', 'cublas + relu', 'triton', 'triton + relu'],\n # label name for the lines\n line_names=[\"cuBLAS\", \"cuBLAS (+ torch.nn.LeakyReLU)\", \"Triton\", \"Triton (+ LeakyReLU)\"],\n # line styles\n styles=[('green', '-'), ('green', '--'), ('blue', '-'), ('blue', '--')],\n ylabel=\"TFLOPS\", # label name for the y-axis\n plot_name=\"matmul-performance\", # name for the plot. Used also as a file name for saving the plot.\n args={},\n )\n)\ndef benchmark(M, N, K, provider):\n a = torch.randn((M, K), device='cuda', dtype=torch.float16)\n b = torch.randn((K, N), device='cuda', dtype=torch.float16)\n if provider == 'cublas':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b))\n if provider == 'triton':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b))\n if provider == 'cublas + relu':\n torch_relu = torch.nn.ReLU(inplace=True)\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: torch_relu(torch.matmul(a, b))\n )\n if provider == 'triton + relu':\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: matmul(a, b, activation=leaky_relu)\n )\n perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)\n return perf(ms), perf(max_ms), perf(min_ms)\n\n\nbenchmark.run(show_plots=True, print_data=True)\n" ]
[ [ "torch.empty", "torch.randn", "torch.manual_seed", "torch.nn.ReLU", "torch.matmul" ] ]
jasonleeinf/ParlAI
[ "1f7f6d5b7481195b0214e835bb5d782db768d71c" ]
[ "tests/test_torch_agent.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\nfrom parlai.core.agents import Agent\n\nfrom collections import deque\n\nSKIP_TESTS = False\ntry:\n from parlai.core.torch_agent import TorchAgent, Output\n import torch\nexcept ImportError:\n SKIP_TESTS = True\n\n\nclass MockDict(Agent):\n \"\"\"Mock Dictionary Agent which just implements indexing and txt2vec.\"\"\"\n\n null_token = '__null__'\n NULL_IDX = 0\n start_token = '__start__'\n BEG_IDX = 1001\n end_token = '__end__'\n END_IDX = 1002\n p1_token = '__p1__'\n P1_IDX = 2001\n p2_token = '__p2__'\n P2_IDX = 2002\n\n def __init__(self, opt, shared=None):\n \"\"\"Initialize idx for incremental indexing.\"\"\"\n self.idx = 0\n\n def __getitem__(self, key):\n \"\"\"Return index of special token or return the token.\"\"\"\n if key == self.null_token:\n return self.NULL_IDX\n elif key == self.start_token:\n return self.BEG_IDX\n elif key == self.end_token:\n return self.END_IDX\n elif key == self.p1_token:\n return self.P1_IDX\n elif key == self.p2_token:\n return self.P2_IDX\n else:\n self.idx += 1\n return self.idx\n\n def __setitem__(self, key, value):\n pass\n\n def add_cmdline_args(self, *args, **kwargs):\n pass\n\n def txt2vec(self, txt):\n \"\"\"Return index of special tokens or range from 1 for each token.\"\"\"\n self.idx = 0\n return [self[tok] for tok in txt.split()]\n\n\nclass TorchAgent(TorchAgent):\n \"\"\"Use MockDict instead of regular DictionaryAgent.\"\"\"\n\n @staticmethod\n def dictionary_class():\n \"\"\"Replace normal dictionary class with mock one.\"\"\"\n return MockDict\n\n def train_step(self, batch):\n \"\"\"Return confirmation of training.\"\"\"\n return Output([f'Training {i}!' for i in range(len(batch.text_vec))])\n\n def eval_step(self, batch):\n \"\"\"Return confirmation of evaluation.\"\"\"\n return Output([f'Evaluating {i}!' 
for i in range(len(batch.text_vec))])\n\n\ndef get_agent(**kwargs):\n \"\"\"Return opt-initialized agent.\n\n :param kwargs: any kwargs you want to set using parser.set_params(**kwargs)\n \"\"\"\n if 'no_cuda' not in kwargs:\n kwargs['no_cuda'] = True\n from parlai.core.params import ParlaiParser\n parser = ParlaiParser()\n TorchAgent.add_cmdline_args(parser)\n parser.set_params(**kwargs)\n opt = parser.parse_args(print_args=False)\n return TorchAgent(opt)\n\n\nclass TestTorchAgent(unittest.TestCase):\n \"\"\"Basic tests on the util functions in TorchAgent.\"\"\"\n\n def test_mock(self):\n \"\"\"Just make sure we can instantiate a mock agent.\"\"\"\n agent = get_agent()\n self.assertTrue(isinstance(agent.dict, MockDict))\n\n def test_share(self):\n \"\"\"Make sure share works and shares dictionary.\"\"\"\n agent = get_agent()\n shared = agent.share()\n self.assertTrue('dict' in shared)\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test__vectorize_text(self):\n \"\"\"Test _vectorize_text and its different options.\"\"\"\n agent = get_agent()\n text = \"I'm sorry, Dave\"\n\n # test add_start and add_end\n vec = agent._vectorize_text(text, add_start=False, add_end=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False)\n self.assertEqual(len(vec), 4)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2, 3])\n vec = agent._vectorize_text(text, add_start=False, add_end=True)\n self.assertEqual(len(vec), 4)\n self.assertEqual(vec.tolist(), [1, 2, 3, MockDict.END_IDX])\n vec = agent._vectorize_text(text, add_start=True, add_end=True)\n self.assertEqual(len(vec), 5)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2, 3,\n MockDict.END_IDX])\n\n # now do it again with truncation=3\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [2, 3, MockDict.END_IDX])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=3)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [2, 3, MockDict.END_IDX])\n\n # now do it again with truncation=2\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [2, 3])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [3, MockDict.END_IDX])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=2)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [3, MockDict.END_IDX])\n\n # now do it again with truncation=2, don't truncate_left\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [1, 2])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n 
self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [1, 2])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=2, truncate_left=False)\n self.assertEqual(len(vec), 2)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1])\n\n # now do it again with truncation=3, don't truncate_left\n vec = agent._vectorize_text(text, add_start=False, add_end=False,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=False,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2])\n vec = agent._vectorize_text(text, add_start=False, add_end=True,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [1, 2, 3])\n vec = agent._vectorize_text(text, add_start=True, add_end=True,\n truncate=3, truncate_left=False)\n self.assertEqual(len(vec), 3)\n self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2])\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test__check_truncate(self):\n \"\"\"Make sure we are truncating when needed.\"\"\"\n agent = get_agent()\n inp = torch.LongTensor([1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, None).tolist(), [1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, 4).tolist(), [1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, 3).tolist(), [1, 2, 3])\n self.assertEqual(agent._check_truncate(inp, 2).tolist(), [1, 2])\n self.assertEqual(agent._check_truncate(inp, 1).tolist(), [1])\n self.assertEqual(agent._check_truncate(inp, 0).tolist(), [])\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_vectorize(self):\n \"\"\"Test the vectorization of observations.\n\n Make sure they do not recompute results, and respect the different\n param options.\n \"\"\"\n agent = get_agent()\n obs_labs = {'text': 'No. Try not.', 'labels': ['Do.', 'Do not.']}\n obs_elabs = {'text': 'No. 
Try not.', 'eval_labels': ['Do.', 'Do not.']}\n\n for obs in (obs_labs, obs_elabs):\n lab_key = 'labels' if 'labels' in obs else 'eval_labels'\n lab_vec = lab_key + '_vec'\n lab_chc = lab_key + '_choice'\n\n inp = obs.copy()\n # test add_start=True, add_end=True\n agent.history.reset()\n agent.history.update_history(inp)\n out = agent.vectorize(inp, agent.history, add_start=True,\n add_end=True)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertEqual(out[lab_vec][1].item(), 1)\n self.assertEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test add_start=True, add_end=False\n inp = obs.copy()\n out = agent.vectorize(inp, agent.history, add_start=True,\n add_end=False)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertNotEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test add_start=False, add_end=True\n inp = obs.copy()\n out = agent.vectorize(inp, agent.history, add_start=False,\n add_end=True)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertNotEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test add_start=False, add_end=False\n inp = obs.copy()\n out = agent.vectorize(inp, agent.history, add_start=False,\n add_end=False)\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # note that label could be either label above\n self.assertNotEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)\n self.assertNotEqual(out[lab_vec][-1].item(), MockDict.END_IDX)\n self.assertEqual(out[lab_chc][:2], 'Do')\n\n # test caching of tensors\n out_again = agent.vectorize(out, agent.history)\n # should have cached result from before\n self.assertIs(out['text_vec'], out_again['text_vec'])\n self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])\n # next: should truncate cached result\n prev_vec = out['text_vec']\n out_again = agent.vectorize(out, agent.history,\n text_truncate=1)\n self.assertIsNot(prev_vec, out_again['text_vec'])\n self.assertEqual(out['text_vec'].tolist(), [3])\n\n # test split_lines\n agent = get_agent(split_lines=True)\n obs = {\n 'text': 'Hello.\\nMy name is Inogo Montoya.\\n'\n 'You killed my father.\\nPrepare to die.',\n }\n agent.history.update_history(obs)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(vecs,\n [[1], [1, 2, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3]])\n\n # check cache\n out_again = agent.vectorize(obs, agent.history)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(vecs,\n [[1], [1, 2, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3]])\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_batchify(self):\n \"\"\"Make sure the batchify function sets up the right fields.\"\"\"\n agent = get_agent(rank_candidates=True)\n obs_labs = [\n {'text': 'It\\'s only a flesh wound.',\n 'labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'labels': ['The needs of the few.']},\n {'text': 'Hello there.',\n 'labels': ['General Kenobi.']},\n ]\n obs_elabs = [\n {'text': 'It\\'s only a flesh wound.',\n 'eval_labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'eval_labels': ['The needs of the 
few.']},\n {'text': 'Hello there.',\n 'eval_labels': ['General Kenobi.']},\n ]\n for obs_batch in (obs_labs, obs_elabs):\n lab_key = 'labels' if 'labels' in obs_batch[0] else 'eval_labels'\n\n # nothing has been vectorized yet so should be empty\n batch = agent.batchify(obs_batch)\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNone(batch.label_vec)\n self.assertIsNone(batch.label_lengths)\n self.assertIsNone(batch.labels)\n self.assertIsNone(batch.valid_indices)\n self.assertIsNone(batch.candidates)\n self.assertIsNone(batch.candidate_vecs)\n self.assertIsNone(batch.image)\n\n obs_vecs = []\n for o in obs_batch:\n agent.history.reset()\n agent.history.update_history(o)\n obs_vecs.append(agent.vectorize(o, agent.history,\n add_start=False, add_end=False))\n\n # is_valid should map to nothing\n def is_valid(obs):\n return False\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_batch)\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNone(batch.label_vec)\n self.assertIsNone(batch.label_lengths)\n self.assertIsNone(batch.labels)\n self.assertIsNone(batch.valid_indices)\n self.assertIsNone(batch.candidates)\n self.assertIsNone(batch.candidate_vecs)\n self.assertIsNone(batch.image)\n\n # is_valid should check for text_vec\n def is_valid(obs):\n return 'text_vec' in obs\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_vecs)\n # which fields were filled vs should be empty?\n self.assertIsNotNone(batch.text_vec)\n self.assertIsNotNone(batch.text_lengths)\n self.assertIsNotNone(batch.label_vec)\n self.assertIsNotNone(batch.label_lengths)\n self.assertIsNotNone(batch.labels)\n self.assertIsNotNone(batch.valid_indices)\n self.assertIsNone(batch.candidates)\n self.assertIsNone(batch.candidate_vecs)\n self.assertIsNone(batch.image)\n\n # contents of certain fields:\n self.assertEqual(batch.text_vec.tolist(),\n [[1, 2, 3, 4, 5, 0],\n [1, 2, 3, 4, 5, 6],\n [1, 2, 0, 0, 0, 0]])\n self.assertEqual(batch.text_lengths, [5, 6, 2])\n self.assertEqual(batch.label_vec.tolist(),\n [[1, 0, 0, 0, 0],\n [1, 2, 3, 4, 5],\n [1, 2, 0, 0, 0]])\n self.assertEqual(batch.label_lengths, [1, 5, 2])\n self.assertEqual(batch.labels, [o[lab_key][0] for o in obs_batch])\n self.assertEqual(list(batch.valid_indices), [0, 1, 2])\n\n # now sort the batch, make sure fields are in sorted order\n batch = agent.batchify(obs_vecs, sort=True)\n self.assertEqual(batch.text_vec.tolist(),\n [[1, 2, 3, 4, 5, 6],\n [1, 2, 3, 4, 5, 0],\n [1, 2, 0, 0, 0, 0]])\n self.assertEqual(batch.text_lengths, [6, 5, 2])\n self.assertEqual(batch.label_vec.tolist(),\n [[1, 2, 3, 4, 5],\n [1, 0, 0, 0, 0],\n [1, 2, 0, 0, 0]])\n self.assertEqual(batch.label_lengths, [5, 1, 2])\n labs = [o[lab_key][0] for o in obs_batch]\n self.assertEqual(batch.labels, [labs[i] for i in [1, 0, 2]])\n self.assertEqual(list(batch.valid_indices), [1, 0, 2])\n\n # now sort just on ys\n new_vecs = [vecs.copy() for vecs in obs_vecs]\n for vec in new_vecs:\n vec.pop('text')\n vec.pop('text_vec')\n\n def is_valid(obs):\n return 'labels_vec' in obs or 'eval_labels_vec' in obs\n agent.is_valid = is_valid\n\n batch = agent.batchify(new_vecs, sort=True)\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNotNone(batch.label_vec)\n self.assertIsNotNone(batch.label_lengths)\n self.assertEqual(batch.label_vec.tolist(),\n [[1, 2, 3, 4, 5],\n [1, 2, 0, 0, 0],\n [1, 0, 0, 0, 0]])\n self.assertEqual(batch.label_lengths, [5, 2, 1])\n labs = [o[lab_key][0] 
for o in new_vecs]\n self.assertEqual(batch.labels, [labs[i] for i in [1, 2, 0]])\n self.assertEqual(list(batch.valid_indices), [1, 2, 0])\n\n # test is_valid\n def is_valid(obs):\n return 'text_vec' in obs and len(obs['text_vec']) < 3\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_vecs)\n self.assertEqual(batch.text_vec.tolist(), [[1, 2]])\n self.assertEqual(batch.text_lengths, [2])\n self.assertEqual(batch.label_vec.tolist(), [[1, 2]])\n self.assertEqual(batch.label_lengths, [2])\n self.assertEqual(batch.labels, obs_batch[2][lab_key])\n self.assertEqual(list(batch.valid_indices), [2])\n\n agent.history.reset()\n obs_cands = [\n agent.vectorize({'label_candidates': ['A', 'B', 'C']},\n agent.history),\n agent.vectorize({'label_candidates': ['1', '2', '5', '3', 'Sir']},\n agent.history),\n agent.vectorize({'label_candidates': ['Do', 'Re', 'Mi']},\n agent.history),\n agent.vectorize({'label_candidates': ['Fa', 'So', 'La', 'Ti']},\n agent.history),\n ]\n\n # is_valid should check for label candidates vecs\n def is_valid(obs):\n return 'label_candidates_vecs' in obs\n agent.is_valid = is_valid\n\n batch = agent.batchify(obs_cands)\n self.assertTrue(agent.rank_candidates, 'Agent not set up to rank.')\n self.assertIsNone(batch.text_vec)\n self.assertIsNone(batch.text_lengths)\n self.assertIsNone(batch.label_vec)\n self.assertIsNone(batch.label_lengths)\n self.assertIsNone(batch.labels)\n self.assertIsNotNone(batch.valid_indices)\n self.assertIsNotNone(batch.candidates)\n self.assertIsNotNone(batch.candidate_vecs)\n self.assertEqual(list(batch.valid_indices), [0, 1, 2, 3])\n self.assertEqual(batch.candidates,\n [o['label_candidates'] for o in obs_cands])\n self.assertEqual(len(batch.candidate_vecs), len(obs_cands))\n for i, cs in enumerate(batch.candidate_vecs):\n self.assertEqual(len(cs), len(obs_cands[i]['label_candidates']))\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_match_batch(self):\n \"\"\"Make sure predictions are correctly aligned when available.\"\"\"\n agent = get_agent()\n\n # first try empty outputs\n reply = agent.match_batch([{}, {}, {}], [0, 1, 2], Output())\n self.assertEqual([{}, {}, {}], reply)\n reply = agent.match_batch([{}, {}, {}], [0, 1, 2], None)\n self.assertEqual([{}, {}, {}], reply)\n\n # try text in order\n reply = agent.match_batch([{}, {}, {}], [0, 1, 2],\n Output(['E.T.', 'Phone', 'Home']))\n self.assertEqual(\n [{'text': 'E.T.'}, {'text': 'Phone'}, {'text': 'Home'}], reply)\n\n # try text out of order\n reply = agent.match_batch([{}, {}, {}], [2, 0, 1],\n Output(['Home', 'E.T.', 'Phone']))\n self.assertEqual(\n [{'text': 'E.T.'}, {'text': 'Phone'}, {'text': 'Home'}], reply)\n\n # try text_candidates in order\n reply = agent.match_batch([{}, {}], [0, 1],\n Output(None, [['More human than human.',\n 'Less human than human'],\n ['Just walk into Mordor',\n 'Just QWOP into Mordor.']]))\n self.assertEqual(reply[0]['text_candidates'],\n ['More human than human.', 'Less human than human'])\n self.assertEqual(reply[1]['text_candidates'],\n ['Just walk into Mordor', 'Just QWOP into Mordor.'])\n # try text_candidates out of order\n reply = agent.match_batch([{}, {}], [1, 0],\n Output(None, [['More human than human.',\n 'Less human than human'],\n ['Just walk into Mordor',\n 'Just QWOP into Mordor.']]))\n self.assertEqual(reply[0]['text_candidates'],\n ['Just walk into Mordor', 'Just QWOP into Mordor.'])\n self.assertEqual(reply[1]['text_candidates'],\n ['More human than human.', 'Less human than human'])\n\n # try both text and 
text_candidates in order\n reply = agent.match_batch(\n [{}, {}], [0, 1],\n Output(['You shall be avenged...', 'Man creates dinosaurs...'],\n [['By Grabthar’s hammer.', 'By the suns of Worvan.'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.']]))\n self.assertEqual(reply[0]['text'], 'You shall be avenged...')\n self.assertEqual(reply[0]['text_candidates'],\n ['By Grabthar’s hammer.', 'By the suns of Worvan.'])\n self.assertEqual(reply[1]['text'], 'Man creates dinosaurs...')\n self.assertEqual(reply[1]['text_candidates'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.'])\n\n # try both text and text_candidates out of order\n reply = agent.match_batch(\n [{}, {}], [1, 0],\n Output(['You shall be avenged...', 'Man creates dinosaurs...'],\n [['By Grabthar’s hammer.', 'By the suns of Worvan.'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.']]))\n self.assertEqual(reply[0]['text'], 'Man creates dinosaurs...')\n self.assertEqual(reply[0]['text_candidates'],\n ['Dinosaurs eat man.', 'Woman inherits the earth.'])\n self.assertEqual(reply[1]['text'], 'You shall be avenged...')\n self.assertEqual(reply[1]['text_candidates'],\n ['By Grabthar’s hammer.', 'By the suns of Worvan.'])\n\n def test__add_person_tokens(self):\n \"\"\"Make sure person tokens are added to the right place in text.\"\"\"\n agent = get_agent()\n text = (\n \"I've seen things you people wouldn't believe.\\n\"\n \"Attack ships on fire off the shoulder of Orion.\\n\"\n \"I watched C-beams glitter in the dark near the Tannhauser gate.\\n\"\n \"All those moments will be lost in time, like tears in rain.\")\n prefix = 'PRE'\n out = agent.history._add_person_tokens(text, prefix, add_after_newln=False)\n self.assertEqual(out, prefix + ' ' + text)\n out = agent.history._add_person_tokens(text, prefix, add_after_newln=True)\n idx = text.rfind('\\n') + 1\n self.assertEqual(out, text[:idx] + prefix + ' ' + text[idx:])\n\n def test_history(self):\n \"\"\"Test different dialog history settings.\"\"\"\n # try with unlimited history\n agent = get_agent(history_size=-1)\n obs = {'text': 'I am Groot.', 'labels': ['I am Groot?'],\n 'episode_done': False}\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange, no reply\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.\\nI am Groot.')\n\n # include reply and set episode_done to clear history after this one\n end_obs = obs.copy()\n end_obs['episode_done'] = True\n agent.history.update_history(end_obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text,\n 'I am Groot.\\nI am Groot.\\nI am Groot?\\nI am Groot.')\n\n # because of episode_done, should be same as first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # now try with history size = 1\n agent = get_agent(history_size=1)\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange should change nothing\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # third exchange with reply should change nothing\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # now try with history size = 2\n agent = 
get_agent(history_size=2)\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange with reply should contain reply\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot?\\nI am Groot.')\n\n # third exchange without reply should have two inputs\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.\\nI am Groot.')\n\n # now try with history size = 3\n agent = get_agent(history_size=3)\n\n # first exchange\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.')\n\n # second exchange with reply should contain reply and input\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text, 'I am Groot.\\nI am Groot?\\nI am Groot.')\n\n # now test add_person_tokens\n agent = get_agent(history_size=3, person_tokens=True)\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(text, f'{agent.P1_TOKEN} I am Groot.')\n\n # second exchange, history should still contain the tokens\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text,\n f'{agent.P1_TOKEN} I am Groot.\\n'\n f'{agent.P2_TOKEN} I am Groot?\\n'\n f'{agent.P1_TOKEN} I am Groot.')\n\n # now add add_p1_after_newln\n agent = get_agent(history_size=3, person_tokens=True,\n add_p1_after_newln=True)\n ctx_obs = obs.copy() # context then utterance in this text field\n ctx_obs['text'] = 'Groot is Groot.\\nI am Groot.'\n agent.history.update_history(ctx_obs)\n text = agent.history.get_history_str()\n self.assertEqual(text,\n f'Groot is Groot.\\n{agent.P1_TOKEN} I am Groot.')\n\n # second exchange, history should still contain context text\n agent.history.update_history(obs, add_next='I am Groot?')\n text = agent.history.get_history_str()\n self.assertEqual(text,\n 'Groot is Groot.\\n'\n f'{agent.P1_TOKEN} I am Groot.\\n'\n f'{agent.P2_TOKEN} I am Groot?\\n'\n f'{agent.P1_TOKEN} I am Groot.')\n\n # test history vecs\n agent.history.reset()\n agent.history.update_history(obs)\n vec = agent.history.get_history_vec()\n self.assertEqual(\n vec,\n deque([2001, 1, 2, 3])\n )\n\n # test history vec list\n agent.history.update_history(obs)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(\n vecs,\n [[2001, 1, 2, 3], [2001, 1, 2, 3]]\n )\n\n # test clearing history\n agent.history.reset()\n text = agent.history.get_history_str()\n self.assertIsNone(text)\n vecs = agent.history.get_history_vec_list()\n self.assertEqual(\n vecs,\n []\n )\n\n # test delimiter\n agent = get_agent(\n history_size=-1,\n delimiter=' Groot! ',\n )\n agent.history.update_history(obs)\n agent.history.update_history(obs)\n text = agent.history.get_history_str()\n self.assertEqual(\n text,\n 'I am Groot. Groot! I am Groot.'\n )\n\n def test_last_reply(self):\n \"\"\"Make sure last reply returns expected values.\"\"\"\n agent = get_agent()\n # nothing to retrieve\n self.assertIsNone(agent.last_reply())\n # set agent's generated replies\n agent.replies = {\n 'batch_reply': [{'text': 'It\\'s okay! 
I\\'m a leaf on the wind.'}]\n }\n # If the observation was previously an episode end, we shouldn't have any\n # older reply\n self.assertEqual(agent.last_reply(), None)\n # now agent should remember what it said\n agent.observation = {'episode_done': False}\n self.assertEqual(agent.last_reply(),\n 'It\\'s okay! I\\'m a leaf on the wind.')\n # now set true observation\n agent.observation = {\n 'text': 'Will that work?',\n 'labels': ['I\\'m a leaf on the wind. Watch how I soar.'],\n 'episode_done': False,\n }\n # now agent should remember true label\n self.assertEqual(agent.last_reply(),\n 'I\\'m a leaf on the wind. Watch how I soar.')\n # but not if we tell it not to\n self.assertEqual(agent.last_reply(use_label=False),\n 'It\\'s okay! I\\'m a leaf on the wind.')\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_observe(self):\n \"\"\"Make sure agent stores and returns observation.\"\"\"\n agent = get_agent()\n obs = {\n 'text': 'I\\'ll be back.',\n 'labels': ['I\\'m back.'],\n 'episode_done': True\n }\n out = agent.observe(obs.copy())\n self.assertIsNotNone(out)\n self.assertIsNotNone(agent.observation)\n self.assertEqual(out['text'], 'I\\'ll be back.')\n # episode was done so shouldn't remember history\n out = agent.observe(obs.copy())\n self.assertEqual(out['text'], 'I\\'ll be back.')\n self.assertTrue('text_vec' in out, 'Text should be vectorized.')\n\n # now try with episode not done\n obs['episode_done'] = False\n out = agent.observe(obs.copy())\n self.assertIsNotNone(out)\n self.assertIsNotNone(agent.observation)\n self.assertEqual(out['text'], 'I\\'ll be back.')\n # should remember history\n out = agent.observe(obs.copy())\n self.assertEqual(out['text'],\n 'I\\'ll be back.\\nI\\'m back.\\nI\\'ll be back.')\n\n @unittest.skipIf(SKIP_TESTS, \"Torch not installed.\")\n def test_batch_act(self):\n \"\"\"Make sure batch act calls the right step.\"\"\"\n agent = get_agent()\n\n obs_labs = [\n {'text': 'It\\'s only a flesh wound.',\n 'labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'labels': ['The needs of the few.']},\n {'text': 'Hello there.',\n 'labels': ['General Kenobi.']},\n ]\n obs_labs_vecs = []\n for o in obs_labs:\n agent.history.reset()\n agent.history.update_history(o)\n obs_labs_vecs.append(agent.vectorize(o, agent.history))\n reply = agent.batch_act(obs_labs_vecs)\n for i in range(len(obs_labs_vecs)):\n self.assertEqual(reply[i]['text'], f'Training {i}!')\n\n obs_elabs = [\n {'text': 'It\\'s only a flesh wound.',\n 'eval_labels': ['Yield!']},\n {'text': 'The needs of the many outweigh...',\n 'eval_labels': ['The needs of the few.']},\n {'text': 'Hello there.',\n 'eval_labels': ['General Kenobi.']},\n ]\n obs_elabs_vecs = []\n for o in obs_elabs:\n agent.history.reset()\n agent.history.update_history(o)\n obs_elabs_vecs.append(agent.vectorize(o, agent.history))\n reply = agent.batch_act(obs_elabs_vecs)\n for i in range(len(obs_elabs_vecs)):\n self.assertEqual(reply[i]['text'], f'Evaluating {i}!')\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.LongTensor" ] ]
CeasarLee/ncnn
[ "178825d14a16c4059820d9f054a8d857df671027" ]
[ "tools/pnnx/tests/test_F_avg_pool1d.py" ]
[ "# Tencent is pleased to support the open source community by making ncnn available.\r\n#\r\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\r\n#\r\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\r\n# in compliance with the License. You may obtain a copy of the License at\r\n#\r\n# https://opensource.org/licenses/BSD-3-Clause\r\n#\r\n# Unless required by applicable law or agreed to in writing, software distributed\r\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\r\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\r\n# specific language governing permissions and limitations under the License.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Model(nn.Module):\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n\r\n def forward(self, x):\r\n x = F.avg_pool1d(x, kernel_size=3)\r\n x = F.avg_pool1d(x, kernel_size=4, stride=2, padding=2)\r\n x = F.avg_pool1d(x, kernel_size=3, stride=1, padding=(0), ceil_mode=False, count_include_pad=True)\r\n x = F.avg_pool1d(x, kernel_size=5, stride=2, padding=(2), ceil_mode=True, count_include_pad=False)\r\n x = F.avg_pool1d(x, kernel_size=3, stride=2, padding=1, ceil_mode=False, count_include_pad=True)\r\n x = F.avg_pool1d(x, kernel_size=2, stride=1, padding=0, ceil_mode=True, count_include_pad=True)\r\n x = F.avg_pool1d(x, kernel_size=4, stride=1, padding=2, ceil_mode=False, count_include_pad=False)\r\n return x\r\n\r\ndef test():\r\n net = Model()\r\n net.eval()\r\n\r\n torch.manual_seed(0)\r\n x = torch.rand(1, 12, 128)\r\n\r\n a = net(x)\r\n\r\n # export torchscript\r\n mod = torch.jit.trace(net, x)\r\n mod.save(\"test_F_avg_pool1d.pt\")\r\n\r\n # torchscript to pnnx\r\n import os\r\n os.system(\"../src/pnnx test_F_avg_pool1d.pt inputshape=[1,12,128]\")\r\n\r\n # pnnx inference\r\n import test_F_avg_pool1d_pnnx\r\n b = test_F_avg_pool1d_pnnx.test_inference()\r\n\r\n return torch.equal(a, b)\r\n\r\nif __name__ == \"__main__\":\r\n if test():\r\n exit(0)\r\n else:\r\n exit(1)\r\n" ]
[ [ "torch.manual_seed", "torch.rand", "torch.equal", "torch.nn.functional.avg_pool1d", "torch.jit.trace" ] ]
rmit-ir/al-ef
[ "abffe57ae171cd846ca29b5e1b5a9f337c948912" ]
[ "code/density.py" ]
[ "#!/usr/bin/env python\nimport numpy\n\ndef jaccquard_similarity(a, b):\n if len(b) == 0 or len(a) == 0: return 0.0\n return len(set(a).intersection(b))*1./len(set(a).union(set(b)))\n\ndef similarityMatrix(features):\n a = numpy.zeros((len(features), len(features)),\n dtype=numpy.float)\n ids = list(features.keys())\n id2row = {}\n for i, idi in enumerate(ids):\n id2row[idi] = i\n for j, idj in enumerate(ids):\n if i == j:\n a[i, j] = 1\n break\n a[i, j] = jaccquard_similarity(features[idi][0].keys(),\n features[idj][0].keys())\n a[j, i] = a[i, j]\n return a, id2row\n\ndef density(matrix, row):\n return numpy.mean(matrix[row,:])\n\ndef k_density(matrix, row, k=5):\n r = matrix[row,:]\n return numpy.mean(numpy.sort(r[1:k+1])[::-1])\n\ndef margin_density(distance, matrix, row):\n return (1-density(matrix, row)*(1-distance))\n\ndef margin_k_density(distance, matrix, row, k=5):\n return (1-k_density(matrix, row, k)*(1-distance))\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.sort", "numpy.mean" ] ]
adeptflax/image2image
[ "8c7c531176d261789f90a27125b31d6241bc9c27" ]
[ "taming/modules/transformer/mingpt.py" ]
[ "\"\"\"\ntaken from: https://github.com/karpathy/minGPT/\nGPT model:\n- the initial stem consists of a combination of token encoding and a positional encoding\n- the meat of it is a uniform sequence of Transformer blocks\n - each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block\n - all blocks feed into a central residual pathway similar to resnets\n- the final decoder is a linear projection into a vanilla Softmax classifier\n\"\"\"\n\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nlogger = logging.getLogger(__name__)\n\n\nclass GPTConfig:\n \"\"\" base GPT config, params common to all GPT versions \"\"\"\n embd_pdrop = 0.1\n resid_pdrop = 0.1\n attn_pdrop = 0.1\n\n def __init__(self, vocab_size, block_size, **kwargs):\n self.vocab_size = vocab_size\n self.block_size = block_size\n for k,v in kwargs.items():\n setattr(self, k, v)\n\n\nclass GPT1Config(GPTConfig):\n \"\"\" GPT-1 like network roughly 125M params \"\"\"\n n_layer = 12\n n_head = 12\n n_embd = 768\n\n\nclass GPT2Config(GPTConfig):\n \"\"\" GPT-2 like network roughly 1.5B params \"\"\"\n # TODO\n\n\nclass CausalSelfAttention(nn.Module):\n \"\"\"\n A vanilla multi-head masked self-attention layer with a projection at the end.\n It is possible to use torch.nn.MultiheadAttention here but I am including an\n explicit implementation here to show that there is nothing too scary here.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n assert config.n_embd % config.n_head == 0\n # key, query, value projections for all heads\n self.key = nn.Linear(config.n_embd, config.n_embd)\n self.query = nn.Linear(config.n_embd, config.n_embd)\n self.value = nn.Linear(config.n_embd, config.n_embd)\n # regularization\n self.attn_drop = nn.Dropout(config.attn_pdrop)\n self.resid_drop = nn.Dropout(config.resid_pdrop)\n # output projection\n self.proj = nn.Linear(config.n_embd, config.n_embd)\n # causal mask to ensure that attention is only applied to the left in the input sequence\n mask = torch.tril(torch.ones(config.block_size,\n config.block_size))\n if hasattr(config, \"n_unmasked\"):\n mask[:config.n_unmasked, :config.n_unmasked] = 1\n self.register_buffer(\"mask\", mask.view(1, 1, config.block_size, config.block_size))\n self.n_head = config.n_head\n\n def forward(self, x, layer_past=None):\n B, T, C = x.size()\n\n # calculate query, key, values for all heads in batch and move head forward to be the batch dim\n k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))\n att = F.softmax(att, dim=-1)\n att = self.attn_drop(att)\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n\n # output projection\n y = self.resid_drop(self.proj(y))\n return y\n\n\nclass Block(nn.Module):\n \"\"\" an unassuming Transformer block \"\"\"\n def __init__(self, config):\n super().__init__()\n self.ln1 = nn.LayerNorm(config.n_embd)\n self.ln2 = nn.LayerNorm(config.n_embd)\n self.attn = 
CausalSelfAttention(config)\n self.mlp = nn.Sequential(\n nn.Linear(config.n_embd, 4 * config.n_embd),\n nn.GELU(), # nice\n nn.Linear(4 * config.n_embd, config.n_embd),\n nn.Dropout(config.resid_pdrop),\n )\n\n def forward(self, x):\n x = x + self.attn(self.ln1(x))\n x = x + self.mlp(self.ln2(x))\n return x\n\n\nclass GPT(nn.Module):\n \"\"\" the full GPT language model, with a context size of block_size \"\"\"\n def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256,\n embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):\n super().__init__()\n config = GPTConfig(vocab_size=vocab_size, block_size=block_size,\n embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,\n n_layer=n_layer, n_head=n_head, n_embd=n_embd,\n n_unmasked=n_unmasked)\n # input embedding stem\n self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)\n self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])\n # decoder head\n self.ln_f = nn.LayerNorm(config.n_embd)\n self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.block_size = config.block_size\n self.apply(self._init_weights)\n self.config = config\n logger.info(\"number of parameters: %e\", sum(p.numel() for p in self.parameters()))\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def forward(self, idx, embeddings=None, targets=None):\n # forward the GPT model\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n\n if embeddings is not None: # prepend explicit embeddings\n token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)\n \n t = token_embeddings.shape[1]\n assert t <= self.block_size, \"Cannot forward, model block size is exhausted.\"\n position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector\n x = self.drop(token_embeddings + position_embeddings)\n x = self.blocks(x)\n x = self.ln_f(x)\n logits = self.head(x)\n\n # if we are given some desired targets also calculate the loss\n loss = None\n if targets is not None:\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n\n return logits, loss\n\n\nclass DummyGPT(nn.Module):\n # for debugging\n def __init__(self, add_value=1):\n super().__init__()\n self.add_value = add_value\n\n def forward(self, idx):\n return idx + self.add_value, None\n\n\nclass CodeGPT(nn.Module):\n \"\"\"Takes in semi-embeddings\"\"\"\n def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256,\n embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):\n super().__init__()\n config = GPTConfig(vocab_size=vocab_size, block_size=block_size,\n embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,\n n_layer=n_layer, n_head=n_head, n_embd=n_embd,\n n_unmasked=n_unmasked)\n # input embedding stem\n self.tok_emb = nn.Linear(in_channels, config.n_embd)\n self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.Sequential(*[Block(config) 
for _ in range(config.n_layer)])\n # decoder head\n self.ln_f = nn.LayerNorm(config.n_embd)\n self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.block_size = config.block_size\n self.apply(self._init_weights)\n self.config = config\n logger.info(\"number of parameters: %e\", sum(p.numel() for p in self.parameters()))\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def forward(self, idx, embeddings=None, targets=None):\n # forward the GPT model\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n\n if embeddings is not None: # prepend explicit embeddings\n token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)\n\n t = token_embeddings.shape[1]\n assert t <= self.block_size, \"Cannot forward, model block size is exhausted.\"\n position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector\n x = self.drop(token_embeddings + position_embeddings)\n x = self.blocks(x)\n x = self.ln_f(x)\n logits = self.head(x)\n\n # if we are given some desired targets also calculate the loss\n loss = None\n if targets is not None:\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n\n return logits, loss\n\n\n\n#### sampling utils\n\ndef top_k_logits(logits, k):\n v, ix = torch.topk(logits, k)\n out = logits.clone()\n out[out < v[:, [-1]]] = -float('Inf')\n return out\n\n@torch.no_grad()\ndef sample(model, x, steps, temperature=1.0, sample=False, top_k=None):\n \"\"\"\n take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in\n the sequence, feeding the predictions back into the model each time. 
Clearly the sampling\n has quadratic complexity unlike an RNN that is only linear, and has a finite context window\n of block_size, unlike an RNN that has an infinite context window.\n \"\"\"\n block_size = model.get_block_size()\n model.eval()\n for k in range(steps):\n x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed\n logits, _ = model(x_cond)\n # pluck the logits at the final step and scale by temperature\n logits = logits[:, -1, :] / temperature\n # optionally crop probabilities to only the top k options\n if top_k is not None:\n logits = top_k_logits(logits, top_k)\n # apply softmax to convert to probabilities\n probs = F.softmax(logits, dim=-1)\n # sample from the distribution or take the most likely\n if sample:\n ix = torch.multinomial(probs, num_samples=1)\n else:\n _, ix = torch.topk(probs, k=1, dim=-1)\n # append to the sequence and continue\n x = torch.cat((x, ix), dim=1)\n\n return x\n\n\n\n#### clustering utils\n\nclass KMeans(nn.Module):\n def __init__(self, ncluster=512, nc=3, niter=10):\n super().__init__()\n self.ncluster = ncluster\n self.nc = nc\n self.niter = niter\n self.shape = (3,32,32)\n self.register_buffer(\"C\", torch.zeros(self.ncluster,nc))\n self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))\n\n def is_initialized(self):\n return self.initialized.item() == 1\n\n @torch.no_grad()\n def initialize(self, x):\n N, D = x.shape\n assert D == self.nc, D\n c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random\n for i in range(self.niter):\n # assign all pixels to the closest codebook element\n a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1)\n # move each codebook element to be the mean of the pixels that assigned to it\n c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)])\n # re-assign any poorly positioned codebook elements\n nanix = torch.any(torch.isnan(c), dim=1)\n ndead = nanix.sum().item()\n print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead))\n c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters\n\n self.C.copy_(c)\n self.initialized.fill_(1)\n\n\n def forward(self, x, reverse=False, shape=None):\n if not reverse:\n # flatten\n bs,c,h,w = x.shape\n assert c == self.nc\n x = x.reshape(bs,c,h*w,1)\n C = self.C.permute(1,0)\n C = C.reshape(1,c,1,self.ncluster)\n a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices\n return a\n else:\n # flatten\n bs, HW = x.shape\n \"\"\"\n c = self.C.reshape( 1, self.nc, 1, self.ncluster)\n c = c[bs*[0],:,:,:]\n c = c[:,:,HW*[0],:]\n x = x.reshape(bs, 1, HW, 1)\n x = x[:,3*[0],:,:]\n x = torch.gather(c, dim=3, index=x)\n \"\"\"\n x = self.C[x]\n x = x.permute(0,2,1)\n shape = shape if shape is not None else self.shape\n x = x.reshape(bs, *shape)\n\n return x\n" ]
[ [ "torch.ones", "torch.nn.Linear", "torch.nn.functional.softmax", "torch.no_grad", "torch.multinomial", "torch.nn.Embedding", "torch.tensor", "torch.nn.GELU", "torch.topk", "torch.nn.LayerNorm", "torch.randperm", "torch.zeros", "torch.isnan", "torch.cat", "torch.nn.Dropout" ] ]
hee9joon/Face-Generation
[ "caa9b4e0bb61e77ee6d32fc8687bad63f998ec9c" ]
[ "3. BEGAN (Boundary Equilibrium GAN)/celeba.py" ]
[ "from torch.utils.data import DataLoader\nfrom torchvision.datasets import ImageFolder\nimport torchvision.transforms as transforms\n\nfrom config import *\n\n\ndef get_celeba_loader(path, batch_size):\n \"\"\"CelebA Loader\"\"\"\n transform = transforms.Compose([\n transforms.Resize((config.crop_size, config.crop_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n\n celeba_dataset = ImageFolder(root=path, transform=transform)\n celeba_loader = DataLoader(celeba_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n\n return celeba_loader" ]
[ [ "torch.utils.data.DataLoader" ] ]
raydouglass/cugraph
[ "228a4e1abc95b9b15ab211d9e397cc61913275e5" ]
[ "python/cugraph/graph/test_graph.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nfrom scipy.io import mmread\n\nimport cugraph\nimport cudf\n\n\ndef read_mtx_file(mm_file):\n print('Reading ' + str(mm_file) + '...')\n return mmread(mm_file).asfptype()\n\n\ndef compare_series(series_1, series_2):\n if (len(series_1) != len(series_2)):\n print(\"Series do not match in length\")\n return 0\n for i in range(len(series_1)):\n if(series_1[i] != series_2[i]):\n print(\"Series[\" + str(i) + \"] does not match, \" + str(series_1[i])\n + \", \" + str(series_2[i]))\n return 0\n return 1\n\n\ndef compare_offsets(offset0, offset1):\n if not (len(offset0) <= len(offset1)):\n print(\"Mismatched length: \" + str(len(offset0)) + \" != \"\n + str(len(offset1)))\n return False\n for i in range(len(offset0)):\n if offset0[i] != offset1[i]:\n print(\"Series[\" + str(i) + \"]: \" + str(offset0[i]) + \" != \"\n + str(offset1[i]))\n return False\n return True\n\n\nDATASETS = ['/datasets/networks/karate.mtx',\n '/datasets/networks/dolphins.mtx',\n '/datasets/networks/netscience.mtx']\n\n\[email protected]('graph_file', DATASETS)\ndef test_add_edge_list_to_adj_list(graph_file):\n\n M = read_mtx_file(graph_file)\n sources = cudf.Series(M.row)\n destinations = cudf.Series(M.col)\n\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n offsets_exp = M.indptr\n indices_exp = M.indices\n\n # cugraph add_egde_list to_adj_list call\n G = cugraph.Graph()\n G.add_edge_list(sources, destinations, None)\n offsets_cu, indices_cu = G.view_adj_list()\n assert compare_offsets(offsets_cu, offsets_exp)\n assert compare_series(indices_cu, indices_exp)\n\n\[email protected]('graph_file', DATASETS)\ndef test_add_adj_list_to_edge_list(graph_file):\n M = read_mtx_file(graph_file)\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n\n M = M.tocoo()\n sources_exp = cudf.Series(M.row)\n destinations_exp = cudf.Series(M.col)\n\n # cugraph add_adj_list to_edge_list call\n G = cugraph.Graph()\n G.add_adj_list(offsets, indices, None)\n sources, destinations = G.view_edge_list()\n sources_cu = np.array(sources)\n destinations_cu = np.array(destinations)\n assert compare_series(sources_cu, sources_exp)\n assert compare_series(destinations_cu, destinations_exp)\n\n\[email protected]('graph_file', DATASETS)\ndef test_transpose_from_adj_list(graph_file):\n M = read_mtx_file(graph_file)\n M = M.tocsr()\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n G = cugraph.Graph()\n G.add_adj_list(offsets, indices, None)\n G.add_transpose()\n Mt = M.transpose().tocsr()\n toff, tind = G.view_transpose_adj_list()\n assert compare_series(tind, Mt.indices)\n assert compare_offsets(toff, Mt.indptr)\n\n\[email 
@pytest.mark.parametrize('graph_file', DATASETS)\ndef test_view_edge_list_from_adj_list(graph_file):\n M = read_mtx_file(graph_file)\n M = M.tocsr()\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n G = cugraph.Graph()\n G.add_adj_list(offsets, indices, None)\n src2, dst2 = G.view_edge_list()\n M = M.tocoo()\n src1 = M.row\n dst1 = M.col\n assert compare_series(src1, src2)\n assert compare_series(dst1, dst2)\n\n\n@pytest.mark.parametrize('graph_file', DATASETS)\ndef test_delete_edge_list_delete_adj_list(graph_file):\n M = read_mtx_file(graph_file)\n sources = cudf.Series(M.row)\n destinations = cudf.Series(M.col)\n\n M = M.tocsr()\n if M is None:\n raise TypeError('Could not read the input graph')\n if M.shape[0] != M.shape[1]:\n raise TypeError('Shape is not square')\n\n offsets = cudf.Series(M.indptr)\n indices = cudf.Series(M.indices)\n\n # cugraph delete_adj_list delete_edge_list call\n G = cugraph.Graph()\n G.add_edge_list(sources, destinations, None)\n G.delete_edge_list()\n with pytest.raises(cudf.bindings.GDFError.GDFError) as excinfo:\n G.view_adj_list()\n assert excinfo.value.errcode.decode() == 'GDF_INVALID_API_CALL'\n\n G.add_adj_list(offsets, indices, None)\n G.delete_adj_list()\n with pytest.raises(cudf.bindings.GDFError.GDFError) as excinfo:\n G.view_edge_list()\n assert excinfo.value.errcode.decode() == 'GDF_INVALID_API_CALL'\n" ]
[ [ "numpy.array", "scipy.io.mmread" ] ]
54hanxiucao/gym-electric-motor
[ "911432388b00675e8a93f4a7937fdc575f106f22" ]
[ "gym_electric_motor/visualization/motor_dashboard_plots/state_plot.py" ]
[ "import numpy as np\n\nfrom .base_plots import TimePlot\n\n\nclass StatePlot(TimePlot):\n \"\"\"Plot to display the environments states and their references.\"\"\"\n\n _default_limit_line_cfg = {\n 'color': 'red',\n 'linestyle': '--',\n 'linewidth': 1\n }\n\n # Labels for each state variable.\n state_labels = {\n 'omega': r'$\\omega$/(1/s)',\n 'torque': '$T$/Nm',\n 'i': '$i$/A',\n 'i_a': '$i_{a}$/A',\n 'i_e': '$i_{e}$/A',\n 'i_b': '$i_{b}$/A',\n 'i_c': '$i_{c}$/A',\n 'i_sq': '$i_{sq}$/A',\n 'i_sd': '$i_{sd}$/A',\n 'u': '$u$/V',\n 'u_a': '$u_{a}$/V',\n 'u_b': '$u_{b}$/V',\n 'u_c': '$u_{c}$/V',\n 'u_sq': '$u_{sq}$/V',\n 'u_sd': '$u_{sd}$/V',\n 'u_e': '$u_{e}$/V',\n 'u_sup': '$u_{sup}$/V',\n 'epsilon': r'$\\epsilon$/rad'\n }\n\n def __init__(self, state):\n \"\"\"\n Args:\n state(str): Name of the state to plot\n \"\"\"\n super().__init__()\n\n self._state_line_config = self._default_time_line_cfg.copy()\n self._ref_line_config = self._default_time_line_cfg.copy()\n self._limit_line_config = self._default_limit_line_cfg.copy()\n\n #: State space of the plotted variable\n self._state_space = None\n #: State name of the plotted variable\n self._state = state\n #: Index in the state array of the plotted variable\n self._state_idx = None\n #: Maximal value of the plotted variable\n self._limits = None\n # Bool: Flag if the plotted variable is referenced.\n self._referenced = None\n\n # matplotlib-Lines for the state and reference\n self._state_line = None\n self._reference_line = None\n\n # Data containers\n self._state_data = []\n self._ref_data = []\n\n # Flag, if the passed data is normalized\n self._normalized = True\n\n def set_env(self, env):\n # Docstring of superclass\n super().set_env(env)\n ps = env.physical_system\n rg = env.reference_generator\n # Save the index of the state.\n self._state_idx = ps.state_positions[self._state]\n # The maximal values of the state.\n self._limits = ps.limits[self._state_idx]\n self._state_space = ps.state_space.low[self._state_idx], ps.state_space.high[self._state_idx]\n # Bool: if the state is referenced.\n self._referenced = rg.referenced_states[self._state_idx]\n # Bool: if the data is already normalized to an interval of [-1, 1]\n self._normalized = self._limits != self._state_space[1]\n # Initialize the data containers\n self._state_data = np.ones(self._x_width) * np.nan\n self._ref_data = np.ones(self._x_width) * np.nan\n\n min_limit = self._limits * self._state_space[0] if self._normalized else self._state_space[0]\n max_limit = self._limits * self._state_space[1] if self._normalized else self._state_space[1]\n spacing = 0.1 * (max_limit - min_limit)\n\n # Set the y-axis limits to fixed initital values\n self._y_lim = (min_limit - spacing, max_limit + spacing)\n\n # Set the y-axis label\n self._label = self.state_labels.get(self._state, self._state)\n\n def initialize(self, axis):\n # Docstring of superclass\n super().initialize(axis)\n\n # Line to plot the state data\n self._state_line, = self._axis.plot(self._x_data, self._state_data, **self._state_line_config)\n self._lines = [self._state_line]\n\n # If the state is referenced plot also the reference line\n if self._referenced:\n self._reference_line, = self._axis.plot(self._x_data, self._ref_data, **self._ref_line_config)\n # Plot state line in front\n axis.lines = axis.lines[::-1]\n self._lines.append(self._reference_line)\n min_limit = self._limits * self._state_space[0] if self._normalized else self._state_space[0]\n max_limit = self._limits * self._state_space[1] if self._normalized 
else self._state_space[1]\n if self._state_space[0] < 0:\n self._axis.axhline(min_limit, **self._limit_line_config)\n lim = self._axis.axhline(max_limit, **self._limit_line_config)\n\n y_label = self._label\n unit_split = y_label.find('/')\n if unit_split == -1:\n unit_split = len(y_label)\n limit_label = y_label[:unit_split] + r'$_{\\mathrm{max}}$' + y_label[unit_split:]\n\n if self._referenced:\n ref_label = y_label[:unit_split] + r'$^*$' + y_label[unit_split:]\n self._axis.legend(\n (self._state_line, self._reference_line, lim), (y_label, ref_label, limit_label), loc='upper left',\n numpoints=20\n )\n else:\n self._axis.legend((self._state_line, lim), (y_label, limit_label), loc='upper left', numpoints=20)\n\n self._y_data = [self._state_data, self._ref_data]\n\n def on_step_end(self, k, state, reference, reward, done):\n super().on_step_end(k, state, reference, reward, done)\n # Write the data to the data containers\n state_ = state[self._state_idx]\n ref = reference[self._state_idx]\n idx = self.data_idx\n self._x_data[idx] = self._t\n self._state_data[idx] = state_ * self._limits\n if self._referenced:\n self._ref_data[idx] = ref * self._limits\n" ]
[ [ "numpy.ones" ] ]
belivem/Study
[ "7e4633b988985735100f2ddd17ae62b8348dbb8e" ]
[ "src/mnist_fully_network/mnist_practice/mnist_data_info.py" ]
[ "import os\nimport sys\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist_path = \"/Users/liyanan/Documents/Test/Tensorflow/data/mnist_data/\"\n\n#print mnist data\ndef mnistInfo():\n \n batch_size = 100\n\n #read mnist data\n mnist = input_data.read_data_sets(mnist_path,one_hot = True)\n\n #training data size\n print(\"Training data size ==> \"+str(mnist.train.num_examples))\n\n #validating data size\n print(\"Validating data size ==> \"+str(mnist.validation.num_examples))\n\n #testing data size\n print(\"Testing data size ==> \"+str(mnist.test.num_examples))\n\n #traing data shape\n print(\"Shape of training images ==> \"+str(mnist.train.images.shape))\n print(\"Shape of training labels ==> \"+str(mnist.train.labels.shape))\n\n #print image\n print(\"Image ==> \")\n print(mnist.train.images[0])\n\n #print lable\n print(\"Lable ==> \")\n print(mnist.train.labels[0])\n\n #next batch size \n xs,ys = mnist.train.next_batch(batch_size)\n print(\"X shape ==> \"+str(xs.shape))\n print(\"Y shape ==> \"+str(ys.shape))\n\ndef getmnist():\n mnist = input_data.read_data_sets(mnist_path,one_hot = True)\n return mnist\n\n#Get current dir and execute file\ndef getcwd():\n print(\"Get current working dir ==> \"+os.getcwd())\n print(\"Get current execute file ==> \"+sys.argv[0])\n\ndef get_minst_class_num():\n #read mnist data\n mnist = input_data.read_data_sets(mnist_path,one_hot = True)\n \n labels = tf.placeholder(tf.float32,shape=[None,10],name=\"labels\")\n class_tensor = tf.argmax(labels,axis=1)\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n print(\"class and num ==>\")\n class_def,idx,count = sess.run(tf.unique_with_counts(class_tensor),feed_dict={labels:mnist.train.labels})\n print(class_def)\n print(count)\n\n\nif __name__ == \"__main__\":\n #getcwd()\n mnistInfo()\n #get_minst_class_num()" ]
[ [ "tensorflow.unique_with_counts", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.Session", "tensorflow.argmax" ] ]
filippovitale/tensorflow
[ "fe9b5008ff63a70e4092cdc7968b1327a9470f77" ]
[ "tensorflow/python/kernel_tests/cwise_ops_test.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functional tests for coefficient-wise operations.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\nimport tensorflow as tf\n\n\n_ADD = lambda x, y: x + y\n_SUB = lambda x, y: x - y\n_MUL = lambda x, y: x * y\n_POW = lambda x, y: x ** y\n_TRUEDIV = lambda x, y: x / y\n_FLOORDIV = lambda x, y: x // y\n_MOD = lambda x, y: x % y\n_NEG = lambda x: -x\n_ABS = abs\n\n_LT = lambda x, y: x < y\n_LE = lambda x, y: x <= y\n_GT = lambda x, y: x > y\n_GE = lambda x, y: x >= y\n\n_AND = lambda x, y: x & y\n_OR = lambda x, y: x | y\n_XOR = lambda x, y: x ^ y\n_INV = lambda x: ~x\n\n\nclass UnaryOpTest(tf.test.TestCase):\n\n def _compareCpu(self, x, np_func, tf_func):\n np_ans = np_func(x)\n with self.test_session(use_gpu=False):\n inx = tf.convert_to_tensor(x)\n if x.dtype in (np.float32, np.float64):\n y = 1.1 * tf_func(inx)\n np_ans *= 1.1\n else:\n y = tf_func(inx)\n tf_cpu = y.eval()\n self.assertShapeEqual(np_ans, y)\n if x.dtype == np.float16:\n self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)\n else:\n self.assertAllClose(np_ans, tf_cpu)\n\n if x.dtype == np.complex64 and tf_func in (\n tf.sign, tf.sqrt, tf.rsqrt, tf.log):\n return # Return early\n\n if x.dtype == np.float16:\n s = list(np.shape(x))\n jacob_t, _ = tf.test.compute_gradient(inx,\n s,\n y,\n s,\n x_init_value=x)\n xf = x.astype(np.float)\n inxf = tf.convert_to_tensor(xf)\n yf = tf_func(inxf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n s,\n yf,\n s,\n x_init_value=xf)\n jacob_n = jacob_n.astype(np.float16)\n self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)\n elif x.dtype == np.float32 or x.dtype == np.complex64:\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n y,\n s,\n x_init_value=x)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n y,\n s,\n x_init_value=x)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGpu(self, x, np_func, tf_func):\n np_ans = np_func(x)\n with self.test_session(use_gpu=True):\n result = tf_func(tf.convert_to_tensor(x))\n tf_gpu = result.eval()\n if x.dtype == np.float16:\n self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)\n else:\n self.assertAllClose(np_ans, tf_gpu)\n # TODO(zhifengc/ke): make gradient checker work on GPU.\n\n def _compareBoth(self, x, np_func, tf_func):\n self._compareCpu(x, np_func, tf_func)\n self._compareGpu(x, np_func, tf_func)\n\n def _inv(self, x):\n return 1.0 / x\n\n def _rsqrt(self, x):\n return self._inv(np.sqrt(x))\n\n def _sigmoid(self, x):\n return 1.0 / (1.0 + np.exp(-x))\n\n def _replace_domain_error_with_inf(self, fn):\n def func(x):\n try:\n return fn(x)\n except 
ValueError as e:\n if \"domain error\" in str(e):\n return np.inf * np.ones_like(x)\n else:\n raise e\n return func\n\n def testFloatBasic(self):\n x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)\n y = (x + .5).astype(np.float32) # no zero\n z = (x + 15.5).astype(np.float32) # all positive\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(y, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(z, np.sqrt, tf.sqrt)\n self._compareBoth(z, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(z, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(y, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n self._compareBoth(\n y,\n np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),\n tf.lgamma)\n self._compareBoth(x, np.vectorize(math.erf), tf.erf)\n self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)\n\n def testFloatTanhEdge(self):\n x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)\n self._compareBoth(x, np.tanh, tf.tanh)\n x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)\n self._compareBoth(x, np.tanh, tf.tanh)\n\n def testFloatEmpty(self):\n x = np.empty((2, 0, 5), dtype=np.float32)\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(x, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(x, np.sqrt, tf.sqrt)\n self._compareBoth(x, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(x, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(x, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n # Can't use vectorize below, so just use some arbitrary function\n self._compareBoth(x, np.sign, tf.lgamma)\n self._compareBoth(x, np.sign, tf.erf)\n self._compareBoth(x, np.sign, tf.erfc)\n\n def testDoubleBasic(self):\n x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)\n y = (x + .5).astype(np.float64) # no zero\n z = (x + 15.5).astype(np.float64) # all positive\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(y, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(z, np.sqrt, tf.sqrt)\n self._compareBoth(z, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(z, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(y, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n self._compareBoth(\n y,\n np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),\n tf.lgamma)\n self._compareBoth(x, np.vectorize(math.erf), tf.erf)\n self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)\n\n def testHalfBasic(self):\n x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)\n y = (x + .5).astype(np.float16) # no zero\n z = (x + 15.5).astype(np.float16) # all positive\n self._compareBoth(x, np.abs, tf.abs)\n self._compareBoth(x, np.abs, _ABS)\n 
self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(y, self._inv, tf.inv)\n self._compareBoth(x, np.square, tf.square)\n self._compareBoth(z, np.sqrt, tf.sqrt)\n self._compareBoth(z, self._rsqrt, tf.rsqrt)\n self._compareBoth(x, np.exp, tf.exp)\n self._compareBoth(z, np.log, tf.log)\n self._compareBoth(x, np.tanh, tf.tanh)\n self._compareBoth(x, self._sigmoid, tf.sigmoid)\n self._compareBoth(y, np.sign, tf.sign)\n self._compareBoth(x, np.sin, tf.sin)\n self._compareBoth(x, np.cos, tf.cos)\n self._compareBoth(\n y,\n np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),\n tf.lgamma)\n self._compareBoth(x, np.vectorize(math.erf), tf.erf)\n self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)\n\n def testInt32Basic(self):\n x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)\n self._compareCpu(x, np.abs, tf.abs)\n self._compareCpu(x, np.abs, _ABS)\n self._compareBoth(x, np.negative, tf.neg)\n self._compareBoth(x, np.negative, _NEG)\n self._compareBoth(x, np.square, tf.square)\n self._compareCpu(x, np.sign, tf.sign)\n\n def testInt64Basic(self):\n x = np.arange(\n -6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)\n self._compareCpu(x, np.abs, tf.abs)\n self._compareCpu(x, np.abs, _ABS)\n self._compareCpu(x, np.negative, tf.neg)\n self._compareCpu(x, np.negative, _NEG)\n self._compareCpu(x, np.square, tf.square)\n self._compareCpu(x, np.sign, tf.sign)\n\n def testComplex64Basic(self):\n x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(\n np.complex64)\n y = x + 0.5 # no zeros\n self._compareCpu(x, np.abs, tf.complex_abs)\n self._compareCpu(x, np.abs, _ABS)\n self._compareCpu(x, np.negative, tf.neg)\n self._compareCpu(x, np.negative, _NEG)\n self._compareCpu(y, self._inv, tf.inv)\n self._compareCpu(x, np.square, tf.square)\n self._compareCpu(x, np.sqrt, tf.sqrt)\n self._compareCpu(y, self._rsqrt, tf.rsqrt)\n self._compareCpu(x, np.exp, tf.exp)\n self._compareCpu(y, np.log, tf.log)\n self._compareCpu(x, np.tanh, tf.tanh)\n self._compareCpu(x, self._sigmoid, tf.sigmoid)\n self._compareCpu(x, np.sin, tf.sin)\n self._compareCpu(x, np.cos, tf.cos)\n\n # Numpy uses an incorrect definition of sign; use the right one instead.\n def complex_sign(x):\n return x / np.abs(x)\n self._compareCpu(y, complex_sign, tf.sign)\n\n\nclass BinaryOpTest(tf.test.TestCase):\n\n def _compareCpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=False):\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_cpu = out.eval()\n # Test that the op takes precedence over numpy operators.\n np_left = tf_func(x, iny).eval()\n np_right = tf_func(inx, y).eval()\n\n if np_ans.dtype != np.object:\n self.assertAllClose(np_ans, tf_cpu)\n self.assertAllClose(np_ans, np_left)\n self.assertAllClose(np_ans, np_right)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, x, y, np_func, tf_func,\n numeric_gradient_type=None):\n z = np_func(x, y)\n zs = list(z.shape)\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n if x.dtype in (np.float32, np.float64):\n out = 1.1 * tf_func(inx, iny)\n else:\n out = tf_func(inx, iny)\n xs = list(x.shape)\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n xs,\n out,\n zs,\n x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n 
outf = tf_func(inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n xs,\n outf,\n zs,\n x_init_value=xf,\n delta=1e-3)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, x, y, np_func, tf_func,\n numeric_gradient_type=None):\n z = np_func(x, y)\n zs = list(z.shape)\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n if x.dtype in (np.float32, np.float64):\n out = 1.1 * tf_func(inx, iny)\n else:\n out = tf_func(inx, iny)\n ys = list(np.shape(y))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n ys,\n out,\n zs,\n x_init_value=y)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf_func(inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inyf,\n ys,\n outf,\n zs,\n x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=True):\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_gpu = out.eval()\n self.assertAllClose(np_ans, tf_gpu)\n self.assertShapeEqual(np_ans, out)\n # TODO(zhifengc/ke): make gradient checker work on GPU.\n\n def _compareBoth(self, x, y, np_func, tf_func):\n self._compareCpu(x, y, np_func, tf_func)\n if x.dtype in (np.float16, np.float32, np.float64):\n if tf_func not in (_FLOORDIV, tf.floordiv, tf.igamma, tf.igammac, tf.zeta, tf.polygamma):\n self._compareGradientX(x, y, np_func, tf_func)\n self._compareGradientY(x, y, np_func, tf_func)\n if tf_func in (tf.igamma, tf.igammac, tf.zeta, tf.polygamma):\n # These methods only support gradients in the second parameter\n self._compareGradientY(x, y, np_func, tf_func)\n self._compareGpu(x, y, np_func, tf_func)\n\n def testFloatBasic(self):\n x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)\n y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(x, y, np.add, tf.add)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)\n self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.add, _ADD)\n self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)\n try:\n from scipy import special # pylint: disable=g-import-not-at-top\n a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)\n x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)\n self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)\n # Need x > 1\n self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)\n n_small = 
np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)\n except ImportError as e:\n tf.logging.warn(\"Cannot test special functions: %s\" % str(e))\n\n def testFloatDifferentShapes(self):\n x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)\n y = np.array([1, 2]).reshape(2, 1).astype(np.float32)\n with self.test_session() as sess:\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n s = tf.reduce_sum(inx * iny)\n gx, gy = sess.run(tf.gradients(s, [inx, iny]))\n # gx is simply the broadcasted y\n self.assertAllEqual(gx, np.array([1, 1, 2, 2])\n .reshape(2, 2).astype(np.float32))\n # gy is x's column summed up\n self.assertAllEqual(gy, np.array([3, 7]).\n reshape(2, 1).astype(np.float32))\n\n def testDoubleBasic(self):\n x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)\n y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)\n self._compareBoth(x, y, np.add, tf.add)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)\n self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.add, _ADD)\n self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)\n try:\n from scipy import special # pylint: disable=g-import-not-at-top\n a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)\n x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)\n self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)\n self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)\n except ImportError as e:\n tf.logging.warn(\"Cannot test special functions: %s\" % str(e))\n\n def testInt8Basic(self):\n x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.multiply, _MUL)\n\n def testInt16Basic(self):\n x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.multiply, _MUL)\n\n def testInt32Basic(self):\n x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)\n self._compareBoth(x, y, np.add, tf.add)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.true_divide, tf.truediv)\n self._compareBoth(x, y, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.mod, tf.mod)\n self._compareBoth(x, y, np.add, _ADD)\n self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y, np.floor_divide, _FLOORDIV)\n self._compareBoth(x, y, np.mod, _MOD)\n\n def testInt64Basic(self):\n x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)\n y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)\n self._compareBoth(x, y, np.subtract, tf.sub)\n self._compareBoth(x, y, np.multiply, tf.mul)\n self._compareBoth(x, y, np.true_divide, tf.truediv)\n self._compareBoth(x, y, np.floor_divide, tf.floordiv)\n self._compareBoth(x, y, np.mod, tf.mod)\n 
self._compareBoth(x, y, np.subtract, _SUB)\n self._compareBoth(x, y, np.multiply, _MUL)\n self._compareBoth(x, y, np.true_divide, _TRUEDIV)\n self._compareBoth(x, y, np.floor_divide, _FLOORDIV)\n self._compareBoth(x, y, np.mod, _MOD)\n\n def testComplex64Basic(self):\n x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(\n np.complex64)\n y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(\n np.complex64)\n self._compareCpu(x, y, np.add, tf.add)\n self._compareCpu(x, y, np.subtract, tf.sub)\n self._compareCpu(x, y, np.multiply, tf.mul)\n self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)\n self._compareCpu(x, y, np.add, _ADD)\n self._compareCpu(x, y, np.subtract, _SUB)\n self._compareCpu(x, y, np.multiply, _MUL)\n self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)\n\n def testStringComparison(self):\n x = np.array([[\"abc\", \"bh\"], [\"c\", \"\"]])\n y = np.array([[\"abc\", \"bh\"], [\"def\", \"hi\"]])\n with self.test_session(use_gpu=False) as sess:\n cmp_eq = tf.equal(x, y)\n cmp_not_eq = tf.not_equal(x, y)\n values = sess.run([cmp_eq, cmp_not_eq])\n self.assertAllEqual([[True, True], [False, False]], values[0])\n self.assertAllEqual([[False, False], [True, True]], values[1])\n\n def testString(self):\n x = np.array([[\"x_0_0\", \"x_0_1\", \"x_0_2\"],\n [\"x_1_0\", \"x_1_1\", \"x_1_2\"],\n [\"x_2_0\", \"x_2_1\", \"x_2_2\"]], dtype=np.object)\n y = np.array([[\"y_0_0\", \"y_0_1\", \"y_0_2\"],\n [\"y_1_0\", \"y_1_1\", \"y_1_2\"],\n [\"y_2_0\", \"y_2_1\", \"y_2_2\"]], dtype=np.object)\n z = np.array([[\"z_0\", \"z_1\", \"z_2\"]], dtype=np.object)\n w = np.array(\"w\", dtype=np.object)\n self._compareCpu(x, y, _ADD, _ADD)\n self._compareCpu(x, z, _ADD, _ADD)\n self._compareCpu(x, w, _ADD, _ADD)\n self._compareCpu(z, w, _ADD, _ADD)\n\n def _compareBCast(self, xs, ys, dtype, np_func, tf_func):\n x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)\n y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)\n self._compareCpu(x, y, np_func, tf_func)\n if x.dtype in (np.float16, np.float32, np.float64):\n if tf_func not in (_FLOORDIV, tf.floordiv):\n if x.dtype == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. 
This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(x, y, np_func, tf_func, np.float)\n self._compareGradientY(x, y, np_func, tf_func, np.float)\n else:\n self._compareGradientX(x, y, np_func, tf_func)\n self._compareGradientY(x, y, np_func, tf_func)\n self._compareGpu(x, y, np_func, tf_func)\n\n # TODO(josh11b,vrv): Refactor this to use parameterized tests.\n def _testBCastByFunc(self, funcs, xs, ys):\n dtypes = [\n np.float16,\n np.float32,\n np.float64,\n np.int32,\n np.int64,\n np.complex64\n ]\n for dtype in dtypes:\n for (np_func, tf_func) in funcs:\n if dtype == np.complex64 and tf_func in (_FLOORDIV, tf.floordiv):\n continue # floordiv makes no sense for complex numbers\n self._compareBCast(xs, ys, dtype, np_func, tf_func)\n self._compareBCast(ys, xs, dtype, np_func, tf_func)\n\n def _testBCastA(self, xs, ys):\n funcs = [\n (np.add, tf.add),\n (np.add, _ADD),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def _testBCastB(self, xs, ys):\n funcs = [\n (np.subtract, tf.sub),\n (np.subtract, _SUB),\n (np.power, tf.pow),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def _testBCastC(self, xs, ys):\n funcs = [\n (np.multiply, tf.mul),\n (np.multiply, _MUL),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def _testBCastD(self, xs, ys):\n funcs = [\n (np.true_divide, tf.truediv),\n (np.floor_divide, tf.floordiv),\n (np.true_divide, _TRUEDIV),\n (np.floor_divide, _FLOORDIV),\n ]\n self._testBCastByFunc(funcs, xs, ys)\n\n def testBCast_0A(self):\n self._testBCastA([1, 3, 2], [1])\n\n def testBCast_0B(self):\n self._testBCastB([1, 3, 2], [1])\n\n def testBCast_0C(self):\n self._testBCastC([1, 3, 2], [1])\n\n def testBCast_0D(self):\n self._testBCastD([1, 3, 2], [1])\n\n def testBCast_1A(self):\n self._testBCastA([1, 3, 2], [2])\n\n def testBCast_1B(self):\n self._testBCastB([1, 3, 2], [2])\n\n def testBCast_1C(self):\n self._testBCastC([1, 3, 2], [2])\n\n def testBCast_1D(self):\n self._testBCastD([1, 3, 2], [2])\n\n def testBCast_2A(self):\n self._testBCastA([1, 3, 2], [3, 2])\n\n def testBCast_2B(self):\n self._testBCastB([1, 3, 2], [3, 2])\n\n def testBCast_2C(self):\n self._testBCastC([1, 3, 2], [3, 2])\n\n def testBCast_2D(self):\n self._testBCastD([1, 3, 2], [3, 2])\n\n def testBCast_3A(self):\n self._testBCastA([1, 3, 2], [3, 1])\n\n def testBCast_3B(self):\n self._testBCastB([1, 3, 2], [3, 1])\n\n def testBCast_3C(self):\n self._testBCastC([1, 3, 2], [3, 1])\n\n def testBCast_3D(self):\n self._testBCastD([1, 3, 2], [3, 1])\n\n def testBCast_4A(self):\n self._testBCastA([1, 3, 2], [1, 3, 2])\n\n def testBCast_4B(self):\n self._testBCastB([1, 3, 2], [1, 3, 2])\n\n def testBCast_4C(self):\n self._testBCastC([1, 3, 2], [1, 3, 2])\n\n def testBCast_4D(self):\n self._testBCastD([1, 3, 2], [1, 3, 2])\n\n def testBCast_5A(self):\n self._testBCastA([1, 3, 2], [2, 3, 1])\n\n def testBCast_5B(self):\n self._testBCastB([1, 3, 2], [2, 3, 1])\n\n def testBCast_5C(self):\n self._testBCastC([1, 3, 2], [2, 3, 1])\n\n def testBCast_5D(self):\n self._testBCastD([1, 3, 2], [2, 3, 1])\n\n def testBCast_6A(self):\n self._testBCastA([1, 3, 2], [2, 1, 1])\n\n def testBCast_6B(self):\n self._testBCastB([1, 3, 2], [2, 1, 1])\n\n def testBCast_6C(self):\n self._testBCastC([1, 3, 2], [2, 1, 1])\n\n def testBCast_6D(self):\n self._testBCastD([1, 3, 2], [2, 1, 1])\n\n def testBCast_7A(self):\n self._testBCastA([1, 3, 2], [1, 3, 1])\n\n def testBCast_7B(self):\n self._testBCastB([1, 3, 2], [1, 3, 1])\n\n def 
testBCast_7C(self):\n self._testBCastC([1, 3, 2], [1, 3, 1])\n\n def testBCast_7D(self):\n self._testBCastD([1, 3, 2], [1, 3, 1])\n\n def testBCast_8A(self):\n self._testBCastA([2, 1, 5], [2, 3, 1])\n\n def testBCast_8B(self):\n self._testBCastB([2, 1, 5], [2, 3, 1])\n\n def testBCast_8C(self):\n self._testBCastC([2, 1, 5], [2, 3, 1])\n\n def testBCast_8D(self):\n self._testBCastD([2, 1, 5], [2, 3, 1])\n\n def testBCast_9A(self):\n self._testBCastA([2, 0, 5], [2, 0, 1])\n\n def testBCast_9B(self):\n self._testBCastB([2, 0, 5], [2, 0, 1])\n\n def testBCast_9C(self):\n self._testBCastC([2, 0, 5], [2, 0, 1])\n\n def testBCast_9D(self):\n self._testBCastD([2, 0, 5], [2, 0, 1])\n\n def testBCast_10A(self):\n self._testBCastA([2, 3, 0], [2, 3, 1])\n\n def testBCast_10B(self):\n self._testBCastB([2, 3, 0], [2, 3, 1])\n\n def testBCast_10C(self):\n self._testBCastC([2, 3, 0], [2, 3, 1])\n\n def testBCast_10D(self):\n self._testBCastD([2, 3, 0], [2, 3, 1])\n\n def testBCast_11A(self):\n self._testBCastA([1, 3, 2], [1, 3, 2])\n\n def testBCast_11B(self):\n self._testBCastB([1, 3, 2], [1, 3, 2])\n\n def testBCast_11C(self):\n self._testBCastC([1, 3, 2], [1, 3, 2])\n\n def testBCast_11D(self):\n self._testBCastD([1, 3, 2], [1, 3, 2])\n\n def testBCast_12A(self):\n self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_12B(self):\n self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_12C(self):\n self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_12D(self):\n self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])\n\n def testBCast_13A(self):\n self._testBCastA([1, 3, 2, 1, 1], [1])\n\n def testBCast_13B(self):\n self._testBCastB([1, 3, 2, 1, 1], [1])\n\n def testBCast_13C(self):\n self._testBCastC([1, 3, 2, 1, 1], [1])\n\n def testBCast_13D(self):\n self._testBCastD([1, 3, 2, 1, 1], [1])\n\n def testBCast_14A(self):\n self._testBCastA([2, 3, 1, 1, 5], [1])\n\n def testBCast_14B(self):\n self._testBCastB([2, 3, 1, 1, 5], [1])\n\n def testBCast_14C(self):\n self._testBCastC([2, 3, 1, 1, 5], [1])\n\n def testBCast_14D(self):\n self._testBCastD([2, 3, 1, 1, 5], [1])\n\n def testBCast_15A(self):\n self._testBCastA([10, 3, 1, 2], [3, 1, 2])\n\n def testBCast_15B(self):\n self._testBCastB([10, 3, 1, 2], [3, 1, 2])\n\n def testBCast_15C(self):\n self._testBCastC([10, 3, 1, 2], [3, 1, 2])\n\n def testBCast_15D(self):\n self._testBCastD([10, 3, 1, 2], [3, 1, 2])\n\n def testMismatchedDimensions(self):\n for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,\n _FLOORDIV]:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Incompatible shapes\" in str(e)):\n func(tf.convert_to_tensor([10.0, 20.0, 30.0]),\n tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))\n\n\nclass ComparisonOpTest(tf.test.TestCase):\n\n def _compare(self, func, x, y, dtype):\n with self.test_session(use_gpu=False):\n out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),\n tf.convert_to_tensor(np.array([y]).astype(dtype)))\n ret = out.eval()\n return ret[0]\n\n def testScalarCompareScalar(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n data = [-1, 0, 1]\n for t in dtypes:\n for x in data:\n for y in data:\n self.assertEqual(self._compare(tf.less, x, y, t),\n x < y)\n self.assertEqual(self._compare(tf.less_equal, x, y, t),\n x <= y)\n self.assertEqual(self._compare(tf.greater, x, y, t),\n x > y)\n self.assertEqual(self._compare(tf.greater_equal, x, y, t),\n x >= y)\n self.assertEqual(self._compare(tf.equal, x, y, t),\n x == y)\n 
self.assertEqual(self._compare(tf.not_equal, x, y, t),\n x != y)\n\n def _compareCpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=False):\n out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))\n tf_cpu = out.eval()\n self.assertAllEqual(np_ans, tf_cpu)\n\n def _compareGpu(self, x, y, np_func, tf_func):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=True):\n out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))\n tf_gpu = out.eval()\n self.assertAllEqual(np_ans, tf_gpu)\n\n def _compareBoth(self, x, y, np_func, tf_func):\n self._compareCpu(x, y, np_func, tf_func)\n if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:\n self._compareGpu(x, y, np_func, tf_func)\n\n def testTensorCompareTensor(self):\n x = np.linspace(-15, 15, 6).reshape(1, 3, 2)\n y = np.linspace(20, -10, 6).reshape(1, 3, 2)\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compareBoth(xt, yt, np.less, tf.less)\n self._compareBoth(xt, yt, np.less_equal, tf.less_equal)\n self._compareBoth(xt, yt, np.greater, tf.greater)\n self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)\n self._compareBoth(xt, yt, np.equal, tf.equal)\n self._compareBoth(xt, yt, np.not_equal, tf.not_equal)\n # TODO(zhifengc): complex64 doesn't work on GPU yet.\n self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),\n np.equal, tf.equal)\n self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),\n np.not_equal, tf.not_equal)\n\n def _compareBCast(self, xs, ys, dtype, np_func, tf_func):\n x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)\n y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)\n self._compareCpu(x, y, np_func, tf_func)\n self._compareCpu(y, x, np_func, tf_func)\n if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:\n self._compareGpu(x, y, np_func, tf_func)\n self._compareGpu(y, x, np_func, tf_func)\n\n def _testBCastByFunc(self, np_func, tf_func):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n dtypes = [\n np.float16,\n np.float32,\n np.float64,\n np.int32,\n np.int64,\n ]\n for (xs, ys) in shapes:\n for dtype in dtypes:\n self._compareBCast(xs, ys, dtype, np_func, tf_func)\n\n def testBCastLess(self):\n self._testBCastByFunc(np.less, tf.less)\n\n def testBCastLessEqual(self):\n self._testBCastByFunc(np.less_equal, tf.less_equal)\n\n def testBCastGreater(self):\n self._testBCastByFunc(np.greater, tf.greater)\n\n def testBCastGreaterEqual(self):\n self._testBCastByFunc(np.greater_equal, tf.greater_equal)\n\n def testBCastEqual(self):\n self._testBCastByFunc(np.equal, tf.equal)\n\n def testBCastNotEqual(self):\n self._testBCastByFunc(np.not_equal, tf.not_equal)\n\n def testShapeMismatch(self):\n dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]\n funcs = [tf.less, tf.less_equal, tf.greater,\n tf.greater_equal, tf.equal, tf.not_equal]\n x = np.arange(0, 10).reshape([2, 5])\n y = np.arange(0, 10).reshape([5, 2])\n for t in dtypes:\n for f in funcs:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Incompatible shapes\" in str(e)):\n f(x.astype(t), y.astype(t))\n\n\nclass LogicalOpTest(tf.test.TestCase):\n\n def _compareBinary(self, x, y, 
np_func, tf_func, use_gpu=False):\n np_ans = np_func(x, y)\n with self.test_session(use_gpu=use_gpu):\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf_func(inx, iny)\n tf_val = out.eval()\n self.assertEqual(out.dtype, tf.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def _not(self, x, use_gpu=False):\n np_ans = np.logical_not(x)\n with self.test_session(use_gpu=use_gpu):\n out = tf.logical_not(tf.convert_to_tensor(x))\n tf_val = out.eval()\n self.assertEqual(out.dtype, tf.bool)\n self.assertAllEqual(np_ans, tf_val)\n self.assertShapeEqual(np_ans, out)\n\n def testScalar(self):\n data = [np.array([True]), np.array([False])]\n for use_gpu in [True, False]:\n for x in data:\n self._not(x, use_gpu)\n for x in data:\n for y in data:\n self._compareBinary(\n x, y, np.logical_and, tf.logical_and, use_gpu)\n self._compareBinary(\n x, y, np.logical_or, tf.logical_or, use_gpu)\n self._compareBinary(\n x, y, np.logical_xor, tf.logical_xor, use_gpu)\n\n def testTensor(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n for use_gpu in [True, False]:\n self._not(x, use_gpu)\n self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)\n\n def testBCast(self):\n shapes = [\n ([1, 3, 2], [1]),\n ([1, 3, 2], [2]),\n ([1, 3, 2], [3, 2]),\n ([1, 3, 2], [3, 1]),\n ([1, 3, 2], [1, 3, 2]),\n ([1, 3, 2], [2, 3, 1]),\n ([1, 3, 2], [2, 1, 1]),\n ([1, 3, 2], [1, 3, 1]),\n ([2, 1, 5], [2, 3, 1]),\n ([2, 0, 5], [2, 0, 1]),\n ([2, 3, 0], [2, 3, 1]),\n ]\n for (xs, ys) in shapes:\n x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)\n y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)\n for use_gpu in [True, False]:\n self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)\n self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)\n self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)\n\n def testShapeMismatch(self):\n x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)\n for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Incompatible shapes\" in str(e)):\n f(x, y)\n\n def testUsingAsPythonValueFails(self):\n # Ensure that we raise an error when the user attempts to treat a\n # `Tensor` as a Python `bool`.\n b = tf.constant(False)\n with self.assertRaises(TypeError):\n if b:\n pass\n\n x = tf.constant(3)\n y = tf.constant(4)\n with self.assertRaises(TypeError):\n if x > y:\n pass\n\n z = tf.constant(7)\n\n # The chained comparison should fail because Python computes `x <\n # y` and short-circuits the comparison with `z` if it is `False`.\n with self.assertRaises(TypeError):\n _ = x < y < z\n\n\nclass SelectOpTest(tf.test.TestCase):\n\n def _compare(self, c, x, y, use_gpu):\n np_ans = np.where(c, x, y)\n with self.test_session(use_gpu=use_gpu):\n out = tf.select(c, x, y)\n tf_ans = out.eval()\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(c))\n jacob_t, jacob_n = 
tf.test.compute_gradient(inx,\n s,\n out,\n s,\n x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n s,\n outf,\n s,\n x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(c))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n s,\n out,\n s,\n x_init_value=y,\n delta=1.0)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inyf,\n s,\n outf,\n s,\n x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testBasic(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n xt = x.astype(t)\n yt = y.astype(t)\n self._compare(c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(c, xt, yt, use_gpu=True)\n\n def testGradients(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(1, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. 
This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(c, xt, yt, np.float)\n self._compareGradientY(c, xt, yt, np.float)\n else:\n self._compareGradientX(c, xt, yt)\n self._compareGradientY(c, xt, yt)\n\n def testShapeMismatch(self):\n c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)\n x = np.random.rand(1, 3, 2) * 100\n y = np.random.rand(2, 5, 3) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n tf.select(c, xt, yt)\n\n def testEmptyTensor(self):\n c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)\n x = np.random.rand(1, 3, 0) * 100\n y = np.random.rand(1, 3, 0) * 100\n z_expected = np.zeros((1, 3, 0), dtype=np.float32)\n with self.test_session():\n xt = x.astype(np.float32)\n yt = y.astype(np.float32)\n z = tf.select(c, xt, yt).eval()\n self.assertAllEqual(z_expected, z)\n\n\nclass BatchSelectOpTest(tf.test.TestCase):\n \"\"\"Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+.\"\"\"\n\n def _compare(self, c, x, y, use_gpu):\n np_ans = np.dstack(\n [x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(\n [2, 0, 1])\n with self.test_session(use_gpu=use_gpu):\n out = tf.select(c, x, y)\n tf_ans = out.eval()\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, out)\n\n def _compareGradientX(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n out,\n s,\n x_init_value=x)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inxf,\n s,\n outf,\n s,\n x_init_value=xf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, c, x, y, numeric_gradient_type=None):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = tf.select(c, inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n s,\n out,\n s,\n x_init_value=y)\n if numeric_gradient_type is not None:\n xf = x.astype(numeric_gradient_type)\n yf = y.astype(numeric_gradient_type)\n inxf = tf.convert_to_tensor(xf)\n inyf = tf.convert_to_tensor(yf)\n outf = tf.select(c, inxf, inyf)\n _, jacob_n = tf.test.compute_gradient(inyf,\n s,\n outf,\n s,\n x_init_value=yf)\n jacob_n = jacob_n.astype(x.dtype)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testBasic(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n 
xt = x.astype(t)\n yt = y.astype(t)\n self._compare(c, xt, yt, use_gpu=False)\n if t in [np.float16, np.float32, np.float64]:\n self._compare(c, xt, yt, use_gpu=True)\n\n def testGradients(self):\n c = np.random.randint(0, 2, 16).astype(np.bool)\n x = np.random.rand(16, 2, 8) * 100\n y = np.random.rand(16, 2, 8) * 100\n for t in [np.float16, np.float32, np.float64]:\n xt = x.astype(t)\n yt = y.astype(t)\n if t == np.float16:\n # Compare fp16 theoretical gradients to fp32 numerical gradients,\n # since fp16 numerical gradients are too imprecise unless great\n # care is taken with choosing the inputs and the delta. This is\n # a weaker check (in particular, it does not test the op itself,\n # only its gradient), but it's much better than nothing.\n self._compareGradientX(c, xt, yt, np.float)\n self._compareGradientY(c, xt, yt, np.float)\n else:\n self._compareGradientX(c, xt, yt)\n self._compareGradientY(c, xt, yt)\n\n def testShapeMismatch(self):\n c = np.random.randint(0, 2, 8).astype(np.bool)\n x = np.random.rand(16, 3, 2) * 100\n y = np.random.rand(16, 3, 2) * 100\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64,\n np.complex64]:\n xt = x.astype(t)\n yt = y.astype(t)\n with self.assertRaises(ValueError):\n tf.select(c, xt, yt)\n\n\nclass MinMaxOpTest(tf.test.TestCase):\n\n def _compare(self, x, y, use_gpu):\n np_min, np_max = np.minimum(x, y), np.maximum(x, y)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)\n tf_min, tf_max = sess.run([omin, omax])\n self.assertAllEqual(np_min, tf_min)\n self.assertAllEqual(np_max, tf_max)\n\n def testBasic(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(1, 3, 2) * 100.\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testDifferentShapes(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.random.rand(2) * 100. # should broadcast\n for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:\n self._compare(x.astype(t), y.astype(t), use_gpu=False)\n self._compare(x.astype(t), y.astype(t), use_gpu=True)\n\n def testScalar(self):\n x = np.random.rand(1, 3, 2) * 100.\n y = np.asscalar(np.random.rand(1) * 100.) 
# should broadcast\n # dropped np.float64, int64 because TF automatically converts to 32 bit\n for t in [np.float32, np.int32]:\n self._compare(x.astype(t), t(y), use_gpu=False)\n self._compare(x.astype(t), t(y), use_gpu=True)\n\n def _compareGradientX(self, func, x, y):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n s,\n out,\n s,\n x_init_value=x)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _compareGradientY(self, func, x, y):\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n iny = tf.convert_to_tensor(y)\n out = func(inx, iny)\n s = list(np.shape(x))\n jacob_t, jacob_n = tf.test.compute_gradient(iny,\n s,\n out,\n s,\n x_init_value=y)\n if x.dtype == np.float16:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float32:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)\n elif x.dtype == np.float64:\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def testGradients(self):\n x = np.random.rand(1, 3, 2) * 100.\n # ensure x != y\n y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1\n self._compareGradientX(tf.maximum, x, y)\n self._compareGradientY(tf.maximum, x, y)\n self._compareGradientX(tf.minimum, x, y)\n self._compareGradientY(tf.minimum, x, y)\n\n\nclass MathOpsOverloadTest(tf.test.TestCase):\n\n def _computeTensorAndLiteral(self, x, y, dtype, func):\n with self.test_session(use_gpu=False):\n inx = tf.convert_to_tensor(x, dtype=dtype)\n z = func(inx, y) # Should use __add__, __sub__, etc.\n return z.eval()\n\n def _computeLiteralAndTensor(self, x, y, dtype, func):\n with self.test_session(use_gpu=False):\n iny = tf.convert_to_tensor(y, dtype=dtype)\n z = func(x, iny) # Should use __radd__, __rsub__, etc.\n return z.eval()\n\n def _compareBinary(self, x, y, dtype, np_func, tf_func):\n np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)\n self.assertAllClose(np_ans, self._computeTensorAndLiteral(\n x, y, dtype, tf_func))\n self.assertAllClose(np_ans, self._computeLiteralAndTensor(\n x, y, dtype, tf_func))\n\n def _compareUnary(self, x, dtype, np_func, tf_func):\n np_ans = np_func(x).astype(dtype.as_numpy_dtype)\n with self.test_session(use_gpu=False):\n self.assertAllClose(np_ans, tf_func(tf.convert_to_tensor(x, dtype=dtype)).eval())\n\n def testOverload(self):\n dtypes = [\n tf.float16,\n tf.float32,\n tf.float64,\n tf.int32,\n tf.int64,\n tf.complex64,\n ]\n funcs = [\n (np.add, _ADD),\n (np.subtract, _SUB),\n (np.multiply, _MUL),\n (np.power, _POW),\n (np.true_divide, _TRUEDIV),\n (np.floor_divide, _FLOORDIV),\n ]\n for dtype in dtypes:\n for np_func, tf_func in funcs:\n if dtype == tf.complex64 and tf_func == _FLOORDIV:\n continue # floordiv makes no sense for complex\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n # Mod only works for int32 and int64.\n for dtype in [tf.int32, tf.int64]:\n self._compareBinary(10, 3, dtype, np.mod, _MOD)\n\n def testOverloadComparisons(self):\n dtypes = [\n tf.float16,\n tf.float32,\n tf.float64,\n tf.int32,\n tf.int64,\n ]\n funcs = [\n (np.less, _LT),\n (np.less_equal, _LE),\n (np.greater, _GT),\n (np.greater_equal, _GE),\n ]\n for dtype in dtypes:\n for np_func, tf_func in 
funcs:\n self._compareBinary(10, 5, dtype, np_func, tf_func)\n logical_funcs = [\n (np.logical_and, _AND),\n (np.logical_or, _OR),\n (np.logical_xor, _XOR),\n (np.equal, tf.equal),\n (np.not_equal, tf.not_equal)\n ]\n for np_func, tf_func in logical_funcs:\n self._compareBinary(True, False, tf.bool, np_func, tf_func)\n self._compareBinary(True, True, tf.bool, np_func, tf_func)\n self._compareBinary(False, False, tf.bool, np_func, tf_func)\n self._compareBinary(False, True, tf.bool, np_func, tf_func)\n self._compareBinary([True, True, False, False],\n [True, False, True, False],\n tf.bool, np_func, tf_func)\n self._compareUnary(True, tf.bool, np.logical_not, _INV)\n self._compareUnary(False, tf.bool, np.logical_not, _INV)\n self._compareUnary([True, False], tf.bool, np.logical_not, _INV)\n\n\nclass IsFiniteInfNanTest(tf.test.TestCase):\n\n def _compare(self, x, use_gpu):\n np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(x)\n ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(\n inx), tf.is_nan(inx)\n tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])\n self.assertAllEqual(np_inf, tf_inf)\n self.assertAllEqual(np_nan, tf_nan)\n self.assertAllEqual(np_finite, tf_finite)\n self.assertShapeEqual(np_inf, oinf)\n self.assertShapeEqual(np_nan, onan)\n self.assertShapeEqual(np_finite, ofinite)\n\n def _testDtype(self, dtype):\n fi = np.finfo(dtype)\n data = np.array([0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max,\n -np.inf, np.inf, np.nan]).astype(dtype)\n self._compare(data, use_gpu=False)\n self._compare(data, use_gpu=True)\n\n def testHalf(self):\n self._testDtype(np.float16)\n\n def testFloat(self):\n self._testDtype(np.float32)\n\n def testDouble(self):\n self._testDtype(np.float64)\n\n\nclass RoundingTest(tf.test.TestCase):\n\n def _compare(self, x, use_gpu):\n np_floor, np_ceil = np.floor(x), np.ceil(x)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(x)\n ofloor, oceil = tf.floor(inx), tf.ceil(inx)\n tf_floor, tf_ceil = sess.run([ofloor, oceil])\n self.assertAllEqual(np_floor, tf_floor)\n self.assertAllEqual(np_ceil, tf_ceil)\n self.assertShapeEqual(np_floor, ofloor)\n self.assertShapeEqual(np_ceil, oceil)\n\n def _testDtype(self, dtype):\n data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)\n self._compare(data, use_gpu=False)\n self._compare(data, use_gpu=True)\n\n def testTypes(self):\n for dtype in [np.float16, np.float32, np.float64]:\n self._testDtype(dtype)\n\n\nclass ComplexMakeRealImagTest(tf.test.TestCase):\n\n def _compareMake(self, real, imag, use_gpu):\n np_ans = real + (1j) * imag\n with self.test_session(use_gpu=use_gpu):\n real = tf.convert_to_tensor(real)\n imag = tf.convert_to_tensor(imag)\n tf_ans = tf.complex(real, imag)\n out = tf_ans.eval()\n self.assertAllEqual(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def testMake(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n for use_gpu in [False, True]:\n self._compareMake(real, imag, use_gpu)\n self._compareMake(real, 12.0, use_gpu)\n self._compareMake(23.0, imag, use_gpu)\n\n def _compareRealImag(self, cplx, use_gpu):\n np_real, np_imag = np.real(cplx), np.imag(cplx)\n with self.test_session(use_gpu=use_gpu) as sess:\n inx = tf.convert_to_tensor(cplx)\n tf_real = tf.real(inx)\n tf_imag = tf.imag(inx)\n tf_real_val, tf_imag_val = sess.run([tf_real, 
tf_imag])\n self.assertAllEqual(np_real, tf_real_val)\n self.assertAllEqual(np_imag, tf_imag_val)\n self.assertShapeEqual(np_real, tf_real)\n self.assertShapeEqual(np_imag, tf_imag)\n\n def testRealImag(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + (1j) * imag\n self._compareRealImag(cplx, use_gpu=False)\n self._compareRealImag(cplx, use_gpu=True)\n\n def _compareConj(self, cplx, use_gpu):\n np_ans = np.conj(cplx)\n with self.test_session(use_gpu=use_gpu):\n inx = tf.convert_to_tensor(cplx)\n tf_conj = tf.conj(inx)\n tf_ans = tf_conj.eval()\n self.assertAllEqual(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, tf_conj)\n\n def testConj(self):\n real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)\n imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)\n cplx = real + (1j) * imag\n self._compareConj(cplx, use_gpu=False)\n self._compareConj(cplx, use_gpu=True)\n\n def _compareGradient(self, x):\n # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into\n # complex numbers. Then, we extract real and imag parts and\n # computes the squared sum. This is obviously the same as sum(real\n # * real) + sum(imag * imag). We just want to make sure the\n # gradient function is checked.\n with self.test_session():\n inx = tf.convert_to_tensor(x)\n real, imag = tf.split(1, 2, inx)\n real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])\n cplx = tf.complex(real, imag)\n cplx = tf.conj(cplx)\n loss = tf.reduce_sum(\n tf.square(tf.real(cplx))) + tf.reduce_sum(\n tf.square(tf.imag(cplx)))\n epsilon = 1e-3\n jacob_t, jacob_n = tf.test.compute_gradient(inx,\n list(x.shape),\n loss,\n [1],\n x_init_value=x,\n delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n def testGradient(self):\n data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)\n self._compareGradient(data)\n\n def _compareMulGradient(self, data):\n # data is a float matrix of shape [n, 4]. 
data[:, 0], data[:, 1],\n # data[:, 2], data[:, 3] are real parts of x, imaginary parts of\n # x, real parts of y and imaginary parts of y.\n with self.test_session():\n inp = tf.convert_to_tensor(data)\n xr, xi, yr, yi = tf.split(1, 4, inp)\n\n def vec(x): # Reshape to a vector\n return tf.reshape(x, [-1])\n xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)\n\n def cplx(r, i): # Combine to a complex vector\n return tf.complex(r, i)\n x, y = cplx(xr, xi), cplx(yr, yi)\n # z is x times y in complex plane.\n z = x * y\n # Defines the loss function as the sum of all coefficients of z.\n loss = tf.reduce_sum(tf.real(z) + tf.imag(z))\n epsilon = 0.005\n jacob_t, jacob_n = tf.test.compute_gradient(inp,\n list(data.shape),\n loss,\n [1],\n x_init_value=data,\n delta=epsilon)\n self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)\n\n def testMulGradient(self):\n data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)\n self._compareMulGradient(data)\n\n\nclass AccumulateTest(tf.test.TestCase):\n\n def testSimple(self):\n with self.test_session():\n random_arrays = [np.random.rand(16, 16, 16, 16).astype(np.float32)\n for _ in range(20)]\n random_tensors = [tf.convert_to_tensor(x, dtype=tf.float32)\n for x in random_arrays]\n tf_val = tf.accumulate_n(random_tensors)\n np_val = random_arrays[0]\n for random_array in random_arrays[1:]:\n np_val += random_array\n self.assertAllClose(np_val, tf_val.eval())\n\n def testZeroArgs(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n tf_val = tf.accumulate_n([])\n tf_val.eval()\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.real", "numpy.vectorize", "tensorflow.complex", "tensorflow.reshape", "numpy.ones_like", "tensorflow.is_nan", "tensorflow.convert_to_tensor", "tensorflow.reduce_sum", "tensorflow.split", "numpy.isfinite", "tensorflow.minimum", "tensorflow.maximum", "numpy.abs", "tensorflow.is_finite", "numpy.logical_not", "numpy.random.rand", "numpy.isnan", "tensorflow.conj", "numpy.where", "tensorflow.constant", "numpy.linspace", "tensorflow.test.main", "numpy.minimum", "tensorflow.is_inf", "numpy.ceil", "numpy.zeros", "tensorflow.imag", "numpy.complex", "tensorflow.accumulate_n", "numpy.arange", "tensorflow.gradients", "tensorflow.not_equal", "numpy.prod", "numpy.maximum", "tensorflow.floor", "numpy.finfo", "numpy.array", "tensorflow.equal", "numpy.empty", "tensorflow.ceil", "numpy.conj", "tensorflow.test.compute_gradient", "numpy.isinf", "numpy.floor", "numpy.exp", "numpy.shape", "numpy.sqrt", "tensorflow.select", "numpy.random.randint", "numpy.real", "numpy.imag" ] ]
carboncoo/UNITER
[ "dfe007c2cea55430a847fd1cf318e88ae8ffe88f" ]
[ "data/data.py" ]
[ "\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\nDataset interfaces\n\"\"\"\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nimport io\nimport json\nfrom os.path import exists\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, ConcatDataset\nimport horovod.torch as hvd\nfrom tqdm import tqdm\nimport lmdb\nfrom lz4.frame import compress, decompress\n\nimport msgpack\nimport msgpack_numpy\nmsgpack_numpy.patch()\n\n\ndef _fp16_to_fp32(feat_dict):\n out = {k: arr.astype(np.float32)\n if arr.dtype == np.float16 else arr\n for k, arr in feat_dict.items()}\n return out\n\n\ndef compute_num_bb(confs, conf_th, min_bb, max_bb):\n num_bb = max(min_bb, (confs > conf_th).sum())\n num_bb = min(max_bb, num_bb)\n return num_bb\n\n\ndef _check_distributed():\n try:\n dist = hvd.size() != hvd.local_size()\n except ValueError:\n # not using horovod\n dist = False\n return dist\n\n\nclass DetectFeatLmdb(object):\n def __init__(self, img_dir, conf_th=0.2, max_bb=100, min_bb=10, num_bb=36,\n compress=True):\n self.img_dir = img_dir\n if conf_th == -1:\n db_name = f'feat_numbb{num_bb}'\n self.name2nbb = defaultdict(lambda: num_bb)\n else:\n db_name = f'feat_th{conf_th}_max{max_bb}_min{min_bb}'\n nbb = f'nbb_th{conf_th}_max{max_bb}_min{min_bb}.json'\n if not exists(f'{img_dir}/{nbb}'):\n # nbb is not pre-computed\n self.name2nbb = None\n else:\n self.name2nbb = json.load(open(f'{img_dir}/{nbb}'))\n self.compress = compress\n if compress:\n db_name += '_compressed'\n\n if self.name2nbb is None:\n if compress:\n db_name = 'all_compressed'\n else:\n db_name = 'all'\n # only read ahead on single node training\n self.env = lmdb.open(f'{img_dir}/{db_name}',\n readonly=True, create=False,\n readahead=not _check_distributed())\n self.txn = self.env.begin(buffers=True)\n if self.name2nbb is None:\n self.name2nbb = self._compute_nbb()\n\n def _compute_nbb(self):\n name2nbb = {}\n fnames = json.loads(self.txn.get(key=b'__keys__').decode('utf-8'))\n for fname in tqdm(fnames, desc='reading images'):\n dump = self.txn.get(fname.encode('utf-8'))\n if self.compress:\n with io.BytesIO(dump) as reader:\n img_dump = np.load(reader, allow_pickle=True)\n confs = img_dump['conf']\n else:\n img_dump = msgpack.loads(dump, raw=False)\n confs = img_dump['conf']\n name2nbb[fname] = compute_num_bb(confs, self.conf_th,\n self.min_bb, self.max_bb)\n\n return name2nbb\n\n def __del__(self):\n self.env.close()\n\n def get_dump(self, file_name):\n # hack for MRC\n dump = self.txn.get(file_name.encode('utf-8'))\n nbb = self.name2nbb[file_name]\n if self.compress:\n with io.BytesIO(dump) as reader:\n img_dump = np.load(reader, allow_pickle=True)\n img_dump = _fp16_to_fp32(img_dump)\n else:\n img_dump = msgpack.loads(dump, raw=False)\n img_dump = _fp16_to_fp32(img_dump)\n img_dump = {k: arr[:nbb, ...] 
for k, arr in img_dump.items()}\n return img_dump\n\n def __getitem__(self, file_name):\n dump = self.txn.get(file_name.encode('utf-8'))\n nbb = self.name2nbb[file_name]\n if self.compress:\n with io.BytesIO(dump) as reader:\n img_dump = np.load(reader, allow_pickle=True)\n img_dump = {'features': img_dump['features'],\n 'norm_bb': img_dump['norm_bb']}\n else:\n img_dump = msgpack.loads(dump, raw=False)\n img_feat = torch.tensor(img_dump['features'][:nbb, :]).float()\n img_bb = torch.tensor(img_dump['norm_bb'][:nbb, :]).float()\n return img_feat, img_bb\n\n\n@contextmanager\ndef open_lmdb(db_dir, readonly=False):\n db = TxtLmdb(db_dir, readonly)\n try:\n yield db\n finally:\n del db\n\n\nclass TxtLmdb(object):\n def __init__(self, db_dir, readonly=True):\n self.readonly = readonly\n if readonly:\n # training\n self.env = lmdb.open(db_dir,\n readonly=True, create=False,\n readahead=not _check_distributed())\n self.txn = self.env.begin(buffers=True)\n self.write_cnt = None\n else:\n # prepro\n self.env = lmdb.open(db_dir, readonly=False, create=True,\n map_size=4 * 1024**4)\n self.txn = self.env.begin(write=True)\n self.write_cnt = 0\n\n def __del__(self):\n if self.write_cnt:\n self.txn.commit()\n self.env.close()\n\n def __getitem__(self, key):\n return msgpack.loads(decompress(self.txn.get(key.encode('utf-8'))),\n raw=False)\n\n def __setitem__(self, key, value):\n # NOTE: not thread safe\n if self.readonly:\n raise ValueError('readonly text DB')\n ret = self.txn.put(key.encode('utf-8'),\n compress(msgpack.dumps(value, use_bin_type=True)))\n self.write_cnt += 1\n if self.write_cnt % 1000 == 0:\n self.txn.commit()\n self.txn = self.env.begin(write=True)\n self.write_cnt = 0\n return ret\n\n\nclass TxtTokLmdb(object):\n def __init__(self, db_dir, max_txt_len=60):\n if max_txt_len == -1:\n self.id2len = json.load(open(f'{db_dir}/id2len.json'))\n else:\n self.id2len = {\n id_: len_\n for id_, len_ in json.load(open(f'{db_dir}/id2len.json')\n ).items()\n if len_ <= max_txt_len\n }\n self.db_dir = db_dir\n self.db = TxtLmdb(db_dir, readonly=True)\n meta = json.load(open(f'{db_dir}/meta.json', 'r'))\n self.cls_ = meta['CLS']\n self.sep = meta['SEP']\n self.mask = meta['MASK']\n self.v_range = meta['v_range']\n\n def __getitem__(self, id_):\n txt_dump = self.db[id_]\n return txt_dump\n\n def combine_inputs(self, *inputs):\n input_ids = [self.cls_]\n for ids in inputs:\n input_ids.extend(ids + [self.sep])\n return torch.tensor(input_ids)\n\n @property\n def txt2img(self):\n txt2img = json.load(open(f'{self.db_dir}/txt2img.json'))\n return txt2img\n\n @property\n def img2txts(self):\n img2txts = json.load(open(f'{self.db_dir}/img2txts.json'))\n return img2txts\n\n\ndef get_ids_and_lens(db):\n assert isinstance(db, TxtTokLmdb)\n lens = []\n ids = []\n for id_ in list(db.id2len.keys())[hvd.rank()::hvd.size()]:\n lens.append(db.id2len[id_])\n ids.append(id_)\n return lens, ids\n\n\nclass DetectFeatTxtTokDataset(Dataset):\n def __init__(self, txt_db, img_db):\n assert isinstance(txt_db, TxtTokLmdb)\n assert isinstance(img_db, DetectFeatLmdb)\n self.txt_db = txt_db\n self.img_db = img_db\n txt_lens, self.ids = get_ids_and_lens(txt_db)\n\n txt2img = txt_db.txt2img\n self.lens = [tl + self.img_db.name2nbb[txt2img[id_]]\n for tl, id_ in zip(txt_lens, self.ids)]\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, i):\n id_ = self.ids[i]\n example = self.txt_db[id_]\n return example\n\n def _get_img_feat(self, fname):\n img_feat, bb = self.img_db[fname]\n img_bb = torch.cat([bb, bb[:, 
4:5]*bb[:, 5:]], dim=-1)\n num_bb = img_feat.size(0)\n return img_feat, img_bb, num_bb\n\n\ndef pad_tensors(tensors, lens=None, pad=0):\n \"\"\"B x [T, ...]\"\"\"\n if lens is None:\n lens = [t.size(0) for t in tensors]\n max_len = max(lens)\n bs = len(tensors)\n hid = tensors[0].size(-1)\n dtype = tensors[0].dtype\n output = torch.zeros(bs, max_len, hid, dtype=dtype)\n if pad:\n output.data.fill_(pad)\n for i, (t, l) in enumerate(zip(tensors, lens)):\n output.data[i, :l, ...] = t.data\n return output\n\n\ndef get_gather_index(txt_lens, num_bbs, batch_size, max_len, out_size):\n assert len(txt_lens) == len(num_bbs) == batch_size\n gather_index = torch.arange(0, out_size, dtype=torch.long,\n ).unsqueeze(0).repeat(batch_size, 1)\n\n for i, (tl, nbb) in enumerate(zip(txt_lens, num_bbs)):\n gather_index.data[i, tl:tl+nbb] = torch.arange(max_len, max_len+nbb,\n dtype=torch.long).data\n return gather_index\n\n\nclass ConcatDatasetWithLens(ConcatDataset):\n \"\"\" A thin wrapper on pytorch concat dataset for lens batching \"\"\"\n def __init__(self, datasets):\n super().__init__(datasets)\n self.lens = [l for dset in datasets for l in dset.lens]\n\n def __getattr__(self, name):\n return self._run_method_on_all_dsets(name)\n\n def _run_method_on_all_dsets(self, name):\n def run_all(*args, **kwargs):\n return [dset.__getattribute__(name)(*args, **kwargs)\n for dset in self.datasets]\n return run_all\n\n\nclass ImageLmdbGroup(object):\n def __init__(self, conf_th, max_bb, min_bb, num_bb, compress):\n self.path2imgdb = {}\n self.conf_th = conf_th\n self.max_bb = max_bb\n self.min_bb = min_bb\n self.num_bb = num_bb\n self.compress = compress\n\n def __getitem__(self, path):\n img_db = self.path2imgdb.get(path, None)\n if img_db is None:\n img_db = DetectFeatLmdb(path, self.conf_th, self.max_bb,\n self.min_bb, self.num_bb, self.compress)\n return img_db\n" ]
[ [ "numpy.load", "torch.tensor", "torch.arange", "torch.zeros", "torch.cat" ] ]
jojonki/AttentionNetworks-for-QA
[ "16f469c0719bf30c42695a1b4df6bbd84db8ea49" ]
[ "process_data.py" ]
[ "import os\nimport numpy as np\nimport json\nimport pickle\nfrom nltk.tokenize import word_tokenize\nimport random\nimport torch\nfrom torch.autograd import Variable\n\n# TODO global\nNULL = \"-NULL-\"\nUNK = \"-UNK-\"\nENT = \"-ENT-\"\n\n\ndef save_pickle(d, path):\n print('save pickle to', path)\n with open(path, mode='wb') as f:\n pickle.dump(d, f)\n\n\ndef load_pickle(path):\n print('load', path)\n with open(path, mode='rb') as f:\n return pickle.load(f)\n\n\ndef lower_list(str_list):\n return [str_var.lower() for str_var in str_list]\n\n\ndef load_task(dataset_path):\n ret_data = []\n ctx_max_len = 0 # character level length\n with open(dataset_path) as f:\n data = json.load(f)\n ver = data['version']\n print('dataset version:', ver)\n data = data['data']\n for i, d in enumerate(data):\n if i % 100 == 0:\n print('load_task:', i, '/', len(data))\n # print('load', d['title'], i, '/', len(data))\n for p in d['paragraphs']:\n if len(p['context']) > ctx_max_len:\n ctx_max_len = len(p['context'])\n c = word_tokenize(p['context'])\n cc = [list(w) for w in c]\n q, a = [], []\n for qa in p['qas']:\n q = word_tokenize(qa['question'])\n qc = [list(w) for w in q]\n a = [ans['text'] for ans in qa['answers']]\n a_beg = [ans['answer_start'] for ans in qa['answers']]\n a_end = [ans['answer_start'] + len(ans['text']) for ans in qa['answers']]\n ret_data.append((c, cc, qa['id'], q, qc, a, a_beg, a_end)) # TODO context redandancy\n return ret_data, ctx_max_len\n\n\ndef load_processed_data(fpath):\n ctx_max_len = 0 # character level length\n with open(fpath) as f:\n lines = f.readlines()\n data = []\n for l in lines:\n c_label, c, q, a, a_txt = l.rstrip().split('\\t')\n if len(c) > ctx_max_len:\n ctx_max_len = len(c)\n c, q, a = c.split(' '), q.split(' '), a.split(' ')\n # if len(c) > 30: continue # TMP\n c, q = lower_list(c), lower_list(q)\n cc = [list(w) for w in c]\n qc = [list(w) for w in q]\n a = [int(aa) for aa in a]\n a = [a[0], a[-1]]\n data.append((c_label, c, cc, q, qc, a, a_txt))\n return data, ctx_max_len\n\n\ndef load_processed_json(fpath_data, fpath_shared):\n # shared ------------\n # x: word level context list\n # cx: chara level context list\n # p: raw str level context list\n # word_counter: word to index\n # char_coun0ter: char to index\n # lower_word_counter: low word counter\n # word2vec: word2vec pretrained weights\n # lower_word2vec: lowered word2vec pretrained weights\n # data ------------\n # q: word level question\n # cq: char-word level question\n # y: word level id\n # *x: [article_id, paragraph_id]\n # *cx: same as *x\n # cy: ?\n # idxs: nothing meaning\n # ids: question id\n # answers: original answer text\n # *p: same as *x\n data = json.load(open(fpath_data))\n shared = json.load(open(fpath_shared))\n return data, shared\n\n\ndef load_glove_weights(glove_dir, embd_dim, vocab_size, word_index):\n embeddings_index = {}\n with open(os.path.join(glove_dir, 'glove.6B.' + str(embd_dim) + 'd.txt')) as f:\n for line in f:\n values = line.split()\n word = values[0]\n vector = np.array(values[1:], dtype='float32')\n embeddings_index[word] = vector\n\n print('Found %s word vectors in glove.' 
% len(embeddings_index))\n embedding_matrix = np.zeros((vocab_size, embd_dim))\n print('embed_matrix.shape', embedding_matrix.shape)\n found_ct = 0\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n # words not found in embedding index will be all-zeros.\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n found_ct += 1\n print(found_ct, 'words are found in glove')\n\n return embedding_matrix\n\n\ndef to_var(x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)\n\n\ndef to_np(x):\n return x.data.cpu().numpy()\n\n\ndef _make_word_vector(sentence, w2i, seq_len):\n index_vec = [w2i[w] if w in w2i else w2i[UNK] for w in sentence]\n pad_len = max(0, seq_len - len(index_vec))\n index_vec += [w2i[NULL]] * pad_len\n index_vec = index_vec[:seq_len]\n return index_vec\n\n\ndef _make_char_vector(data, c2i, sent_len, word_len):\n tmp = torch.ones(sent_len, word_len).type(torch.LongTensor) # TODO use fills\n for i, word in enumerate(data):\n for j, ch in enumerate(word):\n tmp[i][j] = c2i[ch] if ch in c2i else c2i[UNK]\n return tmp\n\n\ndef make_vector(batch, w2i, c2i, ctx_sent_len, ctx_word_len, query_sent_len, query_word_len):\n c, cc, q, cq, ans = [], [], [], [], []\n # c, cc, q, cq, a in batch\n for d in batch:\n c.append(_make_word_vector(d[0], w2i, ctx_sent_len))\n cc.append(_make_char_vector(d[1], c2i, ctx_sent_len, ctx_word_len))\n q.append(_make_word_vector(d[2], w2i, query_sent_len))\n cq.append(_make_char_vector(d[3], c2i, query_sent_len, query_word_len))\n ans.append(d[-1])\n c = to_var(torch.LongTensor(c))\n cc = to_var(torch.stack(cc, 0))\n q = to_var(torch.LongTensor(q))\n cq = to_var(torch.stack(cq, 0))\n a = to_var(torch.LongTensor(ans))\n return c, cc, q, cq, a\n\n\nclass DataSet(object):\n def __init__(self, data, shared):\n self.data = data\n self.shared = shared\n\n def size(self):\n return len(self.data['q'])\n\n def get_batches(self, batch_size, shuffle=False):\n batches = []\n batch = []\n for i in range(self.size()): # TODO shuffle, last elms\n rx = self.data['*x'][i] # [article_id, paragraph_id]\n c = lower_list(self.shared['x'][rx[0]][rx[1]][0])\n # if len(c) > 150: continue\n cc = self.shared['cx'][rx[0]][rx[1]][0]\n q = lower_list(self.data['q'][i])\n # if len(q) < 5 or len(q) > 15: continue\n cq = self.data['cq'][i]\n a = self.data['y'][i][0] # [[0, 80], [0, 82]] TODO only use 1-best\n a = (a[0][1], a[1][1]) # (80, 82) <= [[0, 80], [0, 82]]\n batch.append((c, cc, q, cq, a))\n if len(batch) == batch_size:\n batches.append(batch)\n batch = []\n if shuffle:\n random.shuffle(batches)\n return batches\n\n def get_ctx_maxlen(self):\n # char level context maxlen\n return max([len(p) for pp in self.shared['p'] for p in pp])\n\n def get_sent_maxlen(self):\n # word level sentence maxlen\n return max([len(articles[0]) for xx in self.shared['x'] for articles in xx]), max([len(q) for q in self.data['q']])\n\n def get_word_maxlen(self):\n # max word len\n return max([len(w) for xx in self.shared['x'] for articles in xx for w in articles[0]]), max([len(w) for q in self.data['q'] for w in q])\n\n def get_word_index(self, word_count_th=10, char_count_th=100):\n\n word2vec_dict = self.get_word2vec()\n word_counter = self.get_word_counter()\n char_counter = self.get_char_counter()\n w2i = {w: i for i, w in enumerate(w for w, ct in word_counter.items()\n if ct > word_count_th or (w in word2vec_dict))}\n c2i = {c: i for i, c in\n enumerate(c for c, ct in char_counter.items()\n if ct > char_count_th)}\n # w2i[NULL] = 
0\n # w2i[UNK] = 1\n # w2i[ENT] = 2\n # c2i[NULL] = 0\n # c2i[UNK] = 1\n # c2i[ENT] = 2\n\n return w2i, c2i\n\n def get_word2vec(self):\n return self.shared['lower_word2vec']\n\n def get_word_counter(self):\n return self.shared['lower_word_counter']\n\n def get_char_counter(self):\n return self.shared['char_counter']\n" ]
[ [ "torch.ones", "torch.stack", "numpy.zeros", "torch.autograd.Variable", "torch.cuda.is_available", "numpy.array", "torch.LongTensor" ] ]
WangWenjun559/MITS
[ "8d7ace2b3b2a58fb33af225c2997106d9402aaf5" ]
[ "summary/sumy/sklearn/decomposition/pca.py" ]
[ "\"\"\" Principal Component Analysis\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Olivier Grisel <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Denis A. Engemann <[email protected]>\n# Michael Eickenberg <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom math import log, sqrt\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.special import gammaln\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils import check_random_state, as_float_array\nfrom ..utils import check_array\nfrom ..utils.extmath import fast_dot, fast_logdet, randomized_svd\nfrom ..utils.validation import check_is_fitted\n\n\ndef _assess_dimension_(spectrum, rank, n_samples, n_features):\n \"\"\"Compute the likelihood of a rank ``rank`` dataset\n\n The dataset is assumed to be embedded in gaussian noise of shape(n,\n dimf) having spectrum ``spectrum``.\n\n Parameters\n ----------\n spectrum: array of shape (n)\n Data spectrum.\n rank: int\n Tested rank value.\n n_samples: int\n Number of samples.\n n_features: int\n Number of features.\n\n Returns\n -------\n ll: float,\n The log-likelihood\n\n Notes\n -----\n This implements the method of `Thomas P. Minka:\n Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`\n \"\"\"\n if rank > len(spectrum):\n raise ValueError(\"The tested rank cannot exceed the rank of the\"\n \" dataset\")\n\n pu = -rank * log(2.)\n for i in range(rank):\n pu += (gammaln((n_features - i) / 2.)\n - log(np.pi) * (n_features - i) / 2.)\n\n pl = np.sum(np.log(spectrum[:rank]))\n pl = -pl * n_samples / 2.\n\n if rank == n_features:\n pv = 0\n v = 1\n else:\n v = np.sum(spectrum[rank:]) / (n_features - rank)\n pv = -np.log(v) * n_samples * (n_features - rank) / 2.\n\n m = n_features * rank - rank * (rank + 1.) / 2.\n pp = log(2. * np.pi) * (m + rank + 1.) / 2.\n\n pa = 0.\n spectrum_ = spectrum.copy()\n spectrum_[rank:n_features] = v\n for i in range(rank):\n for j in range(i + 1, len(spectrum)):\n pa += log((spectrum[i] - spectrum[j]) *\n (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)\n\n ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.\n\n return ll\n\n\ndef _infer_dimension_(spectrum, n_samples, n_features):\n \"\"\"Infers the dimension of a dataset of shape (n_samples, n_features)\n\n The dataset is described by its spectrum `spectrum`.\n \"\"\"\n n_spectrum = len(spectrum)\n ll = np.empty(n_spectrum)\n for rank in range(n_spectrum):\n ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)\n return ll.argmax()\n\n\nclass PCA(BaseEstimator, TransformerMixin):\n \"\"\"Principal component analysis (PCA)\n\n Linear dimensionality reduction using Singular Value Decomposition of the\n data and keeping only the most significant singular vectors to project the\n data to a lower dimensional space.\n\n This implementation uses the scipy.linalg implementation of the singular\n value decomposition. 
It only works for dense arrays and is not scalable to\n large dimensional data.\n\n The time complexity of this implementation is ``O(n ** 3)`` assuming\n n ~ n_samples ~ n_features.\n\n Read more in the :ref:`User Guide <PCA>`.\n\n Parameters\n ----------\n n_components : int, None or string\n Number of components to keep.\n if n_components is not set all components are kept::\n\n n_components == min(n_samples, n_features)\n\n if n_components == 'mle', Minka\\'s MLE is used to guess the dimension\n if ``0 < n_components < 1``, select the number of components such that\n the amount of variance that needs to be explained is greater than the\n percentage specified by n_components\n\n copy : bool\n If False, data passed to fit are overwritten and running\n fit(X).transform(X) will not yield the expected results,\n use fit_transform(X) instead.\n\n whiten : bool, optional\n When True (False by default) the `components_` vectors are divided\n by n_samples times singular values to ensure uncorrelated outputs\n with unit component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making there data respect some hard-wired assumptions.\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n Components with maximum variance.\n\n explained_variance_ratio_ : array, [n_components]\n Percentage of variance explained by each of the selected components. \\\n k is not set then all components are stored and the sum of explained \\\n variances is equal to 1.0\n\n mean_ : array, [n_features]\n Per-feature empirical mean, estimated from the training set.\n\n n_components_ : int\n The estimated number of components. Relevant when n_components is set\n to 'mle' or a number between 0 and 1 to select using explained\n variance.\n\n noise_variance_ : float\n The estimated noise covariance following the Probabilistic PCA model\n from Tipping and Bishop 1999. See \"Pattern Recognition and\n Machine Learning\" by C. Bishop, 12.2.1 p. 574 or\n http://www.miketipping.com/papers/met-mppca.pdf. It is required to\n computed the estimated data covariance and score samples.\n\n Notes\n -----\n For n_components='mle', this class uses the method of `Thomas P. Minka:\n Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`\n\n Implements the probabilistic PCA model from:\n M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,\n Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622\n via the score and score_samples methods.\n See http://www.miketipping.com/papers/met-mppca.pdf\n\n Due to implementation subtleties of the Singular Value Decomposition (SVD),\n which is used in this implementation, running fit twice on the same matrix\n can lead to principal components with signs flipped (change in direction).\n For this reason, it is important to always use the same estimator object to\n transform data in a consistent fashion.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from sklearn.decomposition import PCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> pca = PCA(n_components=2)\n >>> pca.fit(X)\n PCA(copy=True, n_components=2, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [ 0.99244... 
0.00755...]\n\n See also\n --------\n RandomizedPCA\n KernelPCA\n SparsePCA\n TruncatedSVD\n \"\"\"\n def __init__(self, n_components=None, copy=True, whiten=False):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n self._fit(X)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n U, S, V = self._fit(X)\n U = U[:, :self.n_components_]\n\n if self.whiten:\n # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)\n U *= sqrt(X.shape[0])\n else:\n # X_new = X * V = U * S * V^T * V = U * S\n U *= S[:self.n_components_]\n\n return U\n\n def _fit(self, X):\n \"\"\"Fit the model on X\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n\n Returns\n -------\n U, s, V : ndarrays\n The SVD of the input data, copied and centered when\n requested.\n \"\"\"\n X = check_array(X)\n n_samples, n_features = X.shape\n X = as_float_array(X, copy=self.copy)\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n U, S, V = linalg.svd(X, full_matrices=False)\n explained_variance_ = (S ** 2) / n_samples\n explained_variance_ratio_ = (explained_variance_ /\n explained_variance_.sum())\n\n components_ = V\n\n n_components = self.n_components\n if n_components is None:\n n_components = n_features\n elif n_components == 'mle':\n if n_samples < n_features:\n raise ValueError(\"n_components='mle' is only supported \"\n \"if n_samples >= n_features\")\n\n n_components = _infer_dimension_(explained_variance_,\n n_samples, n_features)\n elif not 0 <= n_components <= n_features:\n raise ValueError(\"n_components=%r invalid for n_features=%d\"\n % (n_components, n_features))\n\n if 0 < n_components < 1.0:\n # number of components for which the cumulated explained variance\n # percentage is superior to the desired threshold\n ratio_cumsum = explained_variance_ratio_.cumsum()\n n_components = np.sum(ratio_cumsum < n_components) + 1\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 
12.46)\n if n_components < n_features:\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n # store n_samples to revert whitening when getting covariance\n self.n_samples_ = n_samples\n\n self.components_ = components_[:n_components]\n self.explained_variance_ = explained_variance_[:n_components]\n explained_variance_ratio_ = explained_variance_ratio_[:n_components]\n self.explained_variance_ratio_ = explained_variance_ratio_\n self.n_components_ = n_components\n\n return (U, S, V)\n\n def get_covariance(self):\n \"\"\"Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances.\n\n Returns\n -------\n cov : array, shape=(n_features, n_features)\n Estimated covariance of data.\n \"\"\"\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)\n cov = np.dot(components_.T * exp_var_diff, components_)\n cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace\n return cov\n\n def get_precision(self):\n \"\"\"Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n \"\"\"\n n_features = self.components_.shape[1]\n\n # handle corner cases first\n if self.n_components_ == 0:\n return np.eye(n_features) / self.noise_variance_\n if self.n_components_ == n_features:\n return linalg.inv(self.get_covariance())\n\n # Get precision using matrix inversion lemma\n components_ = self.components_\n exp_var = self.explained_variance_\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)\n precision = np.dot(components_, components_.T) / self.noise_variance_\n precision.flat[::len(precision) + 1] += 1. / exp_var_diff\n precision = np.dot(components_.T,\n np.dot(linalg.inv(precision), components_))\n precision /= -(self.noise_variance_ ** 2)\n precision.flat[::len(precision) + 1] += 1. 
/ self.noise_variance_\n return precision\n\n def transform(self, X):\n \"\"\"Apply the dimensionality reduction on X.\n\n X is projected on the first principal components previous extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples is the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n if self.mean_ is not None:\n X = X - self.mean_\n X_transformed = fast_dot(X, self.components_.T)\n if self.whiten:\n X_transformed /= np.sqrt(self.explained_variance_)\n return X_transformed\n\n def inverse_transform(self, X):\n \"\"\"Transform data back to its original space, i.e.,\n return an input X_original whose transform would be X\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_components)\n New data, where n_samples is the number of samples\n and n_components is the number of components.\n\n Returns\n -------\n X_original array-like, shape (n_samples, n_features)\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n if self.whiten:\n return fast_dot(\n X,\n np.sqrt(self.explained_variance_[:, np.newaxis]) *\n self.components_) + self.mean_\n else:\n return fast_dot(X, self.components_) + self.mean_\n\n def score_samples(self, X):\n \"\"\"Return the log-likelihood of each sample\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X: array, shape(n_samples, n_features)\n The data.\n\n Returns\n -------\n ll: array, shape (n_samples,)\n Log-likelihood of each sample under the current model\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n Xr = X - self.mean_\n n_features = X.shape[1]\n log_like = np.zeros(X.shape[0])\n precision = self.get_precision()\n log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)\n log_like -= .5 * (n_features * log(2. * np.pi)\n - fast_logdet(precision))\n return log_like\n\n def score(self, X, y=None):\n \"\"\"Return the average log-likelihood of all samples\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X: array, shape(n_samples, n_features)\n The data.\n\n Returns\n -------\n ll: float\n Average log-likelihood of the samples under the current model\n \"\"\"\n return np.mean(self.score_samples(X))\n\n\nclass RandomizedPCA(BaseEstimator, TransformerMixin):\n \"\"\"Principal component analysis (PCA) using randomized SVD\n\n Linear dimensionality reduction using approximated Singular Value\n Decomposition of the data and keeping only the most significant\n singular vectors to project the data to a lower dimensional space.\n\n Read more in the :ref:`User Guide <RandomizedPCA>`.\n\n Parameters\n ----------\n n_components : int, optional\n Maximum number of components to keep. When not given or None, this\n is set to n_features (the second dimension of the training data).\n\n copy : bool\n If False, data passed to fit are overwritten and running\n fit(X).transform(X) will not yield the expected results,\n use fit_transform(X) instead.\n\n iterated_power : int, optional\n Number of iterations for the power method. 
3 by default.\n\n whiten : bool, optional\n When True (False by default) the `components_` vectors are divided\n by the singular values to ensure uncorrelated outputs with unit\n component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making their data respect some hard-wired assumptions.\n\n random_state : int or RandomState instance or None (default)\n Pseudo Random Number generator seed control. If None, use the\n numpy.random singleton.\n\n Attributes\n ----------\n components_ : array, [n_components, n_features]\n Components with maximum variance.\n\n explained_variance_ratio_ : array, [n_components]\n Percentage of variance explained by each of the selected components. \\\n k is not set then all components are stored and the sum of explained \\\n variances is equal to 1.0\n\n mean_ : array, [n_features]\n Per-feature empirical mean, estimated from the training set.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.decomposition import RandomizedPCA\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> pca = RandomizedPCA(n_components=2)\n >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n RandomizedPCA(copy=True, iterated_power=3, n_components=2,\n random_state=None, whiten=False)\n >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS\n [ 0.99244... 0.00755...]\n\n See also\n --------\n PCA\n TruncatedSVD\n\n References\n ----------\n\n .. [Halko2009] `Finding structure with randomness: Stochastic algorithms\n for constructing approximate matrix decompositions Halko, et al., 2009\n (arXiv:909)`\n\n .. [MRT] `A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`\n\n \"\"\"\n\n def __init__(self, n_components=None, copy=True, iterated_power=3,\n whiten=False, random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.iterated_power = iterated_power\n self.whiten = whiten\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X by extracting the first principal components.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n self._fit(check_array(X))\n return self\n\n def _fit(self, X):\n \"\"\"Fit the model to the data X.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n\n Returns\n -------\n X : ndarray, shape (n_samples, n_features)\n The input data, copied, centered and whitened when requested.\n \"\"\"\n random_state = check_random_state(self.random_state)\n X = np.atleast_2d(as_float_array(X, copy=self.copy))\n\n n_samples = X.shape[0]\n\n # Center data\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n if self.n_components is None:\n n_components = X.shape[1]\n else:\n n_components = self.n_components\n\n U, S, V = randomized_svd(X, n_components,\n n_iter=self.iterated_power,\n random_state=random_state)\n\n self.explained_variance_ = exp_var = (S ** 2) / n_samples\n full_var = np.var(X, axis=0).sum()\n self.explained_variance_ratio_ = exp_var / full_var\n\n if self.whiten:\n self.components_ = 
V / S[:, np.newaxis] * sqrt(n_samples)\n else:\n self.components_ = V\n\n return X\n\n def transform(self, X, y=None):\n \"\"\"Apply dimensionality reduction on X.\n\n X is projected on the first principal components previous extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n if self.mean_ is not None:\n X = X - self.mean_\n\n X = fast_dot(X, self.components_.T)\n return X\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n X = check_array(X)\n X = self._fit(X)\n return fast_dot(X, self.components_.T)\n\n def inverse_transform(self, X, y=None):\n \"\"\"Transform data back to its original space.\n\n Returns an array X_original whose transform would be X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_components)\n New data, where n_samples in the number of samples\n and n_components is the number of components.\n\n Returns\n -------\n X_original array-like, shape (n_samples, n_features)\n\n Notes\n -----\n If whitening is enabled, inverse_transform does not compute the\n exact inverse operation of transform.\n \"\"\"\n check_is_fitted(self, 'mean_')\n\n X_original = fast_dot(X, self.components_)\n if self.mean_ is not None:\n X_original = X_original + self.mean_\n return X_original\n" ]
[ [ "numpy.sqrt", "numpy.sum", "numpy.eye", "numpy.empty", "numpy.zeros", "scipy.special.gammaln", "numpy.var", "scipy.linalg.inv", "numpy.log", "numpy.maximum", "numpy.dot", "numpy.mean", "scipy.linalg.svd" ] ]
caglorithm/stimulus_neural_populations
[ "58567901bed6f6bc17fc2975435138c33bb6be66" ]
[ "models/brian2/utils_net.py" ]
[ "import numpy as np\n# try to import numba\n# or define dummy decorator\ntry:\n from numba import autojit\nexcept:\n def autojit(func):\n return func\n\n# util functions for network simulation\ndef smooth_trace(trace, scale):\n scale = int(scale)\n if scale == 1 or scale == 0:\n return trace\n slen = int(len(trace) / scale)\n if slen == 0:\n return trace\n return np.array([np.mean(trace[i*scale:(i+1)*scale]) for i in xrange(slen)])\n\n@autojit\ndef choose_k_from_n(n, k):\n # use vaguely estimated metric of when sorting random numbers is better\n if float(k) / float(n) > 0.125:\n ans = np.argsort(np.random.rand(n))[:k]\n return ans\n nums = range(n)\n swaps = (np.random.rand(k) * xrange(n, n - k, -1)).astype('int') + xrange(k)\n for i in xrange(k):\n # swap with some random element from here to end - these swap positions precalculated\n nums[i], nums[swaps[i]] = nums[swaps[i]], nums[i]\n ans = nums[:k]\n return ans\n\ndef fixed_connectivity(n, k):\n prelist = np.zeros(k * n, dtype = int)\n postlist = np.zeros_like(prelist)\n for j in xrange(n):\n presynapses = choose_k_from_n(n, k)\n prelist[j * k:(j + 1) * k] = presynapses\n postlist[j * k:(j + 1) * k] = j * np.ones(k, dtype = int)\n return prelist, postlist" ]
[ [ "numpy.zeros_like", "numpy.ones", "numpy.zeros", "numpy.random.rand", "numpy.mean" ] ]
SergioRAgostinho/cvxpnpl
[ "eaa568594df0adcf0c70cc5288b24e5dc1fa9d2f", "eaa568594df0adcf0c70cc5288b24e5dc1fa9d2f" ]
[ "benchmarks/toolkit/datasets.py", "benchmarks/real/pnpl.py" ]
[ "from collections import namedtuple\nimport json\nimport os\nfrom os.path import join as pjoin\nfrom pathlib import Path\n\nimport numpy as np\nfrom plymit import Ply\nfrom PIL import Image\n\nfrom .renderer import Renderer\n\nModel = namedtuple(\n \"Model\",\n [\n \"id\",\n \"points\",\n \"normals\",\n \"color\",\n \"faces\",\n \"diameter\",\n \"min\",\n \"size\",\n \"symmetries_discrete\",\n ],\n)\n\nCamera = namedtuple(\"Camera\", [\"K\", \"size\"])\n\n\nclass Dataset:\n def __init__(self, prefix):\n print(\"Initializing \" + type(self).__name__)\n self.prefix = prefix\n self.camera = self._parse_camera()\n\n # Load models\n self.models = self._load_models()\n self.renderer = self._init_renderer()\n\n # Handle Partitions\n # we're only interested in the test partition here\n # self.train = type(self)._Partition(pjoin(self.prefix, \"train\"))\n # self.train = None\n self.test = type(self)._Partition(\n pjoin(self.prefix, \"test\"), self.models, self.renderer\n )\n\n def __iter__(self):\n return iter(self.test)\n\n def __len__(self):\n return self.test.n_frames\n\n def __getstate__(self):\n # save prefix only and reload database upon deserializing\n return {\"prefix\": self.prefix}\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.__init__(Path(self.prefix).parent)\n\n def _init_renderer(self):\n renderer = Renderer(False)\n renderer.load_models(list(self.models.values()))\n return renderer\n\n def _parse_camera(self):\n data = json.loads(open(pjoin(self.prefix, \"camera.json\")).read())\n camera = Camera(\n K=np.array(\n ((data[\"fx\"], 0, data[\"cx\"]), (0, data[\"fy\"], data[\"cy\"]), (0, 0, 1),)\n ),\n size=(data[\"width\"], data[\"height\"]),\n )\n return camera\n\n def _load_models(self):\n\n models = {}\n\n print(\"Reading ply files for models: \", end=\"\", flush=True)\n\n # load model info. 
models_eval are lighter\n info = json.loads(\n open(pjoin(self.prefix, \"models_eval\", \"models_info.json\")).read()\n )\n for k, v in info.items():\n\n print(k, end=\" \", flush=True)\n\n # load points, normals and color\n ply = Ply(pjoin(self.prefix, \"models\", \"obj_{:06d}.ply\".format(int(k))))\n\n # parse vertices\n points = []\n normals = []\n colors = []\n for vertex in ply.elementLists[\"vertex\"]:\n points.extend([vertex.x, vertex.y, vertex.z])\n normals.extend([vertex.nx, vertex.ny, vertex.nz])\n colors.extend([vertex.red, vertex.green, vertex.blue])\n points = np.array(points, dtype=np.float32).reshape((-1, 3))\n normals = np.array(normals, dtype=np.float32).reshape((-1, 3))\n colors = np.array(colors, dtype=np.uint8).reshape((-1, 3))\n\n # faces\n faces = []\n for f in ply.elementLists[\"face\"]:\n faces.extend(f.vertex_indices)\n faces = np.array(faces, dtype=np.uint32).reshape((-1, 3))\n\n # create model object\n models[k] = Model(\n int(k),\n points,\n normals,\n colors,\n faces,\n v[\"diameter\"],\n np.array((v[\"min_x\"], v[\"min_y\"], v[\"min_z\"])),\n np.array((v[\"size_x\"], v[\"size_y\"], v[\"size_z\"])),\n [np.array(s).reshape((4, 4)) for s in v[\"symmetries_discrete\"]]\n if \"symmetries_discrete\" in v\n else None,\n )\n print(\"DONE\", flush=True)\n return models\n\n class _Partition:\n def __init__(self, prefix, models, renderer):\n\n self.prefix = prefix\n self.models = models\n self.renderer = renderer\n\n seq_names = sorted([d.name for d in os.scandir(prefix)])\n # seq_names = [seq_names[1]]\n self.sequences = [\n Dataset._Sequence(int(n), pjoin(prefix, n), models, renderer)\n for n in seq_names\n ]\n\n # store the total number of frames in the partition\n self.n_frames = 0\n for seq in self.sequences:\n self.n_frames += len(seq)\n\n def __iter__(self):\n return iter(self.sequences)\n\n def __len__(self):\n return len(self.sequences)\n\n class _Sequence:\n def __init__(self, name, prefix, models, renderer):\n\n self.name = name\n self.prefix = prefix\n self.models = models\n self.renderer = renderer\n\n # parse gt\n gt = json.loads(open(pjoin(prefix, \"scene_gt.json\")).read())\n self.poses = [None] * len(gt.keys())\n for k, v in gt.items():\n poses = {}\n for pose in v:\n poses[pose[\"obj_id\"]] = np.hstack(\n (\n np.array(pose[\"cam_R_m2c\"]).reshape((3, 3)),\n np.array(pose[\"cam_t_m2c\"]).reshape((3, 1)),\n )\n )\n self.poses[int(k)] = poses\n\n # iterator stuff\n self.i = 0\n\n def __iter__(self):\n self.i = 0\n return self\n\n def __len__(self):\n return len(self.poses)\n # return 4\n\n def __next__(self):\n # reached the end. 
get out\n if self.i == len(self):\n raise StopIteration\n\n # generate object coordinates\n poses = self.poses[self.i]\n oc = self.renderer.object_coordinates(poses)\n\n # load visibility masks\n mask = self.fuse_masks(self.i, poses.keys())\n\n # return dictionary object with rgb, depth and poses\n data = {\n \"id\": self.i,\n \"rgb\": np.array(\n Image.open(pjoin(self.prefix, \"rgb\", \"{:06d}.png\".format(self.i)))\n ), # load rgb\n # \"depth\": np.array(\n # Image.open(pjoin(self.prefix, \"depth\", \"{:06d}.png\".format(self.i)))\n # ), # load depth\n \"mask\": mask,\n \"oc\": oc,\n \"poses\": poses,\n }\n self.i += 1\n return data\n\n def fuse_masks(self, frame, object_ids):\n masks = np.zeros(self.renderer.size[::-1], dtype=np.uint8)\n for i, oid in enumerate(object_ids):\n masks[\n np.array(\n Image.open(\n pjoin(self.prefix, \"mask_visib\", f\"{frame:06d}_{i:06d}.png\")\n )\n )\n > 127\n ] = oid\n return masks\n\n\nclass Linemod(Dataset):\n\n seq_names = [\n \"ape\",\n \"benchvise\",\n \"bowl\",\n \"cam\",\n \"can\",\n \"cat\",\n \"cup\",\n \"driller\",\n \"duck\",\n \"eggbox\",\n \"glue\",\n \"holepuncher\",\n \"iron\",\n \"lamp\",\n \"phone\",\n ]\n\n def __init__(self, prefix):\n super().__init__(pjoin(prefix, \"lm\"))\n\n\nclass Occlusion(Dataset):\n\n seq_names = [\"\"]\n\n def __init__(self, prefix):\n super().__init__(pjoin(prefix, \"lmo\"))\n", "import numpy as np\n\nfrom toolkit.methods.pnpl import CvxPnPL, DLT, EPnPL, OPnPL\nfrom toolkit.suites import parse_arguments, PnPLReal\nfrom toolkit.datasets import Linemod, Occlusion\n\n\n# reproducibility is a great thing\nnp.random.seed(0)\nnp.random.seed(42)\n\n\n# parse console arguments\nargs = parse_arguments()\n\n# Just a loading data scenario\nif args.load:\n session = PnPLReal.load(args.load)\n session.print(args.print_mode)\n quit()\n\n# run something\nsession = PnPLReal(methods=[CvxPnPL, DLT, EPnPL, OPnPL])\nsession.run(data=[Linemod(args.datasets_prefix), Occlusion(args.datasets_prefix)])\n# session.run(data=[Linemod(args.datasets_prefix)])\nif args.save:\n session.save(args.save)\nsession.print()\n" ]
[ [ "numpy.array", "numpy.zeros" ], [ "numpy.random.seed" ] ]
kcrumb/automl
[ "6e0cb70003c05dbbba45a7d741ec975423042f0e" ]
[ "efficientdet/utils.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utils.\"\"\"\nimport contextlib\nimport os\nimport re\nfrom typing import Text, Tuple, Union\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow.compat.v2 as tf2\nfrom tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import\n# pylint: disable=logging-format-interpolation\n\n\ndef srelu_fn(x):\n \"\"\"Smooth relu: a smooth version of relu.\"\"\"\n with tf.name_scope('srelu'):\n beta = tf.Variable(20.0, name='srelu_beta', dtype=tf.float32)**2\n beta = tf.cast(beta**2, x.dtype)\n safe_log = tf.math.log(tf.where(x > 0., beta * x + 1., tf.ones_like(x)))\n return tf.where((x > 0.), x - (1. / beta) * safe_log, tf.zeros_like(x))\n\n\ndef activation_fn(features: tf.Tensor, act_type: Text):\n \"\"\"Customized non-linear activation type.\"\"\"\n if act_type in ('silu', 'swish'):\n return tf.nn.swish(features)\n elif act_type == 'swish_native':\n return features * tf.sigmoid(features)\n elif act_type == 'hswish':\n return features * tf.nn.relu6(features + 3) / 6\n elif act_type == 'relu':\n return tf.nn.relu(features)\n elif act_type == 'relu6':\n return tf.nn.relu6(features)\n elif act_type == 'mish':\n return features * tf.math.tanh(tf.math.softplus(features))\n elif act_type == 'srelu':\n return srelu_fn(features)\n else:\n raise ValueError('Unsupported act_type {}'.format(act_type))\n\n\ndef cross_replica_mean(t, num_shards_per_group=None):\n \"\"\"Calculates the average value of input tensor across TPU replicas.\"\"\"\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if not num_shards_per_group:\n return tf.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype)\n\n group_assignment = None\n if num_shards_per_group > 1:\n if num_shards % num_shards_per_group != 0:\n raise ValueError(\n 'num_shards: %d mod shards_per_group: %d, should be 0' %\n (num_shards, num_shards_per_group))\n num_groups = num_shards // num_shards_per_group\n group_assignment = [[\n x for x in range(num_shards) if x // num_shards_per_group == y\n ] for y in range(num_groups)]\n return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(\n num_shards_per_group, t.dtype)\n\n\ndef get_ema_vars():\n \"\"\"Get all exponential moving average (ema) variables.\"\"\"\n ema_vars = tf.trainable_variables() + \\\n tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES)\n for v in tf.global_variables():\n # We maintain mva for batch norm moving mean and variance as well.\n if 'moving_mean' in v.name or 'moving_variance' in v.name:\n ema_vars.append(v)\n return list(set(ema_vars))\n\n\ndef get_ckpt_var_map(ckpt_path, ckpt_scope, var_scope, skip_mismatch=None):\n \"\"\"Get a var map for restoring from pretrained checkpoints.\n\n Args:\n ckpt_path: string. A pretrained checkpoint path.\n ckpt_scope: string. 
Scope name for checkpoint variables.\n var_scope: string. Scope name for model variables.\n skip_mismatch: skip variables if shape mismatch.\n\n Returns:\n var_map: a dictionary from checkpoint name to model variables.\n \"\"\"\n logging.info('Init model from checkpoint {}'.format(ckpt_path))\n if not ckpt_scope.endswith('/') or not var_scope.endswith('/'):\n raise ValueError('Please specific scope name ending with /')\n if ckpt_scope.startswith('/'):\n ckpt_scope = ckpt_scope[1:]\n if var_scope.startswith('/'):\n var_scope = var_scope[1:]\n\n var_map = {}\n # Get the list of vars to restore.\n model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)\n reader = tf.train.load_checkpoint(ckpt_path)\n ckpt_var_name_to_shape = reader.get_variable_to_shape_map()\n ckpt_var_names = set(reader.get_variable_to_shape_map().keys())\n\n for i, v in enumerate(model_vars):\n if not v.op.name.startswith(var_scope):\n logging.info('skip {} -- does not match scope {}'.format(\n v.op.name, var_scope))\n ckpt_var = ckpt_scope + v.op.name[len(var_scope):]\n if (ckpt_var not in ckpt_var_names and\n v.op.name.endswith('/ExponentialMovingAverage')):\n ckpt_var = ckpt_scope + v.op.name[:-len('/ExponentialMovingAverage')]\n\n if ckpt_var not in ckpt_var_names:\n if 'Momentum' in ckpt_var or 'RMSProp' in ckpt_var:\n # Skip optimizer variables.\n continue\n if skip_mismatch:\n logging.info('skip {} ({}) -- not in ckpt'.format(v.op.name, ckpt_var))\n continue\n raise ValueError('{} is not in ckpt {}'.format(v.op, ckpt_path))\n\n if v.shape != ckpt_var_name_to_shape[ckpt_var]:\n if skip_mismatch:\n logging.info('skip {} ({} vs {}) -- shape mismatch'.format(\n v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var]))\n continue\n raise ValueError('shape mismatch {} ({} vs {})'.format(\n v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var]))\n\n if i < 5:\n # Log the first few elements for sanity check.\n logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var))\n var_map[ckpt_var] = v\n\n return var_map\n\n\ndef get_ckpt_var_map_ema(ckpt_path, ckpt_scope, var_scope, var_exclude_expr):\n \"\"\"Get a ema var map for restoring from pretrained checkpoints.\n\n Args:\n ckpt_path: string. A pretrained checkpoint path.\n ckpt_scope: string. Scope name for checkpoint variables.\n var_scope: string. Scope name for model variables.\n var_exclude_expr: string. 
A regex for excluding variables.\n This is useful for finetuning with different classes, where\n var_exclude_expr='.*class-predict.*' can be used.\n\n Returns:\n var_map: a dictionary from checkpoint name to model variables.\n \"\"\"\n logging.info('Init model from checkpoint {}'.format(ckpt_path))\n if not ckpt_scope.endswith('/') or not var_scope.endswith('/'):\n raise ValueError('Please specific scope name ending with /')\n if ckpt_scope.startswith('/'):\n ckpt_scope = ckpt_scope[1:]\n if var_scope.startswith('/'):\n var_scope = var_scope[1:]\n\n var_map = {}\n # Get the list of vars to restore.\n model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)\n reader = tf.train.load_checkpoint(ckpt_path)\n ckpt_var_names = set(reader.get_variable_to_shape_map().keys())\n exclude_matcher = re.compile(var_exclude_expr) if var_exclude_expr else None\n for v in model_vars:\n if exclude_matcher and exclude_matcher.match(v.op.name):\n logging.info(\n 'skip {} -- excluded by {}'.format(v.op.name, var_exclude_expr))\n continue\n\n if not v.op.name.startswith(var_scope):\n logging.info('skip {} -- does not match scope {}'.format(\n v.op.name, var_scope))\n\n if v.op.name.endswith('/ExponentialMovingAverage'):\n logging.info('skip ema var {}'.format(v.op.name))\n continue\n\n ckpt_var = ckpt_scope + v.op.name[len(var_scope):]\n ckpt_var_ema = ckpt_var + '/ExponentialMovingAverage'\n if ckpt_var_ema in ckpt_var_names:\n var_map[ckpt_var_ema] = v\n logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var_ema))\n elif ckpt_var in ckpt_var_names:\n var_map[ckpt_var] = v\n logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var))\n else:\n logging.info('skip {} ({}) -- not in ckpt'.format(v.op.name, ckpt_var))\n return var_map\n\n\nclass TpuBatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Cross replica batch normalization.\"\"\"\n\n def __init__(self, fused=False, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n if fused in (True, None):\n raise ValueError('TpuBatchNormalization does not support fused=True.')\n super().__init__(fused=fused, **kwargs)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super()._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n num_shards = tpu_function.get_tpu_context().number_of_shards or 1\n num_shards_per_group = min(32, num_shards) # aggregate up to 32 cores.\n logging.info('TpuBatchNormalization with num_shards_per_group {}'.format(\n num_shards_per_group))\n if num_shards_per_group > 1:\n # Compute variance using: Var[X]= E[X^2] - E[X]^2.\n shard_square_of_mean = tf.math.square(shard_mean)\n shard_mean_of_square = shard_variance + shard_square_of_mean\n group_mean = cross_replica_mean(shard_mean, num_shards_per_group)\n group_mean_of_square = cross_replica_mean(\n shard_mean_of_square, num_shards_per_group)\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n return (group_mean, group_variance)\n else:\n return (shard_mean, shard_variance)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\nclass SyncBatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Cross replica batch normalization.\"\"\"\n\n def __init__(self, 
fused=False, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n if fused in (True, None):\n raise ValueError('SyncBatchNormalization does not support fused=True.')\n super().__init__(fused=fused, **kwargs)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super()._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n replica_context = tf.distribute.get_replica_context()\n num_shards = replica_context.num_replicas_in_sync or 1\n\n if num_shards > 1:\n # Compute variance using: Var[X]= E[X^2] - E[X]^2.\n shard_square_of_mean = tf.math.square(shard_mean)\n shard_mean_of_square = shard_variance + shard_square_of_mean\n shard_stack = tf.stack([shard_mean, shard_mean_of_square])\n group_mean, group_mean_of_square = tf.unstack(\n replica_context.all_reduce(tf.distribute.ReduceOp.MEAN, shard_stack))\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n return (group_mean, group_variance)\n else:\n return (shard_mean, shard_variance)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\nclass BatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Fixed default name of BatchNormalization to match TpuBatchNormalization.\"\"\"\n\n def __init__(self, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n super().__init__(**kwargs)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n # A temporary hack for tf1 compatibility with keras batch norm.\n for u in self.updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)\n return outputs\n\n\ndef batch_norm_class(is_training, strategy=None):\n if is_training and strategy == 'tpu':\n return TpuBatchNormalization\n elif is_training and strategy == 'gpus':\n # TODO(fsx950223): use SyncBatchNorm after TF bug is fixed (incorrect nccl\n # all_reduce). See https://github.com/tensorflow/tensorflow/issues/41980\n return BatchNormalization\n else:\n return BatchNormalization\n\n\ndef batch_normalization(inputs, training=False, strategy=None, **kwargs):\n \"\"\"A wrapper for TpuBatchNormalization.\"\"\"\n bn_layer = batch_norm_class(training, strategy)(**kwargs)\n return bn_layer(inputs, training=training)\n\n\ndef batch_norm_act(inputs,\n is_training_bn: bool,\n act_type: Union[Text, None],\n init_zero: bool = False,\n data_format: Text = 'channels_last',\n momentum: float = 0.99,\n epsilon: float = 1e-3,\n strategy: Text = None,\n name: Text = None):\n \"\"\"Performs a batch normalization followed by a non-linear activation.\n\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training_bn: `bool` for whether the model is training.\n act_type: non-linear relu function type. 
If None, omits the relu operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n momentum: `float`, momentume of batch norm.\n epsilon: `float`, small value for numerical stability.\n strategy: string to specify training strategy for TPU/GPU/CPU.\n name: the name of the batch normalization layer\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == 'channels_first':\n axis = 1\n else:\n axis = 3\n\n inputs = batch_normalization(\n inputs=inputs,\n axis=axis,\n momentum=momentum,\n epsilon=epsilon,\n center=True,\n scale=True,\n training=is_training_bn,\n strategy=strategy,\n gamma_initializer=gamma_initializer,\n name=name)\n\n if act_type:\n inputs = activation_fn(inputs, act_type)\n return inputs\n\n\ndef drop_connect(inputs, is_training, survival_prob):\n \"\"\"Drop the entire conv with given survival probability.\"\"\"\n # \"Deep Networks with Stochastic Depth\", https://arxiv.org/pdf/1603.09382.pdf\n if not is_training:\n return inputs\n\n # Compute tensor.\n batch_size = tf.shape(inputs)[0]\n random_tensor = survival_prob\n random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)\n binary_tensor = tf.floor(random_tensor)\n # Unlike conventional way that multiply survival_prob at test time, here we\n # divide survival_prob at training time, such that no addition compute is\n # needed at test time.\n output = inputs / survival_prob * binary_tensor\n return output\n\n\ndef num_params_flops(readable_format=True):\n \"\"\"Return number of parameters and flops.\"\"\"\n nparams = np.sum(\n [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])\n options = tf.profiler.ProfileOptionBuilder.float_operation()\n options['output'] = 'none'\n flops = tf.profiler.profile(\n tf.get_default_graph(), options=options).total_float_ops\n # We use flops to denote multiply-adds, which is counted as 2 ops in tfprof.\n flops = flops // 2\n if readable_format:\n nparams = float(nparams) * 1e-6\n flops = float(flops) * 1e-9\n return nparams, flops\n\n\nconv_kernel_initializer = tf.initializers.variance_scaling()\ndense_kernel_initializer = tf.initializers.variance_scaling()\n\n\nclass Pair(tuple):\n\n def __new__(cls, name, value):\n return super().__new__(cls, (name, value))\n\n def __init__(self, name, _): # pylint: disable=super-init-not-called\n self.name = name\n\n\ndef scalar(name, tensor):\n \"\"\"Stores a (name, Tensor) tuple in a custom collection.\"\"\"\n logging.info('Adding scale summary {}'.format(Pair(name, tensor)))\n tf.add_to_collection('scalar_summaries', Pair(name, tf.reduce_mean(tensor)))\n\n\ndef image(name, tensor):\n logging.info('Adding image summary {}'.format(Pair(name, tensor)))\n tf.add_to_collection('image_summaries', Pair(name, tensor))\n\n\ndef get_tpu_host_call(global_step, params):\n \"\"\"Get TPU host call for summaries.\"\"\"\n scalar_summaries = tf.get_collection('scalar_summaries')\n if params['img_summary_steps']:\n image_summaries = tf.get_collection('image_summaries')\n else:\n image_summaries = []\n if not scalar_summaries and not image_summaries:\n return None # No summaries to write.\n\n model_dir = params['model_dir']\n iterations_per_loop = 
params.get('iterations_per_loop', 100)\n img_steps = params['img_summary_steps']\n\n def host_call_fn(global_step, *args):\n \"\"\"Training host call. Creates summaries for training metrics.\"\"\"\n gs = global_step[0]\n with tf2.summary.create_file_writer(\n model_dir, max_queue=iterations_per_loop).as_default():\n with tf2.summary.record_if(True):\n for i, _ in enumerate(scalar_summaries):\n name = scalar_summaries[i][0]\n tensor = args[i][0]\n tf2.summary.scalar(name, tensor, step=gs)\n\n if img_steps:\n with tf2.summary.record_if(lambda: tf.math.equal(gs % img_steps, 0)):\n # Log images every 1k steps.\n for i, _ in enumerate(image_summaries):\n name = image_summaries[i][0]\n tensor = args[i + len(scalar_summaries)]\n tf2.summary.image(name, tensor, step=gs)\n\n return tf.summary.all_v2_summary_ops()\n\n reshaped_tensors = [tf.reshape(t, [1]) for _, t in scalar_summaries]\n reshaped_tensors += [t for _, t in image_summaries]\n global_step_t = tf.reshape(global_step, [1])\n return host_call_fn, [global_step_t] + reshaped_tensors\n\n\ndef archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):\n \"\"\"Archive a checkpoint if the metric is better.\"\"\"\n ckpt_dir, ckpt_name = os.path.split(ckpt_path)\n\n saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')\n saved_objective = float('-inf')\n if tf.io.gfile.exists(saved_objective_path):\n with tf.io.gfile.GFile(saved_objective_path, 'r') as f:\n saved_objective = float(f.read())\n if saved_objective > ckpt_objective:\n logging.info('Ckpt {} is worse than {}'.format(ckpt_objective,\n saved_objective))\n return False\n\n filenames = tf.io.gfile.glob(ckpt_path + '.*')\n if filenames is None:\n logging.info('No files to copy for checkpoint {}'.format(ckpt_path))\n return False\n\n # clear up the backup folder.\n backup_dir = os.path.join(ckpt_dir, 'backup')\n if tf.io.gfile.exists(backup_dir):\n tf.io.gfile.rmtree(backup_dir)\n\n # rename the old checkpoints to backup folder.\n dst_dir = os.path.join(ckpt_dir, 'archive')\n if tf.io.gfile.exists(dst_dir):\n logging.info('mv {} to {}'.format(dst_dir, backup_dir))\n tf.io.gfile.rename(dst_dir, backup_dir)\n\n # Write checkpoints.\n tf.io.gfile.makedirs(dst_dir)\n for f in filenames:\n dest = os.path.join(dst_dir, os.path.basename(f))\n tf.io.gfile.copy(f, dest, overwrite=True)\n ckpt_state = tf.train.generate_checkpoint_state_proto(\n dst_dir,\n model_checkpoint_path=os.path.join(dst_dir, ckpt_name))\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:\n f.write(str(ckpt_state))\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:\n f.write('%s' % ckpt_eval)\n\n # Update the best objective.\n with tf.io.gfile.GFile(saved_objective_path, 'w') as f:\n f.write('%f' % ckpt_objective)\n\n logging.info('Copying checkpoint {} to {}'.format(ckpt_path, dst_dir))\n return True\n\n\ndef parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):\n \"\"\"Parse the image size and return (height, width).\n\n Args:\n image_size: A integer, a tuple (H, W), or a string with HxW format.\n\n Returns:\n A tuple of integer (height, width).\n \"\"\"\n if isinstance(image_size, int):\n # image_size is integer, with the same width and height.\n return (image_size, image_size)\n\n if isinstance(image_size, str):\n # image_size is a string with format WxH\n width, height = image_size.lower().split('x')\n return (int(height), int(width))\n\n if isinstance(image_size, tuple):\n return image_size\n\n raise ValueError('image_size must be an int, WxH string, or 
(height, width) '\n                   'tuple. Was %r' % image_size)\n\n\ndef get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]],\n                   max_level: int):\n  \"\"\"Get feat widths and heights for all levels.\n\n  Args:\n    image_size: An integer, a tuple (H, W), or a string in WxH format.\n    max_level: maximum feature level.\n\n  Returns:\n    feat_sizes: a list of tuples (height, width) for each level.\n  \"\"\"\n  image_size = parse_image_size(image_size)\n  feat_sizes = [{'height': image_size[0], 'width': image_size[1]}]\n  feat_size = image_size\n  for _ in range(1, max_level + 1):\n    feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)\n    feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]})\n  return feat_sizes\n\n\ndef verify_feats_size(feats,\n                      feat_sizes,\n                      min_level,\n                      max_level,\n                      data_format='channels_last'):\n  \"\"\"Verify the feature map sizes.\"\"\"\n  expected_output_size = feat_sizes[min_level:max_level + 1]\n  for cnt, size in enumerate(expected_output_size):\n    h_id, w_id = (2, 3) if data_format == 'channels_first' else (1, 2)\n    if feats[cnt].shape[h_id] != size['height']:\n      raise ValueError(\n          'feats[{}] has shape {} but its height should be {}. '\n          '(input_height: {}, min_level: {}, max_level: {}.)'.format(\n              cnt, feats[cnt].shape, size['height'], feat_sizes[0]['height'],\n              min_level, max_level))\n    if feats[cnt].shape[w_id] != size['width']:\n      raise ValueError(\n          'feats[{}] has shape {} but its width should be {}. '\n          '(input_width: {}, min_level: {}, max_level: {}.)'.format(\n              cnt, feats[cnt].shape, size['width'], feat_sizes[0]['width'],\n              min_level, max_level))\n\n\ndef get_precision(strategy: str, mixed_precision: bool = False):\n  \"\"\"Get the precision policy for a given strategy.\"\"\"\n  if mixed_precision:\n    if strategy == 'tpu':\n      return 'mixed_bfloat16'\n\n    if tf.config.experimental.list_physical_devices('GPU'):\n      return 'mixed_float16'\n\n    # TODO(fsx950223): Fix CPU float16 inference\n    # https://github.com/google/automl/issues/504\n    logging.warning('float16 is not supported for CPU, use float32 instead')\n    return 'float32'\n\n  return 'float32'\n\n\[email protected]\ndef float16_scope():\n  \"\"\"Scope class for float16.\"\"\"\n\n  def _custom_getter(getter, *args, **kwargs):\n    \"\"\"Returns a custom getter that methods must be called under.\"\"\"\n    cast_to_float16 = False\n    requested_dtype = kwargs['dtype']\n    if requested_dtype == tf.float16:\n      kwargs['dtype'] = tf.float32\n      cast_to_float16 = True\n    var = getter(*args, **kwargs)\n    if cast_to_float16:\n      var = tf.cast(var, tf.float16)\n    return var\n\n  with tf.variable_scope('', custom_getter=_custom_getter) as varscope:\n    yield varscope\n\n\ndef set_precision_policy(policy_name: Text = None, loss_scale: bool = False):\n  \"\"\"Set precision policy according to the name.\n\n  Args:\n    policy_name: precision policy name, one of 'float32', 'mixed_float16',\n      'mixed_bfloat16', or None.\n    loss_scale: whether to use loss scale (only for training).\n  \"\"\"\n  if not policy_name:\n    return\n\n  assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32')\n  logging.info('use mixed precision policy name %s', policy_name)\n  # TODO(tanmingxing): use tf.keras.layers.enable_v2_dtype_behavior() when it\n  # is available in a stable TF release.\n  from tensorflow.python.keras.engine import base_layer_utils  # pylint: disable=g-import-not-at-top,g-direct-tensorflow-import\n  base_layer_utils.enable_v2_dtype_behavior()\n  # mixed_float16 training is not supported for now, so disable loss_scale.\n  # float32 and mixed_bfloat16 do not need loss 
scale for training.\n  if loss_scale:\n    policy = tf2.keras.mixed_precision.experimental.Policy(policy_name)\n  else:\n    policy = tf2.keras.mixed_precision.experimental.Policy(\n        policy_name, loss_scale=None)\n  tf2.keras.mixed_precision.experimental.set_policy(policy)\n\n\ndef build_model_with_precision(pp, mm, ii, tt, *args, **kwargs):\n  \"\"\"Build model with its inputs/params for a specified precision context.\n\n  This is highly specific to this codebase, and not intended to be a general\n  API. Advanced users only. DO NOT use it if you don't know what it does.\n  NOTE: short argument names are intended to avoid conflicts with kwargs.\n\n  Args:\n    pp: A string, precision policy name, such as \"mixed_float16\".\n    mm: A function, for model builder.\n    ii: A tensor, for model inputs.\n    tt: A bool, if true, it is for training; otherwise, it is for eval.\n    *args: A list of model arguments.\n    **kwargs: A dict, extra model parameters.\n\n  Returns:\n    the output of mm model.\n  \"\"\"\n  if pp == 'mixed_bfloat16':\n    set_precision_policy(pp)\n    inputs = tf.cast(ii, tf.bfloat16)\n    with tf.tpu.bfloat16_scope():\n      outputs = mm(inputs, *args, **kwargs)\n    set_precision_policy('float32')\n  elif pp == 'mixed_float16':\n    set_precision_policy(pp, loss_scale=tt)\n    inputs = tf.cast(ii, tf.float16)\n    with float16_scope():\n      outputs = mm(inputs, *args, **kwargs)\n    set_precision_policy('float32')\n  elif not pp or pp == 'float32':\n    outputs = mm(ii, *args, **kwargs)\n  else:\n    raise ValueError('Unknown precision name {}'.format(pp))\n\n  # Users are responsible for converting the dtype of all outputs.\n  return outputs\n" ]
[ [ "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.math.square", "tensorflow.compat.v2.summary.image", "tensorflow.compat.v1.config.experimental.list_physical_devices", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation", "tensorflow.compat.v2.keras.mixed_precision.experimental.Policy", "tensorflow.compat.v1.summary.all_v2_summary_ops", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.io.gfile.rmtree", "tensorflow.python.keras.engine.base_layer_utils.enable_v2_dtype_behavior", "tensorflow.compat.v1.ones_initializer", "tensorflow.compat.v1.Variable", "tensorflow.compat.v1.io.gfile.exists", "tensorflow.compat.v1.floor", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.math.softplus", "tensorflow.compat.v1.tpu.cross_replica_sum", "tensorflow.compat.v1.io.gfile.makedirs", "tensorflow.compat.v1.cast", "tensorflow.compat.v2.summary.record_if", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.nn.relu", "tensorflow.compat.v1.io.gfile.glob", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v1.random.uniform", "tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy", "tensorflow.compat.v2.summary.scalar", "tensorflow.compat.v1.io.gfile.copy", "tensorflow.compat.v1.math.equal", "tensorflow.compat.v1.sigmoid", "tensorflow.compat.v2.summary.create_file_writer", "tensorflow.compat.v1.nn.relu6", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.tpu.bfloat16_scope", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.initializers.variance_scaling", "tensorflow.python.tpu.tpu_function.get_tpu_context", "tensorflow.compat.v1.train.load_checkpoint", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.distribute.get_replica_context", "tensorflow.compat.v1.io.gfile.rename", "tensorflow.compat.v1.io.gfile.GFile", "tensorflow.compat.v1.nn.swish", "tensorflow.compat.v1.name_scope" ] ]
Tchiik/CompViz
[ "b0a94cbf360e04cc4bcac261ae435eff462aa625" ]
[ "custom_2.py" ]
[ "\"\"\"\nMask R-CNN\nTrain on the toy bottle dataset and implement color splash effect.\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n------------------------------------------------------------\nUsage: import the module (see Jupyter notebooks for examples), or run from\n the command line as such:\n # Train a new model starting from pre-trained COCO weights\n python3 bottle.py train --dataset=/home/datascience/Workspace/maskRcnn/Mask_RCNN-master/samples/bottle/dataset --weights=coco\n # Resume training a model that you had trained earlier\n python3 bottle.py train --dataset=/path/to/bottle/dataset --weights=last\n # Train a new model starting from ImageNet weights\n python3 bottle.py train --dataset=/path/to/bottle/dataset --weights=imagenet\n # Apply color splash to an image\n python3 bottle.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>\n # Apply color splash to video using the last weights you trained\n python3 bottle.py splash --weights=last --video=<URL or path to file>\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\nimport cv2\nfrom mrcnn.visualize import display_instances\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = '/content/drive/My Drive/CompViz/'\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\nnewCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_scratch_0014.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Configurations\n############################################################\n\n\nclass CustomConfig(Config):\n \"\"\"Configuration for training on the toy dataset.\n Derives from the base Config class and overrides some values.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"damage\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 2\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 5 # Background + toy\n\n # Number of training steps per epoch\n STEPS_PER_EPOCH = 200\n\n # Skip detections with < 90% confidence\n DETECTION_MIN_CONFIDENCE = 0.9\n\n\n############################################################\n# Dataset\n############################################################\n\nclass CustomDataset(utils.Dataset):\n\n def load_custom(self, dataset_dir, subset):\n \"\"\"Load a subset of the bottle dataset.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val\n \"\"\"\n # Add classes. 
We have five damage classes to add.\n        self.add_class(\"damage\", 1, \"bumper_scratch\")\n        self.add_class(\"damage\", 2, \"door_scratch\")\n        self.add_class(\"damage\", 3, \"bumper_dent\")\n        self.add_class(\"damage\", 4, \"door_dent\")\n        self.add_class(\"damage\", 5, \"broken_headlight\")\n\n\n        # Train or validation dataset?\n        assert subset in [\"train\", \"val\"]\n        dataset_dir = os.path.join(dataset_dir + '/' + subset)\n\n        # Load annotations\n        # VGG Image Annotator saves each image in the form:\n        # { 'filename': '28503151_5b5b7ec140_b.jpg',\n        #   'regions': {\n        #       '0': {\n        #           'region_attributes': {},\n        #           'shape_attributes': {\n        #               'all_points_x': [...],\n        #               'all_points_y': [...],\n        #               'name': 'polygon'}},\n        #       ... more regions ...\n        #   },\n        #   'size': 100202\n        # }\n        # We mostly care about the x and y coordinates of each region\n        annotations1 = json.load(open(os.path.join(dataset_dir + '/' + \"via_region_data.json\"),'r',encoding=\"utf8\",errors='ignore'))\n        # print(annotations1)\n        annotations = list(annotations1.values())  # don't need the dict keys\n\n        # The VIA tool saves images in the JSON even if they don't have any\n        # annotations. Skip unannotated images.\n        annotations = [a for a in annotations if a['regions']]\n        \n        # Add images\n        for a in annotations:\n            # print(a)\n            # Get the x, y coordinates of points of the polygons that make up\n            # the outline of each object instance. These are stored in the\n            # shape_attributes (see json format above)\n            polygons = [r['shape_attributes'] for r in a['regions'].values()] \n            objects = [s['region_attributes'] for s in a['regions'].values()] #s['region_attributes'] ['name'] for s in a['regions']\n            print(\"damage:\",objects)\n            \n            num_ids = [n['damage'] for n in objects]\n            # Map class names to the IDs registered with add_class above.\n            dic = {\"bumper_scratch\": 1, \"door_scratch\": 2, \"bumper_dent\": 3, \"door_dent\": 4, \"broken_headlight\": 5}\n            num_ids = [dic.get(n, n) for n in num_ids]\n            \n            # num_ids=[]\n            # for n in objects:\n            #     #print(n)\n            #     #print(type(n))\n            #     try:\n            #         if n.key()=='bumper_scratch':\n            #             num_ids.append(1)\n            #         elif n.key()=='door_scratch':\n            #             num_ids.append(2)\n            #         elif n.key()=='bumper_dent':\n            #             num_ids.append(3)\n            #         elif n.key()=='door_dent':\n            #             num_ids.append(4)\n            #         elif n.key()=='broken_headlight':\n            #             num_ids.append(5)\n            #     except:\n            #         pass\n            # \n\n            # name_dict = {\"bumper_scratch\": 1,\"door_scratch\": 2,\"bumper_dent\": 3,\"door_dent\": 4,\"broken_headlight\": 5 }\n            # key = tuple(name_dict)\n            # num_ids = [name_dict[a] for a in objects]\n            \n            # num_ids = [int(n['Event']) for n in objects]\n            # load_mask() needs the image size to convert polygons to masks.\n            # Unfortunately, VIA doesn't include it in JSON, so we must read\n            # the image. 
This is only manageable since the dataset is tiny.\n            print(\"num_ids\",num_ids)\n            image_path = os.path.join(dataset_dir, a['filename'])\n            image = skimage.io.imread(image_path)\n            height, width = image.shape[:2]\n\n            self.add_image(\n                \"damage\",  ## for a single class just add the name here\n                image_id=a['filename'],  # use file name as a unique image id\n                path=image_path,\n                width=width, height=height,\n                polygons=polygons,\n                num_ids=num_ids)\n\n    def load_mask(self, image_id):\n        \"\"\"Generate instance masks for an image.\n       Returns:\n        masks: A bool array of shape [height, width, instance count] with\n            one mask per instance.\n        class_ids: a 1D array of class IDs of the instance masks.\n        \"\"\"\n        # If not a damage dataset image, delegate to parent class.\n        info = self.image_info[image_id]\n        if info[\"source\"] != \"damage\":\n            return super(self.__class__, self).load_mask(image_id)\n\n        # Convert polygons to a bitmap mask of shape\n        # [height, width, instance_count]\n        num_ids = info['num_ids']\n        mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n                        dtype=np.uint8)\n        for i, p in enumerate(info[\"polygons\"]):\n            # Get indexes of pixels inside the polygon and set them to 1\n            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n            mask[rr, cc, i] = 1\n\n        # Return mask, and the array of class IDs of each instance, mapped\n        # from the class names via num_ids above.\n        num_ids = np.array(num_ids, dtype=np.int32)\n        return mask, num_ids\n\n    def image_reference(self, image_id):\n        \"\"\"Return the path of the image.\"\"\"\n        info = self.image_info[image_id]\n        if info[\"source\"] == \"damage\":\n            return info[\"path\"]\n        else:\n            return super(self.__class__, self).image_reference(image_id)\n\n\ndef train(model):\n    \"\"\"Train the model.\"\"\"\n    # Training dataset.\n    dataset_train = CustomDataset()\n    dataset_train.load_custom(args.dataset, \"train\")\n    dataset_train.prepare()\n\n    # Validation dataset\n    dataset_val = CustomDataset()\n    dataset_val.load_custom(args.dataset, \"val\")\n    dataset_val.prepare()\n\n    # *** This training schedule is an example. Update to your needs ***\n    # Since we're using a very small dataset, and starting from\n    # COCO trained weights, we don't need to train too long. Also,\n    # no need to train all layers, just the heads should do it.\n    print(\"Training network heads\")\n    model.train(dataset_train, dataset_val,\n                learning_rate=config.LEARNING_RATE,\n                epochs=10,\n                layers='heads')\n\n\ndef color_splash(image, mask):\n    \"\"\"Apply color splash effect.\n    image: RGB image [height, width, 3]\n    mask: instance segmentation mask [height, width, instance count]\n    Returns result image.\n    \"\"\"\n    # Make a grayscale copy of the image. 
The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n # Copy color pixels from the original color image where mask is set\n if mask.shape[0] > 0:\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray\n return splash\n\n\ndef detect_and_color_splash(model, image_path=None, video_path=None):\n assert image_path or video_path\n\n # Image or video?\n if image_path:\n # Run model detection and generate the color splash effect\n print(\"Running on {}\".format(args.image))\n # Read image\n image = skimage.io.imread(args.image)\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # Save output\n file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n skimage.io.imsave(file_name, splash)\n elif video_path:\n import cv2\n # Video capture\n vcapture = cv2.VideoCapture(video_path)\n width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = vcapture.get(cv2.CAP_PROP_FPS)\n\n # Define codec and create video writer\n file_name = \"splash_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n vwriter = cv2.VideoWriter(file_name,\n cv2.VideoWriter_fourcc(*'MJPG'),\n fps, (width, height))\n\n count = 0\n success = True\n while success:\n print(\"frame: \", count)\n # Read next image\n success, image = vcapture.read()\n if success:\n # OpenCV returns images as BGR, convert to RGB\n image = image[..., ::-1]\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # RGB -> BGR to save image to video\n splash = splash[..., ::-1]\n # Add image to video writer\n vwriter.write(splash)\n count += 1\n vwriter.release()\n print(\"Saved to \", file_name)\n\n############################################################\n# Training\n############################################################\n\nif __name__ == '__main__':\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect custom class.')\n parser.add_argument(\"command\",\n metavar=\"<command>\",\n help=\"'train' or 'splash'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/path/to/custom/dataset/\",\n help='Directory of the custom dataset')\n parser.add_argument('--weights', required=True,\n metavar=\"/path/to/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/)')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n help='Image to apply the color splash effect on')\n parser.add_argument('--video', required=False,\n metavar=\"path or URL to video\",\n help='Video to apply the color splash effect on')\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n elif args.command == \"splash\":\n assert args.image or args.video,\\\n \"Provide --image or --video to apply color splash\"\n\n print(\"Weights: \", args.weights)\n print(\"Dataset: \", args.dataset)\n print(\"Logs: \", args.logs)\n\n # Configurations\n if args.command == \"train\":\n 
config = CustomConfig()\n else:\n class InferenceConfig(CustomConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n config = InferenceConfig()\n config.display()\n\n # Create model\n if args.command == \"train\":\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=args.logs)\n else:\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"newcoco\":\n # Find last trained weights\n weights_path = newCOCO_WEIGHTS_PATH\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()[1]\n elif args.weights.lower() == \"imagenet\":\n # Start from ImageNet trained weights\n weights_path = model.get_imagenet_weights()\n else:\n weights_path = args.weights\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if args.weights.lower() == \"coco\":\n # Exclude the last layers because they require a matching\n # number of classes\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n else:\n model.load_weights(weights_path, by_name=True)\n\n # Train or evaluate\n if args.command == \"train\":\n train(model)\n elif args.command == \"splash\":\n detect_and_color_splash(model, image_path=args.image,\n video_path=args.video)\n else:\n print(\"'{}' is not recognized. \"\n \"Use 'train' or 'splash'\".format(args.command))" ]
[ [ "numpy.array", "numpy.sum", "numpy.where" ] ]
CubicZebra/anomalydetect
[ "ec571b47569f491f72abc9736097fb30f9289f46" ]
[ "core/nearest.py" ]
[ "import numpy as np\nfrom basic.types import vector, matrix\nfrom typing import Optional\nfrom basic.tests import dt\n\n\n# print(dt)\n# print(dt.mean(axis=0))\n\n\n_ptr = np.array([1, 1, 4, 4, 8])\n# print(dt - _ptr)\n\n\ndef _is_broadcastable(x: matrix, _x: vector) -> Optional[TypeError]:\n if x.shape[1] != _x.shape[0]:\n raise TypeError(r'arg {} is not broadcastable to target matrix'.format(_x))\n\n\ndef _euclidean_ord(x: matrix, _x: vector) -> vector:\n _is_broadcastable(x, _x)\n return np.linalg.norm(x - _x, axis=1, ord=2).argsort(kind='mergesort')\n\n\ndef to_a_table(x: matrix, tag: vector, k: Optional[int] = None):\n idx_tab = np.array([_euclidean_ord(x, item) for item in x])\n _ = np.array([item for item in 'abcdefghijklmnopqrst']) # labels for test\n # for v in range(len(idx_tab)):\n # print(_[idx_tab[v]])\n # k -> np.unique(), 二值化的\n cls, counts = np.unique(tag, return_counts=True)\n proportions = counts/counts.sum()\n np.where(tag == '2')\n # print(np.log(v[0]) - np.log(v[1])) # 计算加速: 当数据量大时,最好对k的值(最大)作出限制\n\n\n# to_a_table(dt, ['a', 'a', 'b', 'b', 'b'])\n\n\n# v = _euclidean_dis(dt, _ptr)\n# print(v)\n# print(np.argsort(v, kind='mergesort'))\n\n\nfrom multiprocessing.pool import ThreadPool\nimport time\n\n\ndef print_hello(x):\n print(r'hello, {}'.format(x))\n time.sleep(2)\n return 'name_' + x\n\n\nwith ThreadPool(2) as p:\n res = p.map(print_hello, ['aa', 'bb'])\n\nprint(res)\n\n\nif __name__ == '__main__':\n # with ThreadPool() as p:\n # res = p.map(print_hello, ['aa', 'bb'])\n #\n # print(res)\n pass\n" ]
[ [ "numpy.array", "numpy.where", "numpy.linalg.norm", "numpy.unique" ] ]
Liang-ZX/Stereo-Mask-RCNN
[ "c7c53062eacca4511fd4d091bea41cd7b5cf100d" ]
[ "demo.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n\n# Modified by Peiliang Li for Stereo RCNN demo\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport shutil\nimport time\nimport cv2\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport math as m\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.roi_layers import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv, kpts_transform_inv, border_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.stereo_rcnn.resnet import resnet\nfrom model.utils import kitti_utils\nfrom model.utils import vis_3d_utils as vis_utils\nfrom model.utils import box_estimator as box_estimator\nfrom model.dense_align import dense_align\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Test the Stereo R-CNN network')\n\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"models_stereo\",\n type=str)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=12, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=6477, type=int)\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n np.random.seed(cfg.RNG_SEED)\n\n input_dir = args.load_dir + \"/\"\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'stereo_rcnn_{}_{}.pth'.format(args.checkepoch, args.checkpoint))\n kitti_classes = np.asarray(['__background__', 'Car'])\n\n # initilize the network here.\n stereoRCNN = resnet(kitti_classes, 101, pretrained=False)\n stereoRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n stereoRCNN.load_state_dict(checkpoint['model'])\n print('load model successfully!')\n\n with torch.no_grad():\n # initilize the tensor holder here.\n im_left_data = Variable(torch.FloatTensor(1).cuda())\n im_right_data = Variable(torch.FloatTensor(1).cuda())\n im_info = Variable(torch.FloatTensor(1).cuda())\n num_boxes = Variable(torch.LongTensor(1).cuda())\n gt_boxes = Variable(torch.FloatTensor(1).cuda())\n\n stereoRCNN.cuda()\n\n eval_thresh = 0.05\n vis_thresh = 0.7\n\n stereoRCNN.eval()\n \n # read data\n img_l_path = 'demo/left.png'\n img_r_path = 'demo/right.png'\n\n img_left = cv2.imread(img_l_path)\n img_right = cv2.imread(img_r_path)\n\n # rgb -> bgr\n img_left = img_left.astype(np.float32, copy=False)\n img_right = img_right.astype(np.float32, copy=False)\n\n img_left -= cfg.PIXEL_MEANS\n img_right -= cfg.PIXEL_MEANS\n\n im_shape = img_left.shape\n im_size_min = np.min(im_shape[0:2])\n im_scale = float(cfg.TRAIN.SCALES[0]) / float(im_size_min)\n\n img_left = 
cv2.resize(img_left, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n img_right = cv2.resize(img_right, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n \n info = np.array([[img_left.shape[0], img_left.shape[1], \\\n im_scale]], dtype=np.float32)\n \n img_left = torch.from_numpy(img_left)\n img_left = img_left.permute(2, 0, 1).unsqueeze(0).contiguous()\n\n img_right = torch.from_numpy(img_right)\n img_right = img_right.permute(2, 0, 1).unsqueeze(0).contiguous()\n\n info = torch.from_numpy(info)\n\n im_left_data.data.resize_(img_left.size()).copy_(img_left)\n im_right_data.data.resize_(img_right.size()).copy_(img_right)\n im_info.data.resize_(info.size()).copy_(info)\n \n det_tic = time.time()\n rois_left, rois_right, cls_prob, bbox_pred, bbox_pred_dim, kpts_prob,\\\n left_prob, right_prob, rpn_loss_cls, rpn_loss_box_left_right,\\\n RCNN_loss_cls, RCNN_loss_bbox, RCNN_loss_dim_orien, RCNN_loss_kpts, rois_label =\\\n stereoRCNN(im_left_data, im_right_data, im_info, gt_boxes, gt_boxes,\\\n gt_boxes, gt_boxes, gt_boxes, num_boxes)\n \n scores = cls_prob.data\n boxes_left = rois_left.data[:, :, 1:5]\n boxes_right = rois_right.data[:, :, 1:5]\n\n bbox_pred = bbox_pred.data\n box_delta_left = bbox_pred.new(bbox_pred.size()[1], 4*len(kitti_classes)).zero_()\n box_delta_right = bbox_pred.new(bbox_pred.size()[1], 4*len(kitti_classes)).zero_()\n\n for keep_inx in range(box_delta_left.size()[0]):\n box_delta_left[keep_inx, 0::4] = bbox_pred[0,keep_inx,0::6]\n box_delta_left[keep_inx, 1::4] = bbox_pred[0,keep_inx,1::6]\n box_delta_left[keep_inx, 2::4] = bbox_pred[0,keep_inx,2::6]\n box_delta_left[keep_inx, 3::4] = bbox_pred[0,keep_inx,3::6]\n\n box_delta_right[keep_inx, 0::4] = bbox_pred[0,keep_inx,4::6]\n box_delta_right[keep_inx, 1::4] = bbox_pred[0,keep_inx,1::6]\n box_delta_right[keep_inx, 2::4] = bbox_pred[0,keep_inx,5::6]\n box_delta_right[keep_inx, 3::4] = bbox_pred[0,keep_inx,3::6]\n\n box_delta_left = box_delta_left.view(-1,4)\n box_delta_right = box_delta_right.view(-1,4)\n\n dim_orien = bbox_pred_dim.data\n dim_orien = dim_orien.view(-1,5)\n\n kpts_prob = kpts_prob.data\n kpts_prob = kpts_prob.view(-1,4*cfg.KPTS_GRID)\n max_prob, kpts_delta = torch.max(kpts_prob,1)\n\n left_prob = left_prob.data\n left_prob = left_prob.view(-1,cfg.KPTS_GRID)\n _, left_delta = torch.max(left_prob,1)\n\n right_prob = right_prob.data\n right_prob = right_prob.view(-1,cfg.KPTS_GRID)\n _, right_delta = torch.max(right_prob,1)\n\n box_delta_left = box_delta_left * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_delta_right = box_delta_right * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n dim_orien = dim_orien * torch.FloatTensor(cfg.TRAIN.DIM_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.DIM_NORMALIZE_MEANS).cuda()\n\n\n box_delta_left = box_delta_left.view(1,-1,4*len(kitti_classes))\n box_delta_right = box_delta_right.view(1, -1,4*len(kitti_classes))\n dim_orien = dim_orien.view(1, -1, 5*len(kitti_classes))\n kpts_delta = kpts_delta.view(1, -1, 1)\n left_delta = left_delta.view(1, -1, 1)\n right_delta = right_delta.view(1, -1, 1)\n max_prob = max_prob.view(1, -1, 1)\n\n pred_boxes_left = bbox_transform_inv(boxes_left, box_delta_left, 1)\n pred_boxes_right = bbox_transform_inv(boxes_right, box_delta_right, 1)\n pred_kpts, kpts_type = kpts_transform_inv(boxes_left, kpts_delta,cfg.KPTS_GRID)\n pred_left 
= border_transform_inv(boxes_left, left_delta,cfg.KPTS_GRID)\n pred_right = border_transform_inv(boxes_left, right_delta,cfg.KPTS_GRID)\n\n pred_boxes_left = clip_boxes(pred_boxes_left, im_info.data, 1)\n pred_boxes_right = clip_boxes(pred_boxes_right, im_info.data, 1)\n\n pred_boxes_left /= im_info[0,2].data\n pred_boxes_right /= im_info[0,2].data\n pred_kpts /= im_info[0,2].data\n pred_left /= im_info[0,2].data\n pred_right /= im_info[0,2].data\n\n scores = scores.squeeze()\n pred_boxes_left = pred_boxes_left.squeeze()\n pred_boxes_right = pred_boxes_right.squeeze()\n\n pred_kpts = torch.cat((pred_kpts, kpts_type, max_prob, pred_left, pred_right),2)\n pred_kpts = pred_kpts.squeeze()\n dim_orien = dim_orien.squeeze()\n\n det_toc = time.time()\n detect_time = det_toc - det_tic\n\n calib = kitti_utils.read_obj_calibration('demo/calib.txt')\n\n im2show_left = np.copy(cv2.imread(img_l_path))\n im2show_right = np.copy(cv2.imread(img_r_path))\n \n pointcloud = kitti_utils.get_point_cloud('demo/lidar.bin', calib)\n im_box = vis_utils.vis_lidar_in_bev(pointcloud, width=im2show_left.shape[0]*2)\n\n for j in xrange(1, len(kitti_classes)):\n inds = torch.nonzero(scores[:,j] > eval_thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:,j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n\n cls_boxes_left = pred_boxes_left[inds][:, j * 4:(j + 1) * 4]\n cls_boxes_right = pred_boxes_right[inds][:, j * 4:(j + 1) * 4]\n cls_dim_orien = dim_orien[inds][:, j * 5:(j + 1) * 5]\n \n cls_kpts = pred_kpts[inds]\n\n cls_dets_left = torch.cat((cls_boxes_left, cls_scores.unsqueeze(1)), 1)\n cls_dets_right = torch.cat((cls_boxes_right, cls_scores.unsqueeze(1)), 1)\n\n cls_dets_left = cls_dets_left[order]\n cls_dets_right = cls_dets_right[order]\n cls_dim_orien = cls_dim_orien[order]\n cls_kpts = cls_kpts[order] \n\n keep = nms(cls_boxes_left[order, :], cls_scores[order], cfg.TEST.NMS)\n keep = keep.view(-1).long()\n cls_dets_left = cls_dets_left[keep]\n cls_dets_right = cls_dets_right[keep]\n cls_dim_orien = cls_dim_orien[keep]\n cls_kpts = cls_kpts[keep]\n\n # optional operation, can check the regressed borderline keypoint using 2D box inference\n infered_kpts = kitti_utils.infer_boundary(im2show_left.shape, cls_dets_left.cpu().numpy())\n infered_kpts = torch.from_numpy(infered_kpts).type_as(cls_dets_left)\n for detect_idx in range(cls_dets_left.size()[0]):\n if cls_kpts[detect_idx,4] - cls_kpts[detect_idx,3] < \\\n 0.5*(infered_kpts[detect_idx,1]-infered_kpts[detect_idx,0]):\n cls_kpts[detect_idx,3:5] = infered_kpts[detect_idx]\n\n im2show_left = vis_detections(im2show_left, kitti_classes[j], \\\n cls_dets_left.cpu().numpy(), vis_thresh, cls_kpts.cpu().numpy())\n im2show_right = vis_detections(im2show_right, kitti_classes[j], \\\n cls_dets_right.cpu().numpy(), vis_thresh) \n\n # read intrinsic\n f = calib.p2[0,0]\n cx, cy = calib.p2[0,2], calib.p2[1,2]\n bl = (calib.p2[0,3] - calib.p3[0,3])/f\n\n boxes_all = cls_dets_left.new(0,5)\n kpts_all = cls_dets_left.new(0,5)\n poses_all = cls_dets_left.new(0,8)\n\n solve_tic = time.time()\n for detect_idx in range(cls_dets_left.size()[0]):\n if cls_dets_left[detect_idx, -1] > eval_thresh:\n box_left = cls_dets_left[detect_idx,0:4].cpu().numpy() # based on origin image\n box_right = cls_dets_right[detect_idx,0:4].cpu().numpy() \n kpts_u = cls_kpts[detect_idx,0]\n dim = cls_dim_orien[detect_idx,0:3].cpu().numpy()\n sin_alpha = cls_dim_orien[detect_idx,3]\n cos_alpha = cls_dim_orien[detect_idx,4]\n alpha = m.atan2(sin_alpha, cos_alpha)\n 
status, state = box_estimator.solve_x_y_z_theta_from_kpt(im2show_left.shape, calib, alpha, \\\n                                    dim, box_left, box_right, cls_kpts[detect_idx].cpu().numpy())\n          if status > 0:  # not failed\n            poses = im_left_data.data.new(8).zero_()\n            xyz = np.array([state[0], state[1], state[2]])\n            theta = state[3]\n            poses[0], poses[1], poses[2], poses[3], poses[4], poses[5], poses[6], poses[7] = \\\n            xyz[0], xyz[1], xyz[2], float(dim[0]), float(dim[1]), float(dim[2]), theta, alpha\n\n            boxes_all = torch.cat((boxes_all,cls_dets_left[detect_idx,0:5].unsqueeze(0)),0)\n            kpts_all = torch.cat((kpts_all,cls_kpts[detect_idx].unsqueeze(0)),0)\n            poses_all = torch.cat((poses_all,poses.unsqueeze(0)),0)\n        \n        if boxes_all.dim() > 0:\n          # solve disparity by dense alignment (enlarged image)\n          succ, dis_final = dense_align.align_parallel(calib, im_info.data[0,2], \\\n                                                im_left_data.data, im_right_data.data, \\\n                                                boxes_all[:,0:4], kpts_all, poses_all[:,0:7])\n          \n          # do 3D rectify using the aligned disparity\n          for solved_idx in range(succ.size(0)):\n            if succ[solved_idx] > 0:  # succeeded\n              box_left = boxes_all[solved_idx,0:4].cpu().numpy()\n              score = boxes_all[solved_idx,4].cpu().numpy()\n              dim = poses_all[solved_idx,3:6].cpu().numpy()\n              state_rect, z = box_estimator.solve_x_y_theta_from_kpt(im2show_left.shape, calib, \\\n                                          poses_all[solved_idx,7].cpu().numpy(), dim, box_left, \\\n                                          dis_final[solved_idx].cpu().numpy(), kpts_all[solved_idx].cpu().numpy())\n              xyz = np.array([state_rect[0], state_rect[1], z])\n              theta = state_rect[2]\n\n              if score > vis_thresh:\n                im_box = vis_utils.vis_box_in_bev(im_box, xyz, dim, theta, width=im2show_left.shape[0]*2)\n                im2show_left = vis_utils.vis_single_box_in_img(im2show_left, calib, xyz, dim, theta)\n\n    solve_time = time.time() - solve_tic\n\n    sys.stdout.write('demo mode (Press Esc to exit!) detect: {:.3f}s, solve: {:.3f}s \\r'\\\n                     .format(detect_time, solve_time))\n\n    im2show = np.concatenate((im2show_left, im2show_right), axis=0)\n    im2show = np.concatenate((im2show, im_box), axis=1)\n    # cv2.imshow('result', im2show)\n    cv2.imwrite('demo/result.png', im2show)\n\n    k = cv2.waitKey(-1)\n    if k == 27:    # Esc key to stop\n      print('exit!')\n      sys.exit()\n\n\n\n\n\n" ]
[ [ "torch.FloatTensor", "torch.nonzero", "torch.load", "torch.no_grad", "numpy.random.seed", "numpy.asarray", "torch.from_numpy", "numpy.min", "torch.max", "numpy.array", "numpy.concatenate", "torch.LongTensor", "torch.cat", "torch.sort" ] ]
fshart/aesara
[ "1ddf96a7b8e8503fb8773b09c3ca77483fd884c4" ]
[ "aesara/misc/safe_asarray.py" ]
[ "\"\"\"\nHelper function to safely convert an array to a new data type.\n\"\"\"\n\n\nimport numpy as np\n\nfrom aesara.configdefaults import config\n\n\n__docformat__ = \"restructuredtext en\"\n\n\ndef _asarray(a, dtype, order=None):\n \"\"\"Convert the input to a Numpy array.\n\n This function is almost identical to ``numpy.asarray``, but it should be\n used instead of its numpy counterpart when a data type is provided in\n order to perform type conversion if required.\n The reason is that ``numpy.asarray`` may not actually update the array's\n data type to the user-provided type. For more information see ticket\n http://projects.scipy.org/numpy/ticket/870.\n\n In that case, we check that both dtype have the same string\n description (byte order, basic type, and number of bytes), and\n return a view with the desired dtype.\n\n This function's name starts with a '_' to indicate that it is meant to be\n used internally. It is imported so as to be available directly through\n _asarray\n \"\"\"\n if str(dtype) == \"floatX\":\n dtype = config.floatX\n dtype = np.dtype(dtype) # Convert into dtype object.\n rval = np.asarray(a, dtype=dtype, order=order)\n # Note that dtype comparison must be done by comparing their `num`\n # attribute. One cannot assume that two identical data types are pointers\n # towards the same object (e.g. under Windows this appears not to be the\n # case).\n if rval.dtype.num != dtype.num:\n # Type mismatch between the data type we asked for, and the one\n # returned by numpy.asarray.\n # If both types have the same string description (byte order, basic\n # type, and number of bytes), then it is safe to return a view.\n if dtype.str == rval.dtype.str:\n # Silent fix.\n return rval.view(dtype=dtype)\n else:\n # Unexpected mismatch: better know what is going on!\n raise TypeError(\n \"numpy.array did not return the data type we \"\n f\"asked for ({dtype} {dtype.str} #{dtype.num}), instead it returned type \"\n f\"{rval.dtype} {rval.str} #{rval.dtype.num}: function \"\n \"_asarray may need to be modified to handle this \"\n \"data type.\"\n )\n else:\n return rval\n" ]
[ [ "numpy.dtype", "numpy.asarray" ] ]
synicalsyntax/eeg-notebooks
[ "1edcaf24d55eabe076f6ba98645fcc09b3459613" ]
[ "notebooks/stimulus_presentation/spatial_gratings.py" ]
[ "\"\"\"\nGenerate spatial gratings\n=========================\n\nStimulus presentation based on gratings of different spatial frequencies\nfor generating ERPs, high frequency oscillations, and alpha reset.\n\nInspired from:\n\n> Hermes, Dora, K. J. Miller, B. A. Wandell, and Jonathan Winawer. \"Stimulus\ndependence of gamma oscillations in human visual cortex.\" Cerebral Cortex 25,\nno. 9 (2015): 2951-2959.\n\n\"\"\"\n\nfrom time import time\nfrom optparse import OptionParser\n\nimport numpy as np\nimport pandas as pd\nfrom psychopy import visual, core, event\nfrom pylsl import StreamInfo, StreamOutlet\n\n\ndef present(duration=120):\n\n # Create markers stream outlet\n info = StreamInfo('Markers', 'Markers', 3, 0, 'float32', 'myuidw43536')\n channels = info.desc().append_child(\"channels\")\n\n for c in ['Frequency', 'Contrast', 'Orientation']:\n channels.append_child(\"channel\") \\\n .append_child_value(\"label\", c)\n\n outlet = StreamOutlet(info)\n\n start = time()\n\n # Set up trial parameters\n n_trials = 2010\n iti = 1.0\n soa = 1.5\n jitter = 0.5\n record_duration = np.float32(duration)\n\n # Setup trial list\n frequency = np.random.binomial(1, 0.5, n_trials)\n contrast = np.ones(n_trials, dtype=int)\n orientation = np.random.randint(0, 4, n_trials) * 45\n\n trials = pd.DataFrame(dict(frequency=frequency,\n contrast=contrast,\n orientation=orientation))\n\n # graphics\n mywin = visual.Window([1920, 1080], monitor=\"testMonitor\", units=\"deg\",\n fullscr=True)\n grating = visual.GratingStim(win=mywin, mask='circle', size=40, sf=4)\n fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,\n rgb=[1, 0, 0])\n\n rs = np.random.RandomState(42)\n\n core.wait(2)\n\n for ii, trial in trials.iterrows():\n\n # onset\n fre = trials['frequency'].iloc[ii]\n contrast = trials['contrast'].iloc[ii]\n ori = trials['orientation'].iloc[ii]\n grating.sf = 4 * fre + 0.1\n grating.ori = ori\n grating.contrast = contrast\n grating.draw()\n fixation.draw()\n\n # Send marker\n outlet.push_sample([fre + 1, contrast, ori], time())\n mywin.flip()\n\n # offset\n core.wait(soa)\n fixation.draw()\n outlet.push_sample([fre + 3, contrast, ori], time())\n mywin.flip()\n\n if len(event.getKeys()) > 0 or (time() - start) > record_duration:\n break\n event.clearEvents()\n\n # Intertrial interval\n core.wait(iti + np.random.rand() * jitter)\n\n # Cleanup\n mywin.close()\n\n\ndef main():\n parser = OptionParser()\n\n parser.add_option(\"-d\", \"--duration\",\n dest=\"duration\", type='int', default=120,\n help=\"duration of the recording in seconds.\")\n\n (options, args) = parser.parse_args()\n present(options.duration)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.binomial", "numpy.ones", "numpy.float32", "numpy.random.RandomState", "numpy.random.rand", "numpy.random.randint" ] ]
flaport/gdsfactory
[ "1f2e844c1fe27b9c6340e2d51500fd3358fa16e5" ]
[ "pp/components/waveguide_heater.py" ]
[ "from typing import Callable, Dict, List, Tuple\n\nimport numpy as np\n\nimport pp\nfrom pp.cell import cell\nfrom pp.component import Component\nfrom pp.components.electrical.tlm import tlm\nfrom pp.components.extension import line\nfrom pp.components.hline import hline\nfrom pp.components.waveguide import waveguide\nfrom pp.layers import LAYER\nfrom pp.port import Port, deco_rename_ports\n\n\n@cell\ndef heater(\n length: float = 10.0,\n width: float = 0.5,\n layers_heater: List[Tuple[int, int]] = [LAYER.HEATER],\n) -> Component:\n \"\"\" straight heater\n \"\"\"\n c = pp.Component()\n for layer in layers_heater:\n _ref = c.add_ref(hline(length=length, width=width, layer=layer))\n c.ports = _ref.ports # Use ports from latest layer as heater ports\n c.absorb(_ref)\n return c\n\n\ndef add_trenches(\n c: Component,\n sstw: float = 2.0,\n trench_width: float = 0.5,\n trench_keep_out: float = 2.0,\n trenches: List[Dict[str, int]] = [\n {\"nb_segments\": 2, \"lane\": 1, \"x_start_offset\": 0},\n {\"nb_segments\": 2, \"lane\": -1, \"x_start_offset\": 0},\n ],\n layer_trench: Tuple[int, int] = LAYER.DEEPTRENCH,\n) -> Component:\n \"\"\"\n Add trenches to a waveguide-heater-like component\n \"\"\"\n\n heater_width = c.settings[\"heater_width\"]\n heater_spacing = c.settings[\"heater_spacing\"]\n width = c.settings[\"width\"]\n length = c.settings[\"length\"]\n\n a = heater_spacing + (width + heater_width) / 2\n\n # Add trenches\n if trench_width and trench_width > 0:\n tko = trench_keep_out\n\n for trench in trenches:\n lane = trench[\"lane\"]\n td = tko + a + (trench_width + heater_width) / 2\n y = np.sign(lane) * (td + (abs(lane) - 1) * (trench_width + tko))\n x_start_offset = trench[\"x_start_offset\"]\n\n if \"segments\" not in trench:\n nb_segments = trench[\"nb_segments\"]\n trench_length = (length - (nb_segments - 1) * sstw) / nb_segments\n segments = [trench_length] * nb_segments\n else:\n segments = trench[\"segments\"]\n x = x_start_offset\n for i, trench_length in enumerate(segments):\n trench = hline(\n length=trench_length, width=trench_width, layer=layer_trench\n )\n _trench = trench.ref(\n port_id=\"W0\", position=c.ports[\"W0\"].position + (x, y)\n )\n c.add(_trench)\n c.absorb(_trench)\n x += trench_length + sstw\n\n return c\n\n\n@cell\ndef waveguide_heater(\n length: float = 10.0,\n width: float = 0.5,\n heater_width: float = 0.5,\n heater_spacing: float = 1.2,\n sstw: float = 2.0,\n trench_width: float = 0.5,\n trench_keep_out: float = 2.0,\n trenches: List[Dict[str, int]] = [\n {\"nb_segments\": 2, \"lane\": 1, \"x_start_offset\": 0},\n {\"nb_segments\": 2, \"lane\": -1, \"x_start_offset\": 0},\n ],\n layers_heater: List[Tuple[int, int]] = [LAYER.HEATER],\n waveguide_factory: Callable = waveguide,\n layer_trench: Tuple[int, int] = LAYER.DEEPTRENCH,\n) -> Component:\n \"\"\" waveguide with heater\n\n .. code::\n\n TTTTTTTTTTTTT TTTTTTTTTTTTT <-- trench\n\n HHHHHHHHHHHHHHHHHHHHHHHHHHHHHH <-- heater\n\n ------------------------------ <-- waveguide\n\n HHHHHHHHHHHHHHHHHHHHHHHHHHHHHH <-- heater\n\n TTTTTTTTTTTTT TTTTTTTTTTTTT <-- trench\n\n .. 
plot::\n :include-source:\n\n import pp\n\n c = pp.c.waveguide_heater()\n pp.plotgds(c)\n\n \"\"\"\n c = Component()\n\n _heater = heater(length=length, width=heater_width, layers_heater=layers_heater)\n\n y_heater = heater_spacing + (width + heater_width) / 2\n heater_top = c << _heater\n heater_bot = c << _heater\n\n heater_top.movey(+y_heater)\n heater_bot.movey(-y_heater)\n\n wg = c << waveguide_factory(length=length, width=width)\n\n for i in [heater_top, heater_bot, wg]:\n c.absorb(i)\n\n # Add wg ports\n for p in wg.ports.values():\n c.add_port(name=p.name, port=p)\n\n # Add heater ports\n for p in heater_top.ports.values():\n c.add_port(name=\"HT\" + p.name, port=p)\n\n for p in heater_bot.ports.values():\n c.add_port(name=\"HB\" + p.name, port=p)\n\n c.settings[\"width\"] = width\n c.settings[\"heater_width\"] = heater_width\n c.settings[\"heater_spacing\"] = heater_spacing\n c.settings[\"length\"] = length\n add_trenches(\n c, sstw, trench_width, trench_keep_out, trenches, layer_trench=layer_trench\n )\n\n return c\n\n\n@cell\ndef wg_heater_connector(\n heater_ports: List[Port],\n metal_width: float = 10.0,\n tlm_layers: List[Tuple[int, int]] = [\n LAYER.VIA1,\n LAYER.M1,\n LAYER.VIA2,\n LAYER.M2,\n LAYER.VIA3,\n LAYER.M3,\n ],\n) -> Component:\n \"\"\"\n Connects together a pair of wg heaters and connect to a M3 port\n \"\"\"\n\n cmp = Component()\n assert len(heater_ports) == 2\n assert (\n heater_ports[0].orientation == heater_ports[1].orientation\n ), \"both ports should be facing in the same direction\"\n angle = heater_ports[0].orientation\n angle = angle % 360\n assert angle in [0, 180], \"angle should be 0 or 180, got {}\".format(angle)\n\n dx = 0.0\n dy = 0.0\n\n angle_to_dps = {0: [(-dx, -dy), (-dx, dy)], 180: [(dx, -dy), (dx, dy)]}\n ports = heater_ports\n hw = heater_ports[0].width\n\n if angle in [0, 180]:\n ports.sort(key=lambda p: p.y)\n else:\n ports.sort(key=lambda p: p.x)\n\n _heater_to_metal = tlm(width=0.5, height=0.5, layers=tlm_layers, vias=[])\n\n tlm_positions = []\n for port, dp in zip(ports, angle_to_dps[angle]):\n # Extend heater\n p = port.midpoint\n\n # Add via/metal transitions\n tlm_pos = p + dp\n hm = _heater_to_metal.ref(position=tlm_pos)\n tlm_positions += [tlm_pos]\n cmp.add(hm)\n\n ss = 1 if angle == 0 else -1\n\n # Connect both sides with top metal\n edge_metal_piece_width = 7.0\n x = ss * edge_metal_piece_width / 2\n top_metal_layer = tlm_layers[-1]\n cmp.add_polygon(\n line(\n tlm_positions[0] + (x, -hw / 2),\n tlm_positions[1] + (x, hw / 2),\n edge_metal_piece_width,\n ),\n layer=top_metal_layer,\n )\n\n # Add metal port\n cmp.add_port(\n name=\"0\",\n midpoint=0.5 * sum(tlm_positions) + (ss * edge_metal_piece_width / 2, 0),\n orientation=angle,\n width=metal_width,\n layer=top_metal_layer,\n port_type=\"dc\",\n )\n\n return cmp\n\n\n@deco_rename_ports\n@cell\ndef wg_heater_connected(\n waveguide_heater: Callable = waveguide_heater,\n wg_heater_connector: Callable = wg_heater_connector,\n tlm_layers: List[Tuple[int, int]] = [\n LAYER.VIA1,\n LAYER.M1,\n LAYER.VIA2,\n LAYER.M2,\n LAYER.VIA3,\n LAYER.M3,\n ],\n **kwargs\n) -> Component:\n \"\"\"\n .. 
plot::\n :include-source:\n\n import pp\n\n c = pp.c.wg_heater_connected()\n pp.plotgds(c)\n\n \"\"\"\n wg_heater = waveguide_heater(**kwargs)\n # print(wg_heater.ports.keys())\n conn1 = wg_heater_connector(\n heater_ports=[wg_heater.ports[\"HBE0\"], wg_heater.ports[\"HTE0\"]],\n tlm_layers=tlm_layers,\n )\n\n conn2 = wg_heater_connector(\n heater_ports=[wg_heater.ports[\"HBW0\"], wg_heater.ports[\"HTW0\"]],\n tlm_layers=tlm_layers,\n )\n\n cmp = Component()\n for c in [wg_heater, conn1, conn2]:\n _c = cmp.add_ref(c)\n cmp.absorb(_c)\n\n for port_name, p in wg_heater.ports.items():\n cmp.add_port(name=port_name, port=p)\n\n cmp.add_port(name=1, port=conn1.ports[\"0\"])\n cmp.add_port(name=2, port=conn2.ports[\"0\"])\n\n return cmp\n\n\ndef _demo_waveguide_heater():\n c = waveguide_heater(width=0.5)\n pp.write_gds(c)\n\n\nif __name__ == \"__main__\":\n # print(c.get_optical_ports())\n\n c = waveguide_heater()\n # c = wg_heater_connector(heater_ports=[c.ports[\"HBW0\"], c.ports[\"W0\"]])\n # c = wg_heater_connected(length=100.0, width=0.5)\n print(c.ports.keys())\n for p in c.ports.values():\n print(p.name, p.port_type)\n\n pp.show(c)\n" ]
[ [ "numpy.sign" ] ]
LeBenchmark/Interspeech2021
[ "1b368c6461a9a56a4337f9ee86888e286a55f2f9" ]
[ "SLU/slu_models.py" ]
[ "# coding: utf8\n\nimport os\nimport sys\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n#sys.path.append( os.environ['RNNTAGGERPATH'] )\nfrom fairseq.globals import *\n#import utils_classes as Cl\n\n# ---------- Decoders from LD-RNN tool ----------\n# This part is from my first system coded from scratch with pytorch, so be comprehensive if you see bullshits reading it :-)\n\nclass SimpleDecoder(nn.Module):\n\n def __init__(self, nn_params, input_size, direction):\n\n super(SimpleDecoder, self).__init__()\n\n # TMP FOR DEBUG\n self.debug_flag = False\n\n self.attention_heads = nn_params.attention_heads\n self.start_tag_idx = nn_params.start_tag_idx\n self.end_tag_idx = nn_params.end_tag_idx\n self.batch_size = nn_params.batch_size\n self.vocab_size = nn_params.word_vocab_size\n self.char_vocab_size = nn_params.char_vocab_size\n self.tagset_size = nn_params.tag_vocab_size\n self.hidden_dim = 2*nn_params.hidden_dim\n self.label_embed_dim = nn_params.label_embed_dim # NEW\n self.char_embed_dim = nn_params.char_embed_dim\n self.char_hidden_dim = nn_params.char_hidden_dim\n self.label_context_size = nn_params.label_context_size\n self.lex_hidden_layers = nn_params.lex_hidden_layers\n self.lab_hidden_layers = nn_params.lab_hidden_layers\n \n # TMP FOR DEBUG\n #self.word_dict = nn_params.word_dict\n #self.label_dict = nn_params.label_dict\n #self.ix_to_sublabel = nn_params.ix_to_sublabel\n \n print(' - SimpleDecoder init:')\n print(' - start_tag_idx: {}'.format(self.start_tag_idx))\n print(' - end_tag_idx: {}'.format(self.end_tag_idx))\n print(' - batch_size: {}'.format(self.batch_size))\n print(' - vocab_size: {}'.format(self.vocab_size))\n print(' - char_vocab_size: {}'.format(self.char_vocab_size))\n print(' - tagset_size: {}'.format(self.tagset_size))\n print(' - hidden_dim: {}'.format(self.hidden_dim))\n print(' - label_embed_dim: {}'.format(self.label_embed_dim))\n print(' - char_embed_dim: {}'.format(self.char_embed_dim))\n print(' - char_hidden_dim: {}'.format(self.char_hidden_dim))\n print(' - label_context_size: {}'.format(self.label_context_size))\n print(' - lex_hidden_layers: {}'.format(self.lex_hidden_layers))\n print(' - lab_hidden_layers: {}'.format(self.lab_hidden_layers))\n print(' ----------')\n \n self.n_subparts = nn_params.n_subparts\n self.sl_batch_size = 1\n if self.n_subparts > 0:\n self.tag_to_subparts = nn_params.tag_to_subparts\n self.num_directions = 1\n self.CUDA = nn_params.CUDA\n self.TEST = 0\n self.TeachingSignal = True\n self.dtype = nn_params.dtype\n self.ltype = nn_params.ltype\n self.direction = direction\n self.output_length_factor = nn_params.output_length_factor\n if self.direction == 0 or self.direction == 1:\n self.output_length_factor = 1.0\n print(' *** SimpleDecoder, output-length-factor: {}'.format(self.output_length_factor))\n sys.stdout.flush()\n \n self.bw_label_embeddings = nn.Embedding(self.tagset_size, nn_params.label_embed_dim, sparse=False)\n self.emb_dropout_p = nn_params.embed_dropout # NEW\n self.embed_dropout = nn.Dropout(p=nn_params.embed_dropout)\n\n self.attention_size = input_size # TMP\n attention_size = input_size\n #self.hidden_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim + nn_params.attention_heads * attention_size\n #if self.n_subparts > 0:\n # self.hidden_dim = self.hidden_dim + nn_params.sublabel_hidden_dim\n self.input_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim\n #if 
self.n_subparts > 0:\n # self.input_dim = self.input_dim + nn_params.sublabel_hidden_dim\n \n self.BWInputNorm = nn.LayerNorm(self.input_dim)\n self.HiddenSizeMap = nn.Linear(self.input_dim, self.hidden_dim)\n if self.attention_heads > 0:\n print(' *** SimpleDecoder: using gated attention context')\n sys.stdout.flush()\n self.h_lin = nn.Linear(input_size, input_size)\n self.a_lin = nn.Linear(attention_size, attention_size)\n self.LexAttention = ga.GlobalAttention([self.hidden_dim, attention_size, attention_size], attention_size)\n #self.LexAttention = MultiHeadAttention(attention_size, self.hidden_dim, attention_size, nn_params.attention_heads, nn_params.attention_type, self.dtype) # NEW\n #self.SemAttention = AttentionModule(self.hidden_dim, self.hidden_dim, self.hidden_dim, nn_params.attention_heads, nn_params.attention_type, self.dtype) # NEW\n self.SLM = SubLabelModule(nn_params, input_size)\n \n self.RNNInputNorm = nn.LayerNorm(self.hidden_dim)\n #self.bw_RNN = nn.GRU(self.hidden_dim, self.hidden_dim, bidirectional=False)\n self.bw_RNN = ContextualFeatureEncoder(self.hidden_dim, self.hidden_dim, self.batch_size, 1, False, nn_params.dtype, nn_params.contextual_encoder_type)\n #self.bw_RNN.flatten_parameters()\n \n self.MLPInputNorm = nn.LayerNorm(self.hidden_dim)\n self.BWOutputNorm = nn.LayerNorm(self.tagset_size)\n self.output_mlp = ReLU_MLP( [2,self.hidden_dim, self.hidden_dim] )\n output_dim = self.hidden_dim\n if self.n_subparts > 0:\n output_dim = output_dim + nn_params.sublabel_hidden_dim\n self.bw_hidden2tag = nn.Linear(output_dim, self.tagset_size)\n self.hid_dropout_p = nn_params.hidden_dropout # NEW\n self.hidden_dropout = nn.Dropout(p=nn_params.hidden_dropout)\n \n #self.dir_hidden = self.bw_RNN.get_hidden_state()\n \n def init_hidden(self):\n #self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n self.bw_RNN.init_hidden()\n self.SLM.init_hidden()\n \n def resize_embeddings(self, nn_params):\n\n if nn_params.tag_vocab_size > self.tagset_size:\n old_embeddings = self.bw_label_embeddings\n self.bw_label_embeddings = nn.Embedding(nn_params.tag_vocab_size, nn_params.label_embed_dim, sparse=False)\n self.bw_label_embeddings.weight[:self.tagset_size,:] = old_embeddings.weight\n \n old_lin = self.bw_hidden2tag\n output_dim = self.hidden_dim\n if self.n_subparts > 0:\n output_dim = output_dim + nn_params.sublabel_hidden_dim\n self.bw_hidden2tag = nn.Linear(output_dim, nn_params.tag_vocab_size)\n self.bw_hidden2tag.weight[:self.tagset_size,:] = old_lin.weight\n \n old_norm = self.BWOutputNorm\n self.BWOutputNorm = nn.LayerNorm(nn_params.tag_vocab_size)\n self.BWOutputNorm.weight[:self.tagset_size] = old_norm.weight\n \n self.tagset_size = nn_params.tag_vocab_size\n\n\n def train_forward(self, input, bw_streams):\n \n dims = input[0].size()\n sequence_length = dims[0]\n batch_size = self.batch_size\n bw_label_streams = bw_streams[0]\n next_sublabels = bw_streams[1]\n \n indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)\n source_length = sequence_length\n sequence_length = len(indeces)\n gold_sequence_length = bw_label_streams[0].size(0)\n gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n \n source_idxs = [int( i / self.output_length_factor ) for i in indeces]\n target_idxs = [int( i * gold_to_hyp_length_factor ) for i in indeces]\n \n # NEW: TEST IT!!!\n bin_size = 1\n if self.output_length_factor < 1.0:\n bin_size = int(1 / self.output_length_factor) + 1\n\n input_tsr = 
torch.cat( input, 2 )[source_idxs,:,:]\n local_input = [input_tsr]\n local_input.append( self.embed_dropout( self.bw_label_embeddings(bw_label_streams[0][target_idxs,:]) ) )\n\n # TMP FOR DEBUG\n if self.debug_flag:\n print('')\n print(' ************************************************')\n print(' * SimpleDecoder.train_forward -')\n print('')\n print(' *** indeces ({}): {}'.format(len(indeces), list(indeces)))\n print(' *** source_idxs ({}): {}'.format(len(source_idxs), source_idxs))\n print(' *** target_idxs ({}): {}'.format(len(target_idxs), target_idxs))\n print('*')\n print(' * Size of input: {}'.format( torch.cat(input, 2).size() ))\n print(' * Size of local_input: {}'.format( torch.cat(local_input, 2).size() ))\n print(' * Size of bw_label_streams: {}'.format(bw_label_streams[0].size()))\n print(' *')\n print(' * SimpleDecoder.train_forward, backward sublabels and labels:')\n for tgt_idx in target_idxs:\n # print(' {}'.format([self.ix_to_sublabel[sl.item()] for sl in next_sublabels[tgt_idx,:,0]]))\n print(' -----')\n print('@{}, {}'.format(tgt_idx, self.label_dict.index2token(bw_label_streams[0][tgt_idx,0])))\n print('')\n print(' * SimpleDecoder.train_forward, len of local_input: {}'.format(len(local_input)))\n for debug_idx in range(len(local_input)):\n print(' * {}'.format(local_input[debug_idx].size()))\n print(' ---')\n #print(' * SimpleDecoder.train_forward, size of next_sublabels: {}'.format(next_sublabels.size()))\n print(' * SimpleDecoder.train_forward, size of bw_label_streams[0]: {}'.format(bw_label_streams[0].size()))\n print('')\n # END TMP FOR DEBUG\n\n bw_sublabels_rep = []\n if self.n_subparts > 0:\n bw_sublabels_rep = self.SLM( input_tsr, next_sublabels[target_idxs,:,:], 1 )\n\n # TMP FOR DEBUG\n if self.debug_flag:\n #print(' * SimpleDecoder.train_forward, size of bw_sublabels_rep: {}'.format(bw_sublabels_rep[0].size()))\n print(' ***********************************************************')\n sys.stdout.flush()\n\n #local_input = local_input + bw_sublabels_rep\n bw_total_input = self.BWInputNorm( torch.cat( local_input, 2 ) )\n \n #self.bw_RNN.flatten_parameters()\n #idxs = range(bw_total_input.size(0),-1,-1)\n rnn_input = self.RNNInputNorm( self.HiddenSizeMap( bw_total_input ) )\n bw_hidden_state, self.dir_hidden = self.bw_RNN( rnn_input )\n \n bw_mlp_input = rnn_input + self.hidden_dropout( bw_hidden_state )\n deep_reps = self.output_mlp( self.MLPInputNorm( bw_mlp_input ) )\n \n #bw_final_input = [bw_mlp_input + self.hidden_dropout( deep_reps )] + bw_sublabels_rep\n bw_final_input = torch.cat( [bw_mlp_input + self.hidden_dropout(deep_reps)] + bw_sublabels_rep, -1 )\n bw_scores = F.log_softmax( self.BWOutputNorm( self.bw_hidden2tag( bw_final_input ) ), dim=2 )\n \n return (bw_hidden_state, bw_scores)\n\n # NOTE: we assume \"input\" is a list of all inputs given to this layer, \"bw_label_stream\" is the stream of backward labels, so that accessing the i-th position of bw_label_stream when predicting label at position i, gives the label on the right of the current position.\n def fast_forward(self, input, bw_streams):\n \n vflag = (self.TEST == 1)\n if self.TeachingSignal and (not vflag):\n return self.train_forward(input, bw_streams)\n else:\n return self.test_forward(input, bw_streams)\n\n def test_forward(self, input, bw_streams):\n \n # NOTE: we assume the first element of input is the lexical-level representation computed by the encoder, that is its hidden state.\n #lex_rnn_out = input[0]\n vflag = (self.TEST == 1)\n dims = input[0].size()\n sequence_length = 
dims[0]\n        batch_size = self.batch_size\n        bw_label_streams = bw_streams[0]\n\n        #print(' - SimpleDecoder.forward, input size: {}'.format(input[0].size()))\n        #sys.stdout.flush()\n\n        indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)\n        source_length = sequence_length\n        sequence_length = len(indeces)\n        gold_sequence_length = bw_label_streams[0].size(0)\n        gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n        \n        embedding_mask = dropout_mask_dims( [1, batch_size, self.label_embed_dim], self.emb_dropout_p, self.dtype)\n        hidden_layer_mask = dropout_mask_dims( [batch_size, self.hidden_dim], self.hid_dropout_p, self.dtype)\n        if vflag:\n            embedding_mask = torch.ones( [1, batch_size, self.label_embed_dim] ).type(self.dtype)\n            hidden_layer_mask = torch.ones( [batch_size, self.hidden_dim] ).type(self.dtype)\n        \n        hidden_state = torch.zeros(sequence_length, batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n        scores = torch.zeros(sequence_length, batch_size, self.tagset_size).type(self.dtype) #VARIABLE\n        start_idx = 0\n        if self.direction == 1 or self.direction == 3:\n            start_idx = -1\n        next_labels = bw_label_streams[0][start_idx,:]\n        prev_input = torch.cat( input, 2 )\n        next_sublabels = bw_streams[1] #VARIABLE\n        \n        # NEW: TEST IT!!!\n        bin_size = 1\n        if self.output_length_factor < 1.0:\n            bin_size = int(1 / self.output_length_factor) + 1\n\n        for i in indeces:\n            source_idx = int( i / self.output_length_factor )\n            bin_bound = min(source_length,source_idx+bin_size) # NEW: TEST IT!!!\n            target_idx = int( i * gold_to_hyp_length_factor )\n\n            if self.TeachingSignal and (not vflag):\n                next_labels = bw_label_streams[0][target_idx,:]\n                if self.n_subparts > 0:\n                    next_sublabels = bw_streams[1][target_idx,:,:] #VARIABLE #GRAPHCHECKPOINT\n\n            curr_lex_input = torch.sum( prev_input[source_idx:bin_bound,:,:], 0 ) # SOURCE INDEXING ## This is ~different in 'train_forward'\n            #curr_lex_input = prev_input[source_idx,:,:] # TMP, SOURCE INDEXING ...\n            bw_sublabels_rep = self.SLM( curr_lex_input, next_sublabels, 0 )\n            bw_total_input_lst = [curr_lex_input.view(1, batch_size, -1)] # SOURCE INDEXING # NEW: TEST IT!!!\n            bw_total_input_lst.append( self.embed_dropout( self.bw_label_embeddings( next_labels ).view(1, batch_size, -1) ) )\n            \n            if self.attention_heads > 0:\n                #print(' xxx SimpleDecoder, applying attention: {}, {}'.format(hidden_state[i,:,:].size(), prev_input.size()))\n                #sys.stdout.flush()\n                c, alphas = self.LexAttention( hidden_state[i,:,:].clone().detach().view(batch_size, 1, -1), prev_input.transpose(0,1).contiguous().detach() )\n                #bw_total_input_lst.append( c )\n                # We gate-mix the original input and the attention vector\n                g_lambda = F.sigmoid( self.h_lin( bw_total_input_lst[0] ) + self.a_lin(c) )\n                bw_total_input_lst[0] = g_lambda * bw_total_input_lst[0] + (1.0 - g_lambda) * c\n\n            bw_total_input = self.BWInputNorm( torch.cat( bw_total_input_lst, 2 ) )\n            rnn_input = self.RNNInputNorm( self.hidden_dropout( self.HiddenSizeMap( bw_total_input ) ) ) # NEW: hidden_dropout !\n            _, dec_hidden_state = self.bw_RNN( rnn_input )\n            #hidden_state[i,:,:] = dec_hidden_state[0,:,:]\n\n            bw_mlp_input = self.MLPInputNorm( rnn_input[0] + self.hidden_dropout( dec_hidden_state[0,:,:] ) )\n            deep_reps = self.output_mlp( bw_mlp_input )\n            \n            final_dec_state = bw_mlp_input + self.hidden_dropout( deep_reps )\n            hidden_state[i,:,:] = final_dec_state\n\n            bw_final_input = torch.cat( [final_dec_state] + bw_sublabels_rep, -1 )\n            scores[i,:,:] = F.log_softmax( self.BWOutputNorm( 
self.bw_hidden2tag( bw_final_input ) ), dim=1 )\n\n (max_scores, max_indeces) = torch.max(scores[i,:,:], 1)\n max_indeces = max_indeces.squeeze()\n \n if vflag:\n next_labels = max_indeces\n next_labels = next_labels.view(self.batch_size)\n max_indeces = max_indeces.unsqueeze(0)\n\n if self.n_subparts > 0:\n next_sublabels = torch.LongTensor(self.tag_to_subparts[max_indeces].transpose(0,1)).type(self.ltype) #VARIABLE #GRAPHCHECKPOINT\n\n return (hidden_state, scores)\n\n def forward(self, input, bw_streams):\n\n return self.test_forward(input, bw_streams)\n\n def set_batch_size(self, val):\n self.batch_size = val\n if self.n_subparts > 0:\n self.sl_batch_size = val\n self.bw_RNN.set_batch_size( val )\n self.SLM.set_batch_size(val)\n\n def set_test_mode(self, val):\n self.TEST = val\n self.bw_RNN.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n\n self.TeachingSignal = val\n\n\nclass BidirectionalDecoder(nn.Module):\n\n def __init__(self, nn_params, input_size, direction):\n \n super(BidirectionalDecoder, self).__init__()\n \n # TMP FOR DEBUG\n self.debug_flag = False\n \n self.attention_heads = nn_params.attention_heads\n self.start_tag_idx = nn_params.start_tag_idx\n self.end_tag_idx = nn_params.end_tag_idx\n self.batch_size = nn_params.batch_size\n self.vocab_size = nn_params.word_vocab_size\n self.char_vocab_size = nn_params.char_vocab_size\n self.tagset_size = nn_params.tag_vocab_size\n self.hidden_dim = 2*nn_params.hidden_dim\n self.label_embed_dim = nn_params.label_embed_dim # NEW\n self.char_embed_dim = nn_params.char_embed_dim\n self.char_hidden_dim = nn_params.char_hidden_dim\n self.label_context_size = nn_params.label_context_size\n self.lex_hidden_layers = nn_params.lex_hidden_layers\n self.lab_hidden_layers = nn_params.lab_hidden_layers\n self.n_subparts = nn_params.n_subparts\n self.sl_batch_size = 1\n if self.n_subparts > 0:\n self.tag_to_subparts = nn_params.tag_to_subparts\n self.num_directions = 1\n self.CUDA = nn_params.CUDA\n self.TEST = 0\n self.TeachingSignal = True\n self.dtype = nn_params.dtype\n self.ltype = nn_params.ltype\n self.direction = direction\n self.output_length_factor = nn_params.output_length_factor\n if self.direction == 0 or self.direction == 1:\n self.output_length_factor = 1.0\n \n # TMP FOR DEBUG\n #self.word_dict = nn_params.word_dict\n #self.label_dict = nn_params.label_dict\n #self.ix_to_sublabel = nn_params.ix_to_sublabel\n\n self.fw_label_embeddings = nn.Embedding(self.tagset_size, nn_params.label_embed_dim, sparse=False)\n self.emb_dropout_p = nn_params.embed_dropout # NEW\n self.embed_dropout = nn.Dropout(p=nn_params.embed_dropout)\n \n attention_size = input_size\n sem_attention_size = self.hidden_dim\n self.input_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim\n \n self.FWInputNorm = nn.LayerNorm( self.input_dim )\n self.HiddenSizeMap = nn.Linear(self.input_dim, self.hidden_dim)\n if self.attention_heads > 0:\n self.h_lin = nn.Linear(attention_size, attention_size)\n self.a_lin = nn.Linear(attention_size, attention_size)\n self.LexAttention = ga.GlobalAttention([self.hidden_dim, attention_size, attention_size], attention_size)\n self.SemAttention = ga.GlobalAttention([self.hidden_dim, self.hidden_dim, self.hidden_dim], sem_attention_size)\n self.SLM = SubLabelModule(nn_params, input_size)\n\n self.RNNInputNorm = nn.LayerNorm( self.hidden_dim )\n self.fw_RNN = ContextualFeatureEncoder(self.hidden_dim, self.hidden_dim, self.batch_size, 1, False, nn_params.dtype, 
nn_params.contextual_encoder_type)\n        self.hid_dropout_p = nn_params.hidden_dropout # NEW\n        self.hidden_dropout = nn.Dropout(p=nn_params.hidden_dropout)\n        \n        self.MLPInputNorm = nn.LayerNorm( self.hidden_dim )\n        self.FWOutputNorm = nn.LayerNorm( self.tagset_size )\n        self.output_mlp = ReLU_MLP( [2,self.hidden_dim, self.hidden_dim] )\n        output_dim = self.hidden_dim\n        if self.n_subparts > 0:\n            output_dim = output_dim + nn_params.sublabel_hidden_dim\n        output_dim = output_dim + nn_params.attention_heads * sem_attention_size\n        self.hidden2tag = nn.Linear(output_dim, self.tagset_size)\n\n        #self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n    \n    def init_hidden(self):\n        #self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n        self.fw_RNN.init_hidden()\n        self.SLM.init_hidden()\n    \n    def resize_embeddings(self, nn_params):\n\n        if nn_params.tag_vocab_size > self.tagset_size:\n            old_embeddings = self.fw_label_embeddings\n            self.fw_label_embeddings = nn.Embedding(nn_params.tag_vocab_size, nn_params.label_embed_dim, sparse=False)\n            # Copy the old weights through .data: an in-place write on a leaf Parameter that requires grad raises a RuntimeError.\n            self.fw_label_embeddings.weight.data[:self.tagset_size,:] = old_embeddings.weight.data\n            \n            old_lin = self.hidden2tag\n            output_dim = self.hidden_dim\n            if self.n_subparts > 0:\n                output_dim = output_dim + nn_params.sublabel_hidden_dim\n            output_dim = output_dim + nn_params.attention_heads * self.hidden_dim # keep in sync with __init__ (sem_attention_size == self.hidden_dim)\n            self.hidden2tag = nn.Linear(output_dim, nn_params.tag_vocab_size)\n            self.hidden2tag.weight.data[:self.tagset_size,:] = old_lin.weight.data\n            \n            old_norm = self.FWOutputNorm\n            self.FWOutputNorm = nn.LayerNorm(nn_params.tag_vocab_size)\n            self.FWOutputNorm.weight.data[:self.tagset_size] = old_norm.weight.data\n            \n            self.tagset_size = nn_params.tag_vocab_size\n    \n    def train_forward(self, input, fw_streams, bw_states):\n\n        dims = input[0].size()\n        sequence_length = dims[0]\n        batch_size = self.batch_size\n        fw_label_streams = fw_streams[0]\n        prev_sublabels = fw_streams[1]\n        \n        indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)\n        source_length = sequence_length\n        sequence_length = len(indeces)\n        gold_sequence_length = fw_label_streams[0].size(0)\n        gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n        \n        source_idxs = [int( i / self.output_length_factor ) for i in indeces]\n        target_idxs = [int( i * gold_to_hyp_length_factor ) for i in indeces]\n\n        input_tsr = torch.cat( input, 2 )[source_idxs,:,:]\n        local_input = [input_tsr]\n        local_input.append( self.embed_dropout( self.fw_label_embeddings(fw_label_streams[0][target_idxs,:]) ) )\n\n        # TMP FOR DEBUG\n        if self.debug_flag:\n            print('')\n            print(' ************************************************')\n            print(' * BidirectionalDecoder.train_forward -')\n            print('')\n            print(' *** indeces ({}): {}'.format(len(indeces), list(indeces)))\n            print(' *** source_idxs ({}): {}'.format(len(source_idxs), source_idxs))\n            print(' *** target_idxs ({}): {}'.format(len(target_idxs), target_idxs))\n            print('*')\n            print(' * Size of input: {}'.format( torch.cat(input, 2).size() ))\n            print(' * Size of local_input: {}'.format( torch.cat(local_input, 2).size() ))\n            print(' * Size of fw_label_streams: {}'.format(fw_label_streams[0].size()))\n            print(' *')\n            print(' * BidirectionalDecoder.train_forward, forward sublabels and labels:')\n            for tgt_idx in target_idxs:\n                # print(' {}'.format([self.ix_to_sublabel[sl.item()] for sl in prev_sublabels[tgt_idx,:,0]]))\n                print(' -----')\n                print('@{}, {}'.format(tgt_idx, self.label_dict.index2token(fw_label_streams[0][tgt_idx,0])))\n            print('')\n            print(' * BidirectionalDecoder.train_forward, 
len of local_input: {}'.format(len(local_input)))\n for debug_idx in range(len(local_input)):\n print(' * {}'.format(local_input[debug_idx].size()))\n print(' ---')\n #print(' * BidirectionalDecoder.train_forward, size of prev_sublabels: {}'.format(prev_sublabels.size()))\n print(' * BidirectionalDecoder.train_forward, size of fw_label_streams[0]: {}'.format(fw_label_streams[0].size()))\n #print(' ***********************************************************')\n #print('')\n # END TMP FOR DEBUG\n\n fw_sublabels_rep = []\n if self.n_subparts > 0:\n fw_sublabels_rep = self.SLM( input_tsr, prev_sublabels[target_idxs,:,:], 1 )\n\n # TMP FOR DEBUG\n if self.debug_flag:\n #print(' * BidirectionalDecoder.train_forward, size of fw_sublabels_rep: {}'.format(fw_sublabels_rep[0].size()))\n print(' ***********************************************************')\n sys.stdout.flush()\n\n #local_input = local_input + fw_sublabels_rep\n fw_total_input = self.FWInputNorm( torch.cat( local_input, 2 ) )\n \n rnn_input = self.RNNInputNorm( self.HiddenSizeMap( fw_total_input ) )\n fw_hidden_state, self.dir_hidden = self.fw_RNN( rnn_input )\n \n fw_mlp_input = rnn_input + self.hidden_dropout( fw_hidden_state )\n deep_reps = self.output_mlp( self.MLPInputNorm( fw_mlp_input ) )\n \n fw_final_input = torch.cat( [fw_mlp_input + self.hidden_dropout( deep_reps + bw_states[0][indeces] )] + fw_sublabels_rep, -1 )\n fw_scores = F.log_softmax( self.FWOutputNorm( self.hidden2tag( fw_final_input ) ), dim=2 )\n \n return (fw_hidden_state, fw_scores)\n \n # NOTE: we assume \"bw_states\" contains backward hidden states and backward predictions, this and only this information, and in this order.\n # OBSOLETE: remove it !\n def fast_forward(self, input, fw_streams, bw_states):\n \n vflag = (self.TEST == 1)\n if self.TeachingSignal and (not vflag):\n #print(' * BidirectionalDecoder.train_forward...')\n #sys.stdout.flush()\n return self.train_forward(input, fw_streams, bw_states)\n else:\n #print(' * BidirectionalDecoder.test_forward...')\n #sys.stdout.flush()\n return self.test_forward(input, fw_streams, bw_states)\n\n\n # NOTE: we assume \"bw_states\" contains backward hidden states and backward predictions, this and only this information, and in this order.\n def test_forward(self, input, fw_streams, bw_states):\n \n # NOTE: we assume the first element of input is the lexical-level representation computed by the encoder, that is its hidden state.\n vflag = (self.TEST == 1)\n dims = input[0].size()\n sequence_length = dims[0]\n batch_size = self.batch_size\n fw_label_streams = fw_streams[0]\n \n target_length = bw_states[0].size(0)\n indeces = decoding_indeces_(self.direction, target_length, 1.0) # We use the length of the output sequence predicted by a previous simple-decoder\n source_length = sequence_length\n sequence_length = len(indeces)\n gold_sequence_length = fw_label_streams[0].size(0)\n gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)\n \n embedding_mask = dropout_mask_dims( [1, batch_size, self.label_embed_dim], self.emb_dropout_p, self.dtype)\n hidden_layer_mask = dropout_mask_dims( [batch_size, self.hidden_dim], self.hid_dropout_p, self.dtype)\n if vflag:\n embedding_mask = torch.ones( [1, batch_size, self.label_embed_dim] ).type(self.dtype)\n hidden_layer_mask = torch.ones( [batch_size, self.hidden_dim] ).type(self.dtype)\n\n fw_hidden_state = torch.zeros(sequence_length, batch_size, self.hidden_dim).type(self.dtype) #VARIABLE\n fw_scores = torch.zeros(sequence_length, batch_size, 
self.tagset_size).type(self.dtype) #VARIABLE\n        start_idx = 0\n        if self.direction == 1 or self.direction == 3:\n            start_idx = -1\n        prev_labels = fw_label_streams[0][start_idx,:]\n        prev_input = torch.cat( input, 2 )\n        prev_sublabels = fw_streams[1] #VARIABLE\n\n        # NEW: TEST IT!!!\n        bin_size = 1\n        if self.output_length_factor < 1.0:\n            bin_size = int(1 / self.output_length_factor) + 1\n\n        self.fw_RNN.set_hidden_state( bw_states[0][0,:,:].view(1, batch_size, -1))\n        for i in indeces:\n            source_idx = int( i / self.output_length_factor )\n            bin_bound = min(source_length,source_idx+bin_size) # NEW: TEST IT!!!\n            target_idx = int( i * gold_to_hyp_length_factor )\n            \n            if self.TeachingSignal and (not vflag):\n                prev_labels = fw_label_streams[0][target_idx,:]\n                if self.n_subparts > 0:\n                    prev_sublabels = fw_streams[1][target_idx,:,:] #VARIABLE #GRAPHCHECKPOINT\n\n            curr_lex_input = torch.sum(prev_input[source_idx:bin_bound,:,:],0) ## This is ~different in 'train_forward'\n            #curr_lex_input = prev_input[source_idx,:,:]\n            fw_sublabels_rep = self.SLM( curr_lex_input, prev_sublabels, 0 )\n            fw_total_input_lst = [curr_lex_input.view(1, batch_size, -1)] # SOURCE INDEXING # NEW: TEST IT!!!\n            fw_total_input_lst.append( self.embed_dropout( self.fw_label_embeddings( prev_labels ).view(1, batch_size, -1) ) )\n            \n            if self.attention_heads > 0:\n                c, alphas = self.LexAttention( fw_hidden_state[i,:,:].clone().view(batch_size, 1, -1), prev_input.transpose(0, 1).contiguous() )\n                #fw_total_input_lst.append( c )\n                g_lambda = F.sigmoid( self.h_lin( fw_total_input_lst[0] ) + self.a_lin(c) )\n                fw_total_input_lst[0] = g_lambda * fw_total_input_lst[0] + (1.0 - g_lambda) * c\n            \n            fw_total_input = self.FWInputNorm( torch.cat( fw_total_input_lst, 2 ) )\n            rnn_input = self.RNNInputNorm( self.hidden_dropout( self.HiddenSizeMap( fw_total_input ) ) )\n            _, dec_hidden_state = self.fw_RNN( rnn_input )\n            #fw_hidden_state[i,:,:] = dec_hidden_state[0,:,:]\n\n            #mlp_input = fw_total_input[0] + hidden_layer_mask*( dec_hidden_state[0,:,:] )\n            mlp_input = self.MLPInputNorm( rnn_input[0] + self.hidden_dropout( dec_hidden_state[0,:,:] ) )\n            deep_reps = self.output_mlp( mlp_input )\n            \n            dec_final_state = mlp_input + self.hidden_dropout(deep_reps)\n            fw_hidden_state[i,:,:] = dec_final_state\n            atts = []\n            if self.attention_heads > 0:\n                sem_c, sem_alphas = self.SemAttention(dec_final_state.clone().view(batch_size, 1, -1), bw_states[0].transpose(0, 1).contiguous())\n                atts = [sem_c.view(batch_size, -1)]\n            \n            #fw_final_input = torch.cat( [mlp_input + self.hidden_dropout(deep_reps) + bw_states[0][i,:,:]] + fw_sublabels_rep + atts, -1 )\n            fw_final_input = torch.cat( [dec_final_state + bw_states[0][i,:,:]] + fw_sublabels_rep + atts, -1 )\n            \n            #fw_scores[i,:,:] = F.log_softmax( self.hidden2tag( fw_final_input + torch.sum( hidden_layer_mask*( torch.stack(fw_sem_atts) ) )), dim=1 )\n            fw_scores[i,:,:] = F.log_softmax( self.FWOutputNorm( self.hidden2tag( fw_final_input ) ), dim=1 )\n\n            (max_scores, max_indeces) = torch.max(fw_scores[i,:,:], 1)\n            max_indeces = max_indeces.squeeze()\n            \n            if vflag:\n                prev_labels = max_indeces\n                prev_labels = prev_labels.view(self.batch_size)\n                max_indeces = max_indeces.unsqueeze(0)\n\n            if self.n_subparts > 0:\n                prev_sublabels = torch.LongTensor(self.tag_to_subparts[max_indeces].transpose(0,1)).type(self.ltype) #VARIABLE #GRAPHCHECKPOINT\n\n        return (fw_hidden_state, fw_scores)\n\n    def forward(self, input, fw_streams, bw_states):\n\n        # TMP FOR DEBUG\n        #self.train_forward(input, fw_streams, bw_states)\n        \n        return self.test_forward(input, 
fw_streams, bw_states)\n\n def set_batch_size(self, val):\n self.batch_size = val\n if self.n_subparts > 0:\n self.sl_batch_size = val\n self.fw_RNN.set_batch_size( val )\n self.SLM.set_batch_size(val)\n\n def set_test_mode(self, val):\n self.TEST = val\n self.fw_RNN.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n\n self.TeachingSignal = val\n\n# ---------- Models for Speech decoding ----------\n\nclass Conv1dNormWrapper(nn.Module):\n '''\n class Conv1dNormWrapper\n \n Wrap a Conv1d class to be used in a nn.Sequential module, adding a layer normalization module.\n '''\n\n def __init__(self, input_size, output_size, kernel, stride_factor):\n\n super(Conv1dNormWrapper,self).__init__()\n\n self.conv = nn.Conv1d(input_size, output_size, kernel, stride=stride_factor)\n self.cNorm = nn.LayerNorm( output_size )\n\n def forward(self, input):\n\n return self.cNorm( self.conv( input ).permute(2,0,1) ).permute(1,2,0)\n\nclass LSTMWrapper(nn.Module):\n '''\n LSTMWrapper\n \n Wrap a LSTM layer to be used in a nn.Sequential module.\n '''\n\n def __init__(self, input_size, output_size, bidirFlag):\n\n super(LSTMWrapper,self).__init__()\n self.lstm = nn.LSTM(input_size, output_size, bidirectional=bidirFlag)\n\n def forward(self, input):\n\n output, _ = self.lstm( input )\n return output\n\nclass BasicEncoder(nn.Module):\n \n def __init__(self, params):\n \n super(BasicEncoder,self).__init__()\n #self.window_size = params.window_size\n\n # Parameter initialization\n # 1. Size of convolution layer\n self.input_size = params.num_features\n self.input_conv = self.input_size\n self.speech_conv_size = params.speech_conv_size\n \n # 2. Size of LSTM layer\n self.input_size_lstm = self.speech_conv_size\n self.hidden_size = params.speech_lstm_size\n \n # 3. Size of the output, that is of the linear layer\n self.output_size = params.output_size\n \n self.num_conv = params.speech_conv\n self.num_lstm_layers = params.num_lstm_layers\n self.conv_kernel = params.conv_kernel\n self.conv_kernel_width = params.conv_kernel_width\n self.conv_kernel_height = params.conv_kernel_height\n self.conv2d_dim = params.small_dim\n self.kernel_2d_hw_ratio = params.kernel_2d_hw_ratio\n self.stride_factor1 = params.conv_stride1\n self.stride_factor2 = params.conv_stride2\n\n # Layer initialization\n # 1. Convolutions\n conv_layers = []\n for i in range(self.num_conv):\n conv_stride = 1\n if i == self.num_conv-1:\n conv_stride = 2\n input_size = self.speech_conv_size\n if i == 0:\n input_size = self.input_conv\n conv_layers.append( ('Conv'+str(i+1), Conv1dNormWrapper(input_size, self.speech_conv_size, self.conv_kernel, conv_stride)) )\n conv_layers.append( ('Dropout'+str(i+1), nn.Dropout(p=params.drop_ratio)) )\n #conv_layers.append( ('ConvNorm'+str(i+1), nn.BatchNorm1d( self.speech_conv_size )) )\n self.convolutions = nn.Sequential( OrderedDict(conv_layers) )\n \n '''#self.conv1 = nn.Conv2d(self.input_conv,self.speech_conv_size, (self.conv_kernel_width, self.conv_kernel_height), stride=(self.stride_factor1, self.stride_factor1))\n self.conv1 = nn.Conv1d(self.input_conv,self.speech_conv_size, self.conv_kernel, stride=self.stride_factor1)\n #self.conv2 = nn.Conv1d(self.speech_conv_size,self.speech_conv_size,self.conv_kernel,stride=self.stride_factor2)'''\n #self.CONV_norm = nn.LayerNorm( self.speech_conv_size )\n \n # 2. 
Recurrent layers\n recurrent_layers = []\n for i in range(self.num_lstm_layers):\n input_size = 2*self.hidden_size\n if i == 0:\n input_size = self.input_size_lstm\n recurrent_layers.append( ('LSTM'+str(i+1), LSTMWrapper(input_size, self.hidden_size, True)) )\n recurrent_layers.append( ('ConvNorm'+str(i+1), nn.LayerNorm( 2*self.hidden_size )) )\n recurrent_layers.append( ('Dropout'+str(i+1), nn.Dropout(p=params.drop_ratio)) )\n self.rnns = nn.Sequential( OrderedDict(recurrent_layers) )\n\n #self.h_dropout = nn.Dropout(p=params.drop_ratio)\n #self.LSTM_norm = nn.LayerNorm(self.hidden_size*2)\n #self.rnns = nn.LSTM(self.input_size_lstm,self.hidden_size,num_layers = self.num_lstm_layers,bidirectional=True)\n \n #Linear Layer\n self.linear_layer = nn.Linear(2*self.hidden_size, self.output_size)\n \n #small_dim = int( math.sqrt(seq_len / hw_ratio) + 0.5 )\n #x_pad = torch.randn(num_features, batch_size, small_dim * hw_ratio * small_dim - seq_len)\n #x_padded = torch.cat( [x, x_pad], 2 )\n #x_conv = x_padded.view(num_features, batch_size, hw_ratio*small_dim, small_dim)\n\n '''\n print(' *** Initializing BasicEncoder:')\n print(' * Input size: {}'.format(params.num_features))\n print(' * Output size: {}'.format(params.output_size))\n print(' * Convolution size: {}'.format(params.speech_conv_size))\n print(' * Hidden size: {}'.format(params.speech_lstm_size))\n print(' -')\n print(' * Stride factor 1: {}'.format(params.conv_stride1))\n print(' * Stride factor 2: {}'.format(params.conv_stride2))\n print(' * Num. LSTM layers: {}'.format(params.num_lstm_layers))\n print(' ***')\n '''\n\n\n def forward(self, x):\n # Input has shape (sequence_length, batch_size, num. of channels), that is (L, N, C), convolution needs it to be (N, C, L)\n \n # 1. For Conv2d\n #(L, N, C) = x.size()\n #small_dim = int( math.sqrt(float(L) / float(self.kernel_2d_hw_ratio)) )\n #out = self.conv1( x.permute(1, 2, 0).view(N, C, small_dim * self.kernel_2d_hw_ratio, small_dim) )\n #out = self.h_dropout( out.view(N, self.speech_conv_size, -1).permute(2,0,1) )\n # ---------------------\n \n '''# 2. 
For Conv1d\n out = self.conv1( x.permute(1, 2, 0) )\n out = self.h_dropout( out.permute(2,0,1) )\n # ---------------------\n \n #out = self.conv2(x)\n\n output, _ = self.rnns( self.conv_output_norm( out ) )\n output = self.h_dropout(output)\n \n output = self.linear_layer( self.LSTM_norm(output) )\n #output = self.log_softmax(output)'''\n \n # New forward code with generic layer structures\n out = self.convolutions( x.permute(1, 2, 0) )\n #out = self.rnns( self.CONV_norm( out.permute(2,0,1) ) )\n #output = self.linear_layer( self.LSTM_norm( out ) )\n out = self.rnns( out.permute(2, 0, 1) )\n output = self.linear_layer( out )\n \n return (output, output, out)\n\n\nclass BasicSpeechEncoder(nn.Module):\n\n def __init__(self, params, nn_params):\n\n super(BasicSpeechEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n\n self.encoder = BasicEncoder(params)\n self.log_softmax = nn.LogSoftmax(dim = 2)\n\n def get_fw_parameters(self):\n return self.parameters()\n\n def get_bw_parameters(self):\n return self.get_fw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n\n (representations, reps, hidden_states) = self.encoder( x )\n scores = self.log_softmax( representations )\n\n return (scores, scores, hidden_states) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n #return (scores, representations)\n\n def set_test_mode(self, val):\n \n return\n\n def set_teaching_signal_flag(self, val):\n\n return\n\n def set_speaker_val(self, val):\n\n self.speaker_val = val\n\n def pad_input(self, input, val):\n\n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n #SpkID = torch.ones_like(input)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n #SpkID[:,i,:] = SpkID[:,i,:] * self.speaker_val[i] * 0.002\n #return torch.cat( [padder, input + SpkID, padder], 0 )\n return torch.cat( [padder, input, padder], 0 )\n\nclass BasicSpeechSeqEncoder(nn.Module):\n \n def __init__(self, params, nn_params):\n \n super(BasicSpeechSeqEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n \n self.encoder = BasicEncoder(params)\n self.seq_encoder = SimpleDecoder(nn_params, 2*params.speech_lstm_size, 0)\n\n def get_fw_parameters(self):\n \n return self.parameters()\n\n def get_bw_parameters(self):\n\n return self.get_fw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n \n (sequence_length, batch_size, num_features) = x.size()\n self.seq_encoder.set_batch_size( batch_size )\n \n (representations, reps, hidden_states) = self.encoder(x)\n\n (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0]))\n fw_streams = (prev_labels, prev_sublabels)\n \n self.seq_encoder.init_hidden()\n (fw_hidden_state, fw_scores) = self.seq_encoder([hidden_states], fw_streams) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n\n return (fw_scores, fw_scores, fw_hidden_state)\n \n def set_test_mode(self, val):\n \n self.seq_encoder.set_test_mode( val )\n \n def set_teaching_signal_flag(self, val):\n \n self.seq_encoder.set_teaching_signal_flag( val )\n \n def load_encoder(self, bsencoder):\n \n self.encoder.load_state_dict( bsencoder.encoder.state_dict() )\n \n def set_speaker_val(self, val):\n \n self.speaker_val = val\n \n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.cuda.FloatTensor(1, batch_size, num_features)\n for i in 
range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\nclass BasicSpeechBiseqEncoder(nn.Module):\n \n def __init__(self, params, nn_params):\n \n super(BasicSpeechBiseqEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n \n self.encoder = BasicEncoder(params)\n #self.seq_encoder = SimpleDecoder(nn_params, params.output_size, 0)\n #self.seq_encoder = SimpleDecoder(nn_params, params.output_size, 2) # NEW: TEST IT!!!\n self.bw_seq_encoder = SimpleDecoder(nn_params, 2*params.speech_lstm_size, 1) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n #self.log_softmax = nn.LogSoftmax(dim = 2)\n self.fw_seq_encoder = BidirectionalDecoder(nn_params, 2*params.speech_lstm_size, 0)\n\n def get_fw_parameters(self):\n\n return list(filter(lambda p: p.requires_grad, self.encoder.parameters())) + list(filter(lambda p: p.requires_grad, self.fw_seq_encoder.parameters()))\n\n def get_bw_parameters(self):\n\n return list(filter(lambda p: p.requires_grad, self.encoder.parameters())) + list(filter(lambda p: p.requires_grad, self.bw_seq_encoder.parameters()))\n \n def forward(self, x, next_labels, prev_labels):\n \n (sequence_length, batch_size, num_features) = x.size()\n self.fw_seq_encoder.set_batch_size( batch_size )\n self.bw_seq_encoder.set_batch_size( batch_size )\n \n (representations, reps, hidden_states) = self.encoder(x)\n\n (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0]))\n fw_streams = (prev_labels, prev_sublabels)\n bw_streams = (next_labels, next_sublabels)\n \n self.bw_seq_encoder.init_hidden()\n self.fw_seq_encoder.init_hidden()\n #(fw_hidden_state, fw_scores) = self.seq_encoder([representations], fw_streams)\n (bw_hidden_state, bw_scores) = self.bw_seq_encoder([hidden_states], bw_streams) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE\n (fw_hidden_state, fw_scores) = self.fw_seq_encoder([hidden_states], fw_streams, [bw_hidden_state, bw_scores])\n global_scores = 0.5 * (fw_scores + bw_scores)\n\n return (fw_scores, bw_scores, fw_hidden_state)\n\n def set_test_mode(self, val):\n\n self.bw_seq_encoder.set_test_mode( val )\n self.fw_seq_encoder.set_test_mode( val )\n\n def set_teaching_signal_flag(self, val):\n\n self.bw_seq_encoder.set_teaching_signal_flag( val )\n self.fw_seq_encoder.set_teaching_signal_flag( val )\n\n def load_encoder(self, bsencoder):\n\n self.encoder.load_state_dict( bsencoder.encoder.state_dict() )\n\n def set_speaker_val(self, val):\n\n self.speaker_val = val\n\n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\n #self.speaker_val = val\n #(sequence_length, batch_size, num_features) = input.size()\n #padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n #SpkID = torch.ones_like(input)\n #for i in range( batch_size ):\n # padder[:,i,:] = self.speaker_val[i]\n # SpkID[:,i,:] = SpkID[:,i,:] * self.speaker_val[i] * 0.002\n #return torch.cat( [padder, input + SpkID, padder], 0 )\n\nclass MLSpeechEncoder(nn.Module):\n\n def __init__(self, ch_params, tk_params, nn_params):\n\n super(MLSpeechEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n\n self.char_encoder = BasicSpeechEncoder(ch_params, nn_params)\n self.token_encoder = 
BasicSpeechEncoder(tk_params, nn_params)\n\n def get_fw_parameters(self):\n\n return self.parameters()\n\n def get_bw_parameters(self):\n\n return self.get_fw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n\n (ch_scores, ch_sc, ch_reps) = self.char_encoder(x, next_labels, prev_labels)\n (tk_scores, tk_sc, tk_reps) = self.token_encoder(ch_reps, next_labels, prev_labels)\n\n return (tk_scores, tk_scores, tk_reps)\n\n def load_char_encoder(self, char_encoder):\n\n self.char_encoder.encoder.load_state_dict( char_encoder.encoder.state_dict() )\n #for param in self.char_encoder.encoder.parameters():\n # param.requires_grad = False\n \n def freeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = False\n\n def unfreeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = True\n\n def load_token_encoder(self, token_encoder):\n\n self.token_encoder.encoder.rnns.load_state_dict( token_encoder.encoder.rnns.state_dict() )\n\n def set_test_mode(self, val):\n \n return\n\n def set_teaching_signal_flag(self, val):\n \n return\n\n def set_speaker_val(self, val):\n\n self.speaker_val = val\n\n def pad_input(self, input, val):\n \n self.speaker_val = val\n (sequence_length, batch_size, num_features) = input.size()\n padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n for i in range( batch_size ):\n padder[:,i,:] = self.speaker_val[i]\n return torch.cat( [padder, input, padder], 0 )\n\nclass MLSpeechSeqEncoder(nn.Module):\n \n def __init__(self, ch_params, tk_params, nn_params):\n \n super(MLSpeechSeqEncoder,self).__init__()\n \n self.speaker_val = [globals.user_speaker_val]\n \n self.char_encoder = BasicSpeechEncoder(ch_params, nn_params)\n self.token_encoder = BasicSpeechSeqEncoder(tk_params, nn_params)\n\n def get_fw_parameters(self):\n\n return self.char_encoder.get_fw_parameters() + self.token_encoder.get_fw_parameters()\n\n def get_bw_parameters(self):\n\n return self.char_encoder.get_bw_parameters() + self.token_encoder.get_bw_parameters()\n\n def forward(self, x, next_labels, prev_labels):\n \n (ch_scores, ch_sc, ch_reps) = self.char_encoder(x, next_labels, prev_labels)\n (fw_tk_scores, bw_tk_scores, tk_reps) = self.token_encoder(ch_reps, next_labels, prev_labels)\n \n return (fw_tk_scores, bw_tk_scores, tk_reps)\n \n def load_char_encoder(self, char_encoder):\n \n self.char_encoder.encoder.load_state_dict( char_encoder.encoder.state_dict() )\n #for param in self.char_encoder.encoder.parameters():\n # param.requires_grad = False\n \n def freeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = False\n\n def unfreeze_char_encoder(self):\n \n for param in self.char_encoder.parameters():\n param.requires_grad = True\n\n def load_token_encoder(self, token_encoder):\n \n self.token_encoder.encoder.rnns.load_state_dict( token_encoder.encoder.rnns.state_dict() )\n self.token_encoder.bw_seq_encoder.load_state_dict( token_encoder.bw_seq_encoder.state_dict() )\n self.token_encoder.fw_seq_encoder.load_state_dict( token_encoder.fw_seq_encoder.state_dict() )\n\n def load_ml_encoder(self, ml_encoder):\n \n self.char_encoder.load_state_dict( ml_encoder.char_encoder.state_dict() )\n #print(' -- MLSpeechSeqEncoder: freezing char-encoder parameters...')\n #for param in self.char_encoder.parameters():\n # param.requires_grad = False\n self.token_encoder.encoder.load_state_dict( ml_encoder.token_encoder.encoder.state_dict() )\n #print(' -- 
MLSpeechSeqEncoder: freezing token-encoder (encoder only) parameters...')\n        #sys.stdout.flush()\n        #for param in self.token_encoder.encoder.parameters():\n        #    param.requires_grad = False\n    \n    def load_ml_seq_decoder(self, ml_encoder):\n        \n        self.char_encoder.load_state_dict( ml_encoder.char_encoder.state_dict() )\n        self.token_encoder.load_state_dict( ml_encoder.token_encoder.state_dict() )\n\n    def set_test_mode(self, val):\n        \n        self.token_encoder.set_test_mode( val )\n\n    def set_teaching_signal_flag(self, val):\n        \n        self.token_encoder.set_teaching_signal_flag( val )\n\n    def set_speaker_val(self, val):\n\n        self.speaker_val = val\n\n    def pad_input(self, input, val):\n        \n        self.speaker_val = val\n        (sequence_length, batch_size, num_features) = input.size()\n        padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n        for i in range( batch_size ):\n            padder[:,i,:] = self.speaker_val[i]\n        return torch.cat( [padder, input, padder], 0 )\n\n# ---------- Models for End-to-end SLU ----------\n\nclass SLUSimpleDecoder(nn.Module):\n\n    def __init__(self, ch_params, tk_params, nn_params):\n        \n        super(SLUSimpleDecoder,self).__init__()\n        \n        self.speaker_val = [globals.user_speaker_val]\n\n        tmp = nn_params.tag_vocab_size\n        nn_params.tag_vocab_size = nn_params.sd_tag_vocab_size\n        decoder_output_size = 0\n        if nn_params.train_char_decoder or nn_params.load_char_decoder:\n            print(' -- SLUSimpleDecoder: using character speech decoder')\n            sys.stdout.flush()\n            self.speech_decoder = BasicSpeechSeqEncoder(ch_params, nn_params)\n            decoder_output_size = nn_params.hidden_dim\n        elif nn_params.train_token_decoder or nn_params.load_token_decoder:\n            print(' -- SLUSimpleDecoder: using token speech decoder')\n            sys.stdout.flush()\n            self.speech_decoder = BasicSpeechSeqEncoder(tk_params, nn_params)\n            decoder_output_size = nn_params.hidden_dim\n        elif nn_params.train_ml_decoder or nn_params.load_ml_decoder:\n            print(' -- SLUSimpleDecoder: using 2-stage token speech decoder')\n            sys.stdout.flush()\n            self.speech_decoder = MLSpeechSeqEncoder(ch_params, tk_params, nn_params)\n            decoder_output_size = nn_params.hidden_dim\n\n        nn_params.tag_vocab_size = tmp\n        nn_params.label_embed_dim = 2 * nn_params.label_embed_dim\n        nn_params.hidden_dim = 2 * nn_params.hidden_dim\n        self.slu_decoder = SimpleDecoder(nn_params, decoder_output_size, 0)\n\n    def get_fw_parameters(self):\n\n        return self.speech_decoder.get_fw_parameters() + list(filter(lambda p: p.requires_grad, self.slu_decoder.parameters()))\n\n    def get_bw_parameters(self):\n\n        return self.speech_decoder.get_bw_parameters() + list(filter(lambda p: p.requires_grad, self.slu_decoder.parameters()))\n\n    def forward(self, input, bw_label_streams, fw_label_streams):\n        \n        (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0])) #VARIABLE x2\n        fw_streams = (fw_label_streams, prev_sublabels)\n        bw_streams = (bw_label_streams, next_sublabels)\n        \n        (sequence_length, batch_size, num_features) = input.size() # batch_size is needed below\n        #padder = torch.cuda.FloatTensor(1, batch_size, num_features)\n        #for i in range( batch_size ):\n        #    padder[:,i,:] = self.speaker_val[i]\n        #padded_input = torch.cat( [padder, input, padder], 0 )\n\n        self.slu_decoder.set_batch_size( batch_size )\n        (fw_tk_scores, bw_tk_scores, tk_reps) = self.speech_decoder(input, bw_label_streams, fw_label_streams)\n        self.slu_decoder.init_hidden()\n        (sem_hidden_states, sem_scores) = self.slu_decoder([tk_reps], fw_streams)\n\n        return (sem_scores, sem_scores, sem_hidden_states)\n\n    def load_speech_encoder(self, speech_encoder):\n\n        self.speech_decoder.load_state_dict( speech_encoder.state_dict() )\n        if isinstance(speech_encoder, MLSpeechSeqEncoder):\n            print(' -- SLUSimpleDecoder: freezing speech-encoder parameters...')\n            sys.stdout.flush()\n            for param in self.speech_decoder.char_encoder.parameters():\n                param.requires_grad = False\n\n    def set_speaker_val(self, val):\n        \n        self.speaker_val = val\n        \n    def pad_input(self, input, val):\n        \n        self.speaker_val = val\n        (sequence_length, batch_size, num_features) = input.size()\n        padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n        for i in range( batch_size ):\n            padder[:,i,:] = self.speaker_val[i]\n        return torch.cat( [padder, input, padder], 0 )\n\n    def set_test_mode(self, val):\n        \n        self.speech_decoder.set_test_mode( val )\n        self.slu_decoder.set_test_mode( val )\n\n    def set_teaching_signal_flag(self, val):\n        \n        self.speech_decoder.set_teaching_signal_flag( val )\n        self.slu_decoder.set_teaching_signal_flag( val )\n\nclass SLUBiDecoder(nn.Module):\n    \n    def __init__(self, ch_params, tk_params, nn_params):\n        \n        super(SLUBiDecoder,self).__init__()\n        \n        self.speaker_val = [globals.user_speaker_val]\n        \n        tmp = nn_params.tag_vocab_size\n        nn_params.tag_vocab_size = nn_params.sd_tag_vocab_size\n        decoder_output_size = 0\n        if nn_params.train_char_decoder or nn_params.load_char_decoder:\n            print(' -- SLUBiDecoder: using character speech decoder')\n            sys.stdout.flush()\n            self.speech_decoder = BasicSpeechSeqEncoder(ch_params, nn_params)\n            decoder_output_size = nn_params.hidden_dim\n        elif nn_params.train_token_decoder or nn_params.load_token_decoder:\n            print(' -- SLUBiDecoder: using token speech decoder')\n            sys.stdout.flush()\n            self.speech_decoder = BasicSpeechSeqEncoder(tk_params, nn_params)\n            decoder_output_size = nn_params.hidden_dim\n        elif nn_params.train_ml_decoder or nn_params.load_ml_decoder:\n            print(' -- SLUBiDecoder: using 2-stage token speech decoder')\n            sys.stdout.flush()\n            self.speech_decoder = MLSpeechSeqEncoder(ch_params, tk_params, nn_params)\n            decoder_output_size = nn_params.hidden_dim\n\n        nn_params.tag_vocab_size = tmp\n        nn_params.label_embed_dim = 2 * nn_params.label_embed_dim\n        nn_params.hidden_dim = 2 * nn_params.hidden_dim\n        self.bw_slu_decoder = SimpleDecoder(nn_params, decoder_output_size, 1)\n        self.fw_slu_decoder = BidirectionalDecoder(nn_params, decoder_output_size, 0)\n\n    def forward(self, input, bw_label_streams, fw_label_streams):\n        \n        (prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0])) #VARIABLE x2\n        fw_streams = (fw_label_streams, prev_sublabels)\n        bw_streams = (bw_label_streams, next_sublabels)\n        \n        (sequence_length, batch_size, num_features) = input.size() # batch_size is needed below\n        self.bw_slu_decoder.set_batch_size( batch_size )\n        self.fw_slu_decoder.set_batch_size( batch_size )\n        (fw_tk_scores, bw_tk_scores, tk_reps) = self.speech_decoder(input, bw_label_streams, fw_label_streams)\n        self.bw_slu_decoder.init_hidden()\n        self.fw_slu_decoder.init_hidden()\n        (sem_bw_hidden_states, sem_bw_scores) = self.bw_slu_decoder([tk_reps], bw_streams)\n        (sem_fw_hidden_states, sem_fw_scores) = self.fw_slu_decoder([tk_reps], fw_streams, [sem_bw_hidden_states, sem_bw_scores])\n        global_scores = 0.5 * (sem_fw_scores + sem_bw_scores)\n\n        return (global_scores, sem_bw_scores, sem_fw_hidden_states)\n\n    def load_speech_encoder(self, speech_encoder):\n        \n        self.speech_decoder.load_state_dict( speech_encoder.state_dict() )\n        if isinstance(speech_encoder, MLSpeechSeqEncoder):\n            print(' -- SLUBiDecoder: freezing speech-encoder parameters...')\n            sys.stdout.flush()\n            for param in self.speech_decoder.char_encoder.parameters():\n                param.requires_grad = False\n\n    def set_speaker_val(self, val):\n        \n        self.speaker_val = val\n        \n    def pad_input(self, input, val):\n        \n        self.speaker_val = val\n        (sequence_length, batch_size, num_features) = input.size()\n        padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)\n        for i in range( batch_size ):\n            padder[:,i,:] = self.speaker_val[i]\n        return torch.cat( [padder, input, padder], 0 )\n\n    def set_test_mode(self, val):\n        \n        self.speech_decoder.set_test_mode( val )\n        self.bw_slu_decoder.set_test_mode( val )\n        self.fw_slu_decoder.set_test_mode( val )\n\n    def set_teaching_signal_flag(self, val):\n        \n        self.speech_decoder.set_teaching_signal_flag( val )\n        self.bw_slu_decoder.set_teaching_signal_flag( val )\n        self.fw_slu_decoder.set_teaching_signal_flag( val )\n\n" ]
[ [ "torch.sum", "torch.FloatTensor", "torch.nn.LSTM", "torch.nn.Linear", "torch.ones", "torch.cuda.FloatTensor", "torch.nn.Embedding", "torch.nn.LogSoftmax", "torch.nn.Conv1d", "torch.nn.LayerNorm", "torch.max", "torch.zeros", "torch.LongTensor", "torch.cat", "torch.nn.Dropout" ] ]
hisiter97/Transformer_OCR_API
[ "78322ec2b9648d0b027326dced7c4aec967bcab3" ]
[ "vietocr/model/trainer.py" ]
[ "from vietocr.optim.optim import ScheduledOptim\nfrom vietocr.optim.labelsmoothingloss import LabelSmoothingLoss\nfrom torch.optim import Adam, SGD, AdamW\nfrom torch import nn\nfrom vietocr.tool.translate import build_model\nfrom vietocr.tool.translate import translate, batch_translate_beam_search\nfrom vietocr.tool.utils import download_weights\nfrom vietocr.tool.logger import Logger\nfrom vietocr.loader.aug import ImgAugTransform\n\nimport yaml\nimport torch\nfrom vietocr.loader.DataLoader import DataGen\nfrom vietocr.loader.dataloader import OCRDataset, ClusterRandomSampler, collate_fn\nfrom torch.utils.data import DataLoader\nfrom einops import rearrange\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, CyclicLR, OneCycleLR\n\nimport torchvision \n\nfrom vietocr.tool.utils import compute_accuracy\nfrom PIL import Image\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport time\n\nclass Trainer():\n def __init__(self, config, pretrained=True):\n\n self.config = config\n self.model, self.vocab = build_model(config)\n \n self.device = config['device']\n self.num_iters = config['trainer']['iters']\n self.beamsearch = config['predictor']['beamsearch']\n\n self.data_root = config['dataset']['data_root']\n self.train_annotation = config['dataset']['train_annotation']\n self.valid_annotation = config['dataset']['valid_annotation']\n self.dataset_name = config['dataset']['name']\n\n self.batch_size = config['trainer']['batch_size']\n self.print_every = config['trainer']['print_every']\n self.valid_every = config['trainer']['valid_every']\n\n self.checkpoint = config['trainer']['checkpoint']\n self.export_weights = config['trainer']['export']\n self.metrics = config['trainer']['metrics']\n logger = config['trainer']['log']\n \n if logger:\n self.logger = Logger(logger) \n\n if pretrained:\n weight_file = download_weights(**config['pretrain'], quiet=config['quiet'])\n self.load_weights(weight_file)\n\n self.iter = 0\n \n self.optimizer = AdamW(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09)\n self.scheduler = OneCycleLR(self.optimizer, total_steps=self.num_iters, **config['optimizer'])\n# self.optimizer = ScheduledOptim(\n# Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),\n# #config['transformer']['d_model'], \n# 512,\n# **config['optimizer'])\n\n self.criterion = LabelSmoothingLoss(len(self.vocab), padding_idx=self.vocab.pad, smoothing=0.1)\n \n transforms = ImgAugTransform()\n\n self.train_gen = self.data_gen('train_{}'.format(self.dataset_name), \n self.data_root, self.train_annotation, transform=transforms)\n if self.valid_annotation:\n self.valid_gen = self.data_gen('valid_{}'.format(self.dataset_name), \n self.data_root, self.valid_annotation)\n\n self.train_losses = []\n \n def train(self):\n total_loss = 0\n \n total_loader_time = 0\n total_gpu_time = 0\n best_acc = 0\n\n data_iter = iter(self.train_gen)\n for i in range(self.num_iters):\n self.iter += 1\n\n start = time.time()\n\n try:\n batch = next(data_iter)\n except StopIteration:\n data_iter = iter(self.train_gen)\n batch = next(data_iter)\n\n total_loader_time += time.time() - start\n\n start = time.time()\n loss = self.step(batch)\n total_gpu_time += time.time() - start\n\n total_loss += loss\n self.train_losses.append((self.iter, loss))\n\n if self.iter % self.print_every == 0:\n info = 'iter: {:06d} - train loss: {:.3f} - lr: {:.2e} - load time: {:.2f} - gpu time: {:.2f}'.format(self.iter, \n total_loss/self.print_every, self.optimizer.param_groups[0]['lr'], \n total_loader_time, 
total_gpu_time)\n\n total_loss = 0\n total_loader_time = 0\n total_gpu_time = 0\n print(info) \n self.logger.log(info)\n\n if self.valid_annotation and self.iter % self.valid_every == 0:\n val_loss = self.validate()\n acc_full_seq, acc_per_char = self.precision(self.metrics)\n\n info = 'iter: {:06d} - valid loss: {:.3f} - acc full seq: {:.4f} - acc per char: {:.4f}'.format(self.iter, val_loss, acc_full_seq, acc_per_char)\n print(info)\n self.logger.log(info)\n\n if acc_full_seq > best_acc:\n self.save_weights(self.export_weights)\n best_acc = acc_full_seq\n\n \n def validate(self):\n self.model.eval()\n\n total_loss = []\n \n with torch.no_grad():\n for step, batch in enumerate(self.valid_gen):\n batch = self.batch_to_device(batch)\n img, tgt_input, tgt_output, tgt_padding_mask = batch['img'], batch['tgt_input'], batch['tgt_output'], batch['tgt_padding_mask']\n\n outputs = self.model(img, tgt_input, tgt_padding_mask)\n# loss = self.criterion(rearrange(outputs, 'b t v -> (b t) v'), rearrange(tgt_output, 'b o -> (b o)'))\n \n outputs = outputs.flatten(0,1)\n tgt_output = tgt_output.flatten()\n loss = self.criterion(outputs, tgt_output)\n\n total_loss.append(loss.item())\n \n del outputs\n del loss\n\n total_loss = np.mean(total_loss)\n self.model.train()\n \n return total_loss\n \n def predict(self, sample=None):\n pred_sents = []\n actual_sents = []\n img_files = []\n\n for batch in self.valid_gen:\n batch = self.batch_to_device(batch)\n\n if self.beamsearch:\n translated_sentence = batch_translate_beam_search(batch['img'], self.model)\n else:\n translated_sentence = translate(batch['img'], self.model)\n\n pred_sent = self.vocab.batch_decode(translated_sentence.tolist())\n actual_sent = self.vocab.batch_decode(batch['tgt_output'].tolist())\n\n img_files.extend(batch['filenames'])\n\n pred_sents.extend(pred_sent)\n actual_sents.extend(actual_sent)\n \n if sample != None and len(pred_sents) > sample:\n break\n\n return pred_sents, actual_sents, img_files\n\n def precision(self, sample=None):\n\n pred_sents, actual_sents, _ = self.predict(sample=sample)\n\n acc_full_seq = compute_accuracy(actual_sents, pred_sents, mode='full_sequence')\n acc_per_char = compute_accuracy(actual_sents, pred_sents, mode='per_char')\n \n return acc_full_seq, acc_per_char\n \n def visualize_prediction(self, sample=16, errorcase=False, fontname='serif', fontsize=16):\n \n pred_sents, actual_sents, img_files = self.predict(sample)\n\n if errorcase:\n wrongs = []\n for i in range(len(img_files)):\n if pred_sents[i]!= actual_sents[i]:\n wrongs.append(i)\n\n pred_sents = [pred_sents[i] for i in wrongs]\n actual_sents = [actual_sents[i] for i in wrongs]\n img_files = [img_files[i] for i in wrongs]\n\n\n img_files = img_files[:sample]\n\n fontdict = {\n 'family':fontname,\n 'size':fontsize\n } \n\n for vis_idx in range(0, len(img_files)):\n img_path = img_files[vis_idx]\n pred_sent = pred_sents[vis_idx]\n actual_sent = actual_sents[vis_idx]\n\n img = Image.open(open(img_path, 'rb'))\n plt.figure()\n plt.imshow(img)\n plt.title('pred: {} - actual: {}'.format(pred_sent, actual_sent), loc='left', fontdict=fontdict)\n plt.axis('off')\n\n plt.show()\n \n def visualize_dataset(self, sample=16, fontname='serif'):\n n = 0\n for batch in self.train_gen:\n for i in range(self.batch_size):\n img = batch['img'][i].numpy().transpose(1,2,0)\n sent = self.vocab.decode(batch['tgt_input'].T[i].tolist())\n \n plt.figure()\n plt.title('sent: {}'.format(sent), loc='center', fontname=fontname)\n plt.imshow(img)\n plt.axis('off')\n \n n += 1\n 
if n >= sample:\n                    plt.show()\n                    return\n\n\n    def load_checkpoint(self, filename):\n        checkpoint = torch.load(filename)\n        \n        optim = ScheduledOptim(\n            Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),\n            self.config['transformer']['d_model'], **self.config['optimizer'])\n\n        self.optimizer.load_state_dict(checkpoint['optimizer'])\n        self.model.load_state_dict(checkpoint['state_dict'])\n        self.iter = checkpoint['iter']\n\n        self.train_losses = checkpoint['train_losses']\n\n    def save_checkpoint(self, filename):\n        state = {'iter':self.iter, 'state_dict': self.model.state_dict(),\n                'optimizer': self.optimizer.state_dict(), 'train_losses': self.train_losses}\n        \n        path, _ = os.path.split(filename)\n        os.makedirs(path, exist_ok=True)\n\n        torch.save(state, filename)\n\n    def load_weights(self, filename):\n        state_dict = torch.load(filename, map_location=torch.device(self.device))\n\n        for name, param in self.model.named_parameters():\n            if name not in state_dict:\n                print('{} not found'.format(name))\n            elif state_dict[name].shape != param.shape:\n                print('{} mismatching shape, required {} but found {}'.format(name, param.shape, state_dict[name].shape))\n                del state_dict[name]\n\n        self.model.load_state_dict(state_dict, strict=False)\n\n    def save_weights(self, filename):\n        path, _ = os.path.split(filename)\n        os.makedirs(path, exist_ok=True)\n        \n        torch.save(self.model.state_dict(), filename)\n\n    def batch_to_device(self, batch):\n        img = batch['img'].to(self.device, non_blocking=True)\n        tgt_input = batch['tgt_input'].to(self.device, non_blocking=True)\n        tgt_output = batch['tgt_output'].to(self.device, non_blocking=True)\n        tgt_padding_mask = batch['tgt_padding_mask'].to(self.device, non_blocking=True)\n\n        batch = {\n                'img': img, 'tgt_input':tgt_input,\n                'tgt_output':tgt_output, 'tgt_padding_mask':tgt_padding_mask,\n                'filenames': batch['filenames']\n                }\n\n        return batch\n\n    def data_gen(self, lmdb_path, data_root, annotation, transform=None):\n        dataset = OCRDataset(lmdb_path=lmdb_path,\n                root_dir=data_root, annotation_path=annotation,\n                vocab=self.vocab, transform=transform,\n                image_height=self.config['dataset']['image_height'],\n                image_min_width=self.config['dataset']['image_min_width'],\n                image_max_width=self.config['dataset']['image_max_width'])\n\n        sampler = ClusterRandomSampler(dataset, self.batch_size, True)\n        gen = DataLoader(\n                dataset,\n                batch_size=self.batch_size,\n                sampler=sampler,\n                collate_fn = collate_fn,\n                shuffle=False,\n                drop_last=False,\n                **self.config['dataloader'])\n        \n        return gen\n\n    def data_gen_v1(self, lmdb_path, data_root, annotation):\n        data_gen = DataGen(data_root, annotation, self.vocab, 'cpu',\n                image_height = self.config['dataset']['image_height'],\n                image_min_width = self.config['dataset']['image_min_width'],\n                image_max_width = self.config['dataset']['image_max_width'])\n\n        return data_gen\n\n    def step(self, batch):\n        self.model.train()\n\n        batch = self.batch_to_device(batch)\n        img, tgt_input, tgt_output, tgt_padding_mask = batch['img'], batch['tgt_input'], batch['tgt_output'], batch['tgt_padding_mask']\n        \n        outputs = self.model(img, tgt_input, tgt_key_padding_mask=tgt_padding_mask)\n#        loss = self.criterion(rearrange(outputs, 'b t v -> (b t) v'), rearrange(tgt_output, 'b o -> (b o)'))\n        outputs = outputs.view(-1, outputs.size(2))#flatten(0, 1)\n        tgt_output = tgt_output.view(-1)#flatten()\n        \n        loss = self.criterion(outputs, tgt_output)\n\n        self.optimizer.zero_grad()\n\n        loss.backward()\n        \n        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)\n\n        self.optimizer.step()\n        self.scheduler.step()\n\n        loss_item = loss.item()\n\n        return loss_item\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "torch.optim.lr_scheduler.OneCycleLR", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "torch.save", "torch.no_grad", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "torch.device", "numpy.mean" ] ]
ad-daniel/opendr
[ "cc71138ae22ec39b186960ff98c74bc2cdca3623" ]
[ "tests/sources/tools/perception/object_detection_2d/gem/test_gem.py" ]
[ "# Copyright 2020-2022 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\nimport shutil\nimport os\nimport torch\nimport warnings\nfrom opendr.engine.datasets import ExternalDataset\nfrom opendr.perception.object_detection_2d import GemLearner\n\nfrom PIL import Image\n\nDEVICE = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\nprint(\"Using device:\", DEVICE)\nprint(\"Using device:\", DEVICE, file=sys.stderr)\n\n\ndef rmfile(path):\n try:\n os.remove(path)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\ndef rmdir(_dir):\n try:\n shutil.rmtree(_dir)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\nclass TestGemLearner(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n print(\"\\n\\n*********************************\\nTEST Object Detection GEM Learner\\n\"\n \"*********************************\")\n cls.temp_dir = os.path.join(\"tests\", \"sources\", \"tools\",\n \"perception\", \"object_detection_2d\",\n \"gem\", \"gem_temp\")\n\n cls.model_backbone = \"resnet50\"\n\n cls.learner = GemLearner(iters=1,\n temp_path=cls.temp_dir,\n backbone=cls.model_backbone,\n num_classes=7,\n device=DEVICE,\n )\n\n cls.learner.download(mode='pretrained_gem')\n\n print(\"Model downloaded\", file=sys.stderr)\n\n cls.learner.download(mode='test_data_sample_dataset')\n\n cls.learner.download(mode='test_data_sample_images')\n\n print(\"Data downloaded\", file=sys.stderr)\n cls.dataset_location = os.path.join(cls.temp_dir,\n 'sample_dataset',\n )\n cls.m1_dataset = ExternalDataset(\n cls.dataset_location,\n \"coco\",\n )\n cls.m2_dataset = ExternalDataset(\n cls.dataset_location,\n \"coco\",\n )\n\n @classmethod\n def tearDownClass(cls):\n # Clean up downloaded files\n rmdir(os.path.join(cls.temp_dir, 'pretrained_models'))\n rmdir(os.path.join(cls.temp_dir, 'checkpoints'))\n rmdir(os.path.join(cls.temp_dir, 'facebookresearch_detr_master'))\n rmdir(os.path.join(cls.temp_dir, 'sample_dataset'))\n rmdir(os.path.join(cls.temp_dir, 'sample_images'))\n rmdir(os.path.join(cls.temp_dir, 'outputs'))\n rmdir(cls.temp_dir)\n\n def test_fit(self):\n # Test fit will issue resource warnings due to some files left open in pycoco tools,\n # as well as a deprecation warning due to a cast of a float to integer (hopefully they will be fixed in a future\n # version)\n warnings.simplefilter(\"ignore\", ResourceWarning)\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n self.learner.model = None\n self.learner.ort_session = None\n\n self.learner.download(mode='pretrained_gem')\n\n m = list(self.learner.model.parameters())[0].clone()\n\n self.learner.fit(\n m1_train_edataset=self.m1_dataset,\n m2_train_edataset=self.m2_dataset,\n annotations_folder='annotations',\n m1_train_annotations_file='RGB_26May2021_14h19m_coco.json',\n m2_train_annotations_file='Thermal_26May2021_14h19m_coco.json',\n m1_train_images_folder='train/m1',\n m2_train_images_folder='train/m2',\n 
out_dir=os.path.join(self.temp_dir, \"outputs\"),\n trial_dir=os.path.join(self.temp_dir, \"trial\"),\n logging_path='',\n verbose=False,\n m1_val_edataset=self.m1_dataset,\n m2_val_edataset=self.m2_dataset,\n m1_val_annotations_file='RGB_26May2021_14h19m_coco.json',\n m2_val_annotations_file='Thermal_26May2021_14h19m_coco.json',\n m1_val_images_folder='val/m1',\n m2_val_images_folder='val/m2',\n )\n\n self.assertFalse(torch.equal(m, list(self.learner.model.parameters())[0]),\n msg=\"Model parameters did not change after running fit.\")\n\n # Cleanup\n warnings.simplefilter(\"default\", ResourceWarning)\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def test_eval(self):\n # Test eval will issue resource warnings due to some files left open in pycoco tools,\n # as well as a deprecation warning due to a cast of a float to integer (hopefully they will be fixed in a future\n # version)\n warnings.simplefilter(\"ignore\", ResourceWarning)\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n self.learner.model = None\n self.learner.ort_session = None\n\n self.learner.download(mode='pretrained_gem')\n\n result = self.learner.eval(\n m1_edataset=self.m1_dataset,\n m2_edataset=self.m2_dataset,\n m1_images_folder='val/m1',\n m2_images_folder='val/m2',\n annotations_folder='annotations',\n m1_annotations_file='RGB_26May2021_14h19m_coco.json',\n m2_annotations_file='Thermal_26May2021_14h19m_coco.json',\n verbose=False,\n )\n\n self.assertGreater(len(result), 0)\n\n # Cleanup\n warnings.simplefilter(\"default\", ResourceWarning)\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def test_infer(self):\n self.learner.model = None\n self.learner.ort_session = None\n\n self.learner.download(mode='pretrained_gem')\n\n m1_image = Image.open(os.path.join(self.temp_dir, \"sample_images/rgb/2021_04_22_21_35_47_852516.jpg\"))\n m2_image = Image.open(os.path.join(self.temp_dir, 'sample_images/aligned_infra/2021_04_22_21_35_47_852516.jpg'))\n\n result, _, _ = self.learner.infer(m1_image, m2_image)\n\n self.assertGreater(len(result), 0)\n\n def test_save(self):\n self.learner.model = None\n self.learner.ort_session = None\n\n model_dir = os.path.join(self.temp_dir, \"test_model\")\n\n self.learner.download(mode='pretrained_detr')\n\n self.learner.save(model_dir)\n\n starting_param_1 = list(self.learner.model.parameters())[0].clone()\n\n learner2 = GemLearner(\n iters=1,\n temp_path=self.temp_dir,\n device=DEVICE,\n num_classes=7,\n )\n learner2.load(model_dir)\n\n new_param = list(learner2.model.parameters())[0].clone()\n self.assertTrue(torch.equal(starting_param_1, new_param))\n\n rmdir(model_dir)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.cuda.is_available", "torch.equal" ] ]
ryosukehata/pytorch-lightning
[ "a5bd2edefbafa6e03acffd4ba1a8816bbc1682a3" ]
[ "pl_examples/basic_examples/lightning_module_template.py" ]
[ "\"\"\"\nExample template for defining a system\n\"\"\"\nimport os\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torchvision.datasets import MNIST\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.root_module.root_module import LightningModule\n\n\nclass LightningTemplateModel(LightningModule):\n \"\"\"\n Sample model to show how to define a template\n \"\"\"\n\n def __init__(self, hparams):\n \"\"\"\n Pass in parsed HyperOptArgumentParser to the model\n :param hparams:\n \"\"\"\n # init superclass\n super(LightningTemplateModel, self).__init__()\n self.hparams = hparams\n\n self.batch_size = hparams.batch_size\n\n # if you specify an example input, the summary will show input/output for each layer\n self.example_input_array = torch.rand(5, 28 * 28)\n\n # build model\n self.__build_model()\n\n # ---------------------\n # MODEL SETUP\n # ---------------------\n def __build_model(self):\n \"\"\"\n Layout model\n :return:\n \"\"\"\n self.c_d1 = nn.Linear(in_features=self.hparams.in_features,\n out_features=self.hparams.hidden_dim)\n self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)\n self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)\n\n self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim,\n out_features=self.hparams.out_features)\n\n # ---------------------\n # TRAINING\n # ---------------------\n def forward(self, x):\n \"\"\"\n No special modification required for lightning, define as you normally would\n :param x:\n :return:\n \"\"\"\n\n x = self.c_d1(x)\n x = torch.tanh(x)\n x = self.c_d1_bn(x)\n x = self.c_d1_drop(x)\n\n x = self.c_d2(x)\n logits = F.log_softmax(x, dim=1)\n\n return logits\n\n def loss(self, labels, logits):\n nll = F.nll_loss(logits, labels)\n return nll\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the training loop\n :param batch:\n :return:\n \"\"\"\n # forward pass\n x, y = batch\n x = x.view(x.size(0), -1)\n\n y_hat = self.forward(x)\n\n # calculate loss\n loss_val = self.loss(y, y_hat)\n\n # in DP mode (default) make sure if result is scalar, there's another dim in the beginning\n if self.trainer.use_dp or self.trainer.use_ddp2:\n loss_val = loss_val.unsqueeze(0)\n\n tqdm_dict = {'train_loss': loss_val}\n output = OrderedDict({\n 'loss': loss_val,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n\n # can also return just a scalar instead of a dict (return loss_val)\n return output\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the validation loop\n :param batch:\n :return:\n \"\"\"\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self.forward(x)\n\n loss_val = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n val_acc = torch.tensor(val_acc)\n\n if self.on_gpu:\n val_acc = val_acc.cuda(loss_val.device.index)\n\n # in DP mode (default) make sure if result is scalar, there's another dim in the beginning\n if self.trainer.use_dp or self.trainer.use_ddp2:\n loss_val = loss_val.unsqueeze(0)\n val_acc = val_acc.unsqueeze(0)\n\n output = OrderedDict({\n 'val_loss': loss_val,\n 'val_acc': val_acc,\n })\n\n # can also return just a scalar instead of a dict (return loss_val)\n return output\n\n def 
validation_end(self, outputs):\n        \"\"\"\n        Called at the end of validation to aggregate outputs\n        :param outputs: list of individual outputs of each validation step\n        :return:\n        \"\"\"\n        # if returned a scalar from validation_step, outputs is a list of tensor scalars\n        # we return just the average in this case (if we want)\n        # return torch.stack(outputs).mean()\n\n        val_loss_mean = 0\n        val_acc_mean = 0\n        for output in outputs:\n            val_loss = output['val_loss']\n\n            # reduce manually when using dp or ddp2 (mirrors validation_step above)\n            if self.trainer.use_dp or self.trainer.use_ddp2:\n                val_loss = torch.mean(val_loss)\n            val_loss_mean += val_loss\n\n            # reduce manually when using dp or ddp2\n            val_acc = output['val_acc']\n            if self.trainer.use_dp or self.trainer.use_ddp2:\n                val_acc = torch.mean(val_acc)\n\n            val_acc_mean += val_acc\n\n        val_loss_mean /= len(outputs)\n        val_acc_mean /= len(outputs)\n        tqdm_dict = {'val_loss': val_loss_mean, 'val_acc': val_acc_mean}\n        result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': val_loss_mean}\n        return result\n\n    # ---------------------\n    # TRAINING SETUP\n    # ---------------------\n    def configure_optimizers(self):\n        \"\"\"\n        return whatever optimizers we want here\n        :return: list of optimizers\n        \"\"\"\n        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)\n        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)\n        return [optimizer], [scheduler]\n\n    def __dataloader(self, train):\n        # init data generators\n        transform = transforms.Compose([transforms.ToTensor(),\n                                        transforms.Normalize((0.5,), (1.0,))])\n        dataset = MNIST(root=self.hparams.data_root, train=train,\n                        transform=transform, download=True)\n\n        # when using multi-node (ddp) we need to add the datasampler\n        train_sampler = None\n        batch_size = self.hparams.batch_size\n\n        if self.use_ddp:\n            train_sampler = DistributedSampler(dataset)\n\n        should_shuffle = train_sampler is None\n        loader = DataLoader(\n            dataset=dataset,\n            batch_size=batch_size,\n            shuffle=should_shuffle,\n            sampler=train_sampler,\n            num_workers=0\n        )\n\n        return loader\n\n    @pl.data_loader\n    def train_dataloader(self):\n        print('training data loader called')\n        return self.__dataloader(train=True)\n\n    @pl.data_loader\n    def val_dataloader(self):\n        print('val data loader called')\n        return self.__dataloader(train=False)\n\n    @pl.data_loader\n    def test_dataloader(self):\n        print('test data loader called')\n        return self.__dataloader(train=False)\n\n    @staticmethod\n    def add_model_specific_args(parent_parser, root_dir):  # pragma: no cover\n        \"\"\"\n        Parameters you define here will be available to your model through self.hparams\n        :param parent_parser:\n        :param root_dir:\n        :return:\n        \"\"\"\n        parser = ArgumentParser(parents=[parent_parser])\n\n        # param overwrites\n        # parser.set_defaults(gradient_clip_val=5.0)\n\n        # network params\n        parser.add_argument('--in_features', default=28 * 28, type=int)\n        parser.add_argument('--out_features', default=10, type=int)\n        # use 500 for CPU, 50000 for GPU to see speed difference\n        parser.add_argument('--hidden_dim', default=50000, type=int)\n        parser.add_argument('--drop_prob', default=0.2, type=float)\n        parser.add_argument('--learning_rate', default=0.001, type=float)\n\n        # data\n        parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)\n\n        # training params (opt)\n        parser.add_argument('--optimizer_name', default='adam', type=str)\n        parser.add_argument('--batch_size', default=64, type=int)\n        return parser\n" ]
[ [ "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.utils.data.DataLoader", "torch.nn.functional.log_softmax", "torch.sum", "torch.nn.Linear", "torch.utils.data.distributed.DistributedSampler", "torch.nn.BatchNorm1d", "torch.argmax", "torch.nn.functional.nll_loss", "torch.rand", "torch.tensor", "torch.tanh", "torch.nn.Dropout", "torch.mean" ] ]
ajshajib/fabspec
[ "0fec1595a4525215bbabd1f2480d1d31a86d955e" ]
[ "fabspec/spectra.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nThis module defines the class Spectra() that contains a spectra and\nrelevant information.\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom copy import deepcopy\n\n\nclass Spectra(object):\n \"\"\"\n Contains a spectra and relevant information.\n \"\"\"\n\n def __init__(self, wavelengths, spectra, *args, **kwargs):\n \"\"\"\n :param spectra: Array of flux.\n :param wavelengths: Array of wavelengths, must be same length as\n spectra.\n :param args:\n :param kwargs: `resolution`: R, `resolution_fwhm`: FWHM of\n spectral resolution.\n \"\"\"\n try:\n assert len(spectra) == len(wavelengths)\n except:\n raise ('Error: spectra and wavelength must have same size!')\n\n self._spectra = np.array(spectra) # to store the original spectra\n self.spectra = deepcopy(self._spectra)\n self._wavelengths = np.array(wavelengths) # to store the original\n self.wavelengths = deepcopy(self._wavelengths)\n\n if 'resolution_fwhm' in kwargs:\n self._resolution_fwhm = kwargs['resolution_fwhm']\n\n # resolution parameter R\n if 'resolution' in kwargs:\n self._resolution = kwargs['resolution']\n\n if 'flux_unit' in kwargs:\n self._flux_unit = kwargs['flux_unit']\n else:\n self._flux_unit = 'arbitrary'\n\n if 'wavelength_unit' in kwargs:\n self._wavelength_unit = kwargs['wavelength_unit']\n\n @property\n def resolution_fwhm(self):\n if hasattr(self, '_resolution_fwhm'):\n return self._resolution_fwhm\n else:\n return None\n\n @resolution_fwhm.setter\n def resolution_fwhm(self, fwhm):\n \"\"\"\n Update the FWHM of the spectra.\n :param fwhm: FWHM to set for the spectra, in the same unit as\n `self.wavelengths`.\n \"\"\"\n self._resolution_fwhm = fwhm\n\n @property\n def resolution(self):\n if hasattr(self, '_resolution'):\n return self._resolution\n else:\n return None\n\n @property\n def flux_unit(self):\n if hasattr(self, '_flux_unit'):\n return self._flux_unit\n else:\n return None\n\n @property\n def wavelength_unit(self):\n if hasattr(self, '_wavelength_unit'):\n return self._wavelength_unit\n else:\n return None\n\n def get_delta_lambda(self):\n \"\"\"\n Compute the spatial pixel size of the spectra.\n :return:\n \"\"\"\n return np.mean(np.diff(self.wavelengths))\n\n def linearize_wavelength_scale(self, dlambda):\n \"\"\"\n Linearize the wavelength scale if its currently in log scale.\n :param dlambda: Wavelength resolution for linear intervals.\n :return:\n \"\"\"\n sample = interp1d(self.wavelengths, self.spectra, kind='linear',\n bounds_error=False, fill_value=0.)\n # NOT shortening the wavelength range by 1 index so that\n # `scipy.interpolate.interp1d` does not throw error. 
Fill value with 0\n # outside interpolation range.\n self.wavelengths = np.arange(self.wavelengths[0],\n self.wavelengths[-1]+dlambda/2., dlambda)\n\n self.spectra = sample(self.wavelengths)\n\n def normalize_flux(self):\n \"\"\"\n Normalize the flux so that the median is 1.\n :return:\n \"\"\"\n self.spectra /= np.median(self.spectra)\n self._flux_unit = 'normalized'\n\n def reset_to_initial(self):\n \"\"\"\n Reset the spectra to initial flux and wavelengths at the time\n of creating the `Spectra` object.\n :return:\n \"\"\"\n self.wavelengths = deepcopy(self._wavelengths)\n self.spectra = deepcopy(self._spectra)\n\n def get_wavelength_range(self):\n \"\"\"\n Get the wavelength range of the spectra.\n :return:\n \"\"\"\n return self.wavelengths[[0, -1]] #\\\n # + np.array([-0.5, 0.5])*self.get_delta_lambda()\n\n def clip(self, start_wavelength=None, end_wavelength=None):\n \"\"\"\n Clip the spectra within the specified wavelengths.\n :param start_wavelength: Start wavelength for clipping. If\n `None`, set to minimum of current wavelength range.\n :param end_wavelength: End wavelength for clipping. If `None`,\n set to maximum of current wavelength range.\n :return:\n \"\"\"\n if start_wavelength is None:\n start_wavelength = self.wavelengths[0]\n\n if end_wavelength is None:\n end_wavelength = self.wavelengths[-1]\n\n self.spectra = self.spectra[(self.wavelengths >= start_wavelength) &\n (self.wavelengths <= end_wavelength)]\n self.wavelengths = self.wavelengths[(self.wavelengths >=\n start_wavelength) &\n (self.wavelengths <=\n end_wavelength)]" ]
[ [ "scipy.interpolate.interp1d", "numpy.diff", "numpy.median", "numpy.arange", "numpy.array" ] ]
edward-io/pytorch
[ "04caef8e1d4f951cc380d6cebb9967b71695de13" ]
[ "torch/profiler/profiler.py" ]
[ "import gzip\nimport json\nimport os\nimport tempfile\nfrom enum import Enum\nfrom typing import Any, Callable, Iterable, Optional\nfrom warnings import warn\n\nimport torch\nimport torch.autograd.profiler as prof\nfrom torch.autograd import kineto_available, ProfilerActivity\n\n\nclass ProfilerAction(Enum):\n \"\"\"\n Profiler actions that can be taken at the specified intervals\n \"\"\"\n NONE = 0\n WARMUP = 1\n RECORD = 2\n RECORD_AND_SAVE = 3\n\n\ndef schedule(*, wait: int, warmup: int, active: int, repeat: int = 0, skip_first: int = 0) -> Callable:\n \"\"\"\n Returns a callable that can be used as profiler ``schedule`` argument. The profiler will skip\n the first ``skip_first`` steps, then wait for ``wait`` steps, then do the warmup for the next ``warmup`` steps,\n then do the active recording for the next ``active`` steps and then repeat the cycle starting with ``wait`` steps.\n The optional number of cycles is specified with the ``repeat`` parameter, the zero value means that\n the cycles will continue until the profiling is finished.\n \"\"\"\n def schedule_fn(step: int) -> ProfilerAction:\n assert step >= 0\n if step < skip_first:\n return ProfilerAction.NONE\n else:\n step -= skip_first\n num_steps = wait + warmup + active\n if repeat > 0 and step / num_steps >= repeat:\n return ProfilerAction.NONE\n mod_step = step % num_steps\n if mod_step < wait:\n return ProfilerAction.NONE\n elif mod_step < wait + warmup:\n return ProfilerAction.WARMUP\n else:\n return ProfilerAction.RECORD if mod_step < num_steps - 1 \\\n else ProfilerAction.RECORD_AND_SAVE\n assert wait >= 0 and warmup >= 0 and active > 0 and \\\n repeat >= 0 and skip_first >= 0, \"Invalid profiler schedule arguments\"\n if warmup == 0:\n warn(\"Profiler won't be using warmup, this can skew profiler results\")\n return schedule_fn\n\n\ndef _default_schedule_fn(_: int) -> ProfilerAction:\n \"\"\"\n Default profiler behavior - immediately starts recording the events,\n keeps doing it on every profiler step.\n \"\"\"\n return ProfilerAction.RECORD\n\ndef tensorboard_trace_handler(dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False):\n \"\"\"\n Outputs tracing files to directory of ``dir_name``, then that directory can be\n directly delivered to tensorboard as logdir.\n ``worker_name`` should be unique for each worker in distributed scenario,\n it will be set to '[hostname]_[pid]' by default.\n \"\"\"\n import os\n import socket\n import time\n\n def handler_fn(prof) -> None:\n nonlocal worker_name\n if not os.path.isdir(dir_name):\n try:\n os.makedirs(dir_name, exist_ok=True)\n except Exception:\n raise RuntimeError(\"Can't create directory: \" + dir_name)\n if not worker_name:\n worker_name = \"{}_{}\".format(socket.gethostname(), str(os.getpid()))\n file_name = \"{}.{}.pt.trace.json\".format(worker_name, int(time.time() * 1000))\n if use_gzip:\n file_name = file_name + '.gz'\n prof.export_chrome_trace(os.path.join(dir_name, file_name))\n return handler_fn\n\ndef supported_activities():\n \"\"\"\n Returns a set of supported profiler tracing activities.\n\n Note: profiler uses CUPTI library to trace on-device CUDA kernels.\n In case when CUDA is enabled but CUPTI is not available, passing\n ``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA\n profiling code (same as in the legacy ``torch.autograd.profiler``).\n This, in turn, results in including CUDA time in the profiler table output,\n but not in the JSON trace.\n \"\"\"\n return torch.autograd._supported_activities()\n\n\nclass 
profile(object):\n \"\"\"Profiler context manager.\n\n Args:\n activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:\n ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.\n Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.\n schedule (callable): callable that takes step (int) as a single parameter and returns\n ``ProfilerAction`` value that specifies the profiler action to perform at each step.\n on_trace_ready (callable): callable that is called at each step when ``schedule``\n returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.\n record_shapes (bool): save information about operator's input shapes.\n profile_memory (bool): track tensor memory allocation/deallocation.\n with_stack (bool): record source information (file and line number) for the ops.\n with_flops (bool): use formula to estimate the FLOPS of specific operators\n (matrix multiplication and 2D convolution).\n use_cuda (bool):\n .. deprecated:: 1.8.1\n use ``activities`` instead.\n\n .. note::\n Use :func:`~torch.profiler.schedule` to generate the callable schedule.\n Non-default schedules are useful when profiling long training jobs\n and allow the user to obtain multiple traces at the different iterations\n of the training process.\n The default schedule simply records all the events continuously for the\n duration of the context manager.\n\n .. note::\n Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard:\n\n ``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)``\n\n After profiling, result files can be found in the specified directory. Use the command:\n\n ``tensorboard --logdir dir_name``\n\n to see the results in TensorBoard.\n For more information, see\n `PyTorch Profiler TensorBoard Plugin <https://github.com/pytorch/kineto/tree/master/tb_plugin>`__\n\n .. note::\n Enabling shape and stack tracing results in additional overhead.\n When record_shapes=True is specified, profiler will temporarily hold references to the tensors;\n that may further prevent certain optimizations that depend on the reference count and introduce\n extra tensor copies.\n\n Examples:\n\n .. code-block:: python\n\n with torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ]\n ) as p:\n code_to_profile()\n print(p.key_averages().table(\n sort_by=\"self_cuda_time_total\", row_limit=-1))\n\n Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions:\n\n .. 
code-block:: python\n\n # Non-default profiler schedule allows user to turn profiler on and off\n # on different iterations of the training loop;\n # trace_handler is called every time a new trace becomes available\n def trace_handler(prof):\n print(prof.key_averages().table(\n sort_by=\"self_cuda_time_total\", row_limit=-1))\n # prof.export_chrome_trace(\"/tmp/test_trace_\" + str(prof.step_num) + \".json\")\n\n with torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ],\n\n # In this example with wait=1, warmup=1, active=2,\n # profiler will skip the first step/iteration,\n # start warming up on the second, record\n # the third and the forth iterations,\n # after which the trace will become available\n # and on_trace_ready (when set) is called;\n # the cycle repeats starting with the next step\n\n schedule=torch.profiler.schedule(\n wait=1,\n warmup=1,\n active=2),\n on_trace_ready=trace_handler\n # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')\n # used when outputting for tensorboard\n ) as p:\n for iter in range(N):\n code_iteration_to_profile(iter)\n # send a signal to the profiler that the next iteration has started\n p.step()\n \"\"\"\n def __init__(\n self,\n *,\n activities: Optional[Iterable[ProfilerActivity]] = None,\n schedule: Optional[Callable[[int], ProfilerAction]] = None,\n on_trace_ready: Optional[Callable[..., Any]] = None,\n record_shapes: bool = False,\n profile_memory: bool = False,\n with_stack: bool = False,\n with_flops: bool = False,\n # deprecated:\n use_cuda: Optional[bool] = None):\n if activities:\n self.activities = set(activities)\n else:\n self.activities = supported_activities()\n\n if use_cuda is not None:\n warn(\"use_cuda is deprecated, use activities argument instead\")\n if use_cuda:\n self.activities.add(ProfilerActivity.CUDA)\n elif ProfilerActivity.CUDA in self.activities:\n self.activities.remove(ProfilerActivity.CUDA)\n\n assert len(self.activities) > 0, \"No valid profiler activities found\"\n\n if schedule:\n self.schedule = schedule\n # add step markers into the trace and table view\n self.record_steps = True\n else:\n self.schedule = _default_schedule_fn\n self.record_steps = False\n self.on_trace_ready = on_trace_ready\n self.record_shapes = record_shapes\n self.with_flops = with_flops\n self.profile_memory = profile_memory\n self.with_stack = with_stack\n self.step_num = 0\n self.current_action = self.schedule(self.step_num)\n self.profiler: Optional[prof.profile] = None\n self.step_rec_fn: Optional[prof.record_function] = None\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop()\n\n def start(self):\n self._enter_actions()\n if self.record_steps:\n self.step_rec_fn = prof.record_function(\"ProfilerStep#\" + str(self.step_num))\n self.step_rec_fn.__enter__()\n\n def stop(self):\n if self.record_steps and self.step_rec_fn:\n self.step_rec_fn.__exit__(None, None, None)\n self._exit_actions()\n\n def step(self):\n \"\"\"\n Signals the profiler that the next profiling step has started.\n \"\"\"\n if self.record_steps and self.step_rec_fn:\n self.step_rec_fn.__exit__(None, None, None)\n prev_action = self.current_action\n self.step_num += 1\n self.current_action = self.schedule(self.step_num)\n\n if self.current_action == ProfilerAction.NONE:\n if prev_action == ProfilerAction.NONE:\n pass\n elif prev_action == ProfilerAction.WARMUP:\n warn(\"Incorrect schedule: WARMUP followed by NONE\")\n 
self._start_trace()\n self._stop_trace()\n elif prev_action == ProfilerAction.RECORD:\n warn(\"Incorrect schedule: RECORD followed by NONE\")\n self._stop_trace()\n else:\n assert prev_action == ProfilerAction.RECORD_AND_SAVE\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n elif self.current_action == ProfilerAction.WARMUP:\n if prev_action == ProfilerAction.NONE:\n self._start_warmup()\n elif prev_action == ProfilerAction.WARMUP:\n pass\n elif prev_action == ProfilerAction.RECORD:\n warn(\"Incorrect schedule: RECORD followed by WARMUP\")\n self._stop_trace()\n else:\n assert prev_action == ProfilerAction.RECORD_AND_SAVE\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n self._start_warmup()\n elif self.current_action in \\\n [ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE]:\n if prev_action == ProfilerAction.NONE:\n self._start_warmup()\n self._start_trace()\n elif prev_action == ProfilerAction.WARMUP:\n self._start_trace()\n elif prev_action == ProfilerAction.RECORD:\n pass\n else:\n assert prev_action == ProfilerAction.RECORD_AND_SAVE\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n self._start_warmup()\n self._start_trace()\n\n if self.record_steps:\n self.step_rec_fn = prof.record_function(\"ProfilerStep#\" + str(self.step_num))\n self.step_rec_fn.__enter__()\n\n def export_chrome_trace(self, path: str):\n \"\"\"\n Exports the collected trace in Chrome JSON format.\n \"\"\"\n assert self.profiler\n if path.endswith('.gz'):\n fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)\n fp.close()\n retvalue = self.profiler.export_chrome_trace(fp.name)\n with open(fp.name) as fin:\n with gzip.open(path, 'wt') as fout:\n fout.writelines(fin)\n os.remove(fp.name)\n return retvalue\n else:\n return self.profiler.export_chrome_trace(path)\n\n def export_stacks(self, path: str, metric: str = \"self_cpu_time_total\"):\n \"\"\"Save stack traces in a file in a format suitable for visualization.\n\n Args:\n path (str): save stacks file to this location;\n metric (str): metric to use: \"self_cpu_time_total\" or \"self_cuda_time_total\"\n\n .. note::\n Example of using FlameGraph tool:\n\n - git clone https://github.com/brendangregg/FlameGraph\n - cd FlameGraph\n - ./flamegraph.pl --title \"CPU time\" --countname \"us.\" profiler.stacks > perf_viz.svg\n \"\"\"\n assert self.profiler\n return self.profiler.export_stacks(path, metric)\n\n def key_averages(self, group_by_input_shape: bool = False, group_by_stack_n: int = 0):\n \"\"\"Averages events, grouping them by operator name and (optionally) input shapes and\n stack.\n\n .. 
note::\n To use shape/stack functionality make sure to set record_shapes/with_stack\n when creating profiler context manager.\n \"\"\"\n assert self.profiler\n return self.profiler.key_averages(group_by_input_shape, group_by_stack_n)\n\n def events(self):\n \"\"\"\n Returns the list of unaggregated profiler events,\n to be used in the trace callback or after the profiling is finished\n \"\"\"\n assert self.profiler\n return self.profiler.function_events\n\n def add_metadata(self, key: str, value: str):\n \"\"\"\n Adds a user defined metadata with a string key and a string value\n into the trace file\n \"\"\"\n wrapped_value = \"\\\"\" + value.replace('\"', '\\\\\"') + \"\\\"\"\n torch.autograd._add_metadata_json(key, wrapped_value)\n\n def add_metadata_json(self, key: str, value: str):\n \"\"\"\n Adds a user defined metadata with a string key and a valid json value\n into the trace file\n \"\"\"\n torch.autograd._add_metadata_json(key, value)\n\n def _get_distributed_info(self):\n import torch.distributed as dist\n if not dist.is_available() or not dist.is_initialized():\n return None\n\n return {\n \"backend\": dist.get_backend(),\n \"rank\": dist.get_rank(),\n \"world_size\": dist.get_world_size()\n }\n\n def _enter_actions(self):\n if self.current_action == ProfilerAction.WARMUP:\n self._start_warmup()\n elif self.current_action in \\\n [ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE]:\n self._start_warmup()\n self._start_trace()\n\n def _exit_actions(self):\n if self.current_action == ProfilerAction.WARMUP:\n self._start_trace()\n self._stop_trace()\n elif self.current_action in \\\n [ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE]:\n self._stop_trace()\n if self.on_trace_ready:\n self.on_trace_ready(self)\n\n def _start_warmup(self):\n self.profiler = prof.profile(\n use_cuda=(ProfilerActivity.CUDA in self.activities),\n use_cpu=(ProfilerActivity.CPU in self.activities),\n record_shapes=self.record_shapes,\n with_flops=self.with_flops,\n profile_memory=self.profile_memory,\n with_stack=self.with_stack,\n use_kineto=True,\n )\n self.profiler._prepare_trace()\n\n def _start_trace(self):\n assert self.profiler is not None\n self.profiler._start_trace()\n\n if kineto_available():\n dist_info = self._get_distributed_info()\n if dist_info:\n self.add_metadata_json(\"distributedInfo\", json.dumps(dist_info))\n\n def _stop_trace(self):\n assert self.profiler is not None\n self.profiler.__exit__(None, None, None)\n" ]
[ [ "torch.distributed.get_backend", "torch.distributed.get_rank", "torch.autograd._add_metadata_json", "torch.distributed.get_world_size", "torch.distributed.is_available", "torch.distributed.is_initialized", "torch.autograd._supported_activities", "torch.autograd.kineto_available", "torch.autograd.profiler.profile" ] ]
JhonLiuljs/tensorflow_demo
[ "0757f81a2c8baae41fce586e5d86f7312f46fda6" ]
[ "1.Cnn_Captcha/gen_captcha.py" ]
[ "# coding:utf-8\nfrom captcha.image import ImageCaptcha # pip install captcha\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport random\nimport time\nimport sys\n\nfrom constants import number\nfrom constants import alphabet\nfrom constants import ALPHABET\n\n\n# 验证码一般都无视大小写;验证码长度4个字符\ndef random_captcha_text(char_set=number + alphabet + ALPHABET, captcha_size=4):\n \"\"\" 指定使用的验证码内容列表和长期 返回随机的验证码文本 \"\"\"\n captcha_text = []\n for i in range(captcha_size):\n c = random.choice(char_set)\n captcha_text.append(c)\n return captcha_text\n\n\n# 使用ImageCaptcha库生成验证码\ndef gen_captcha_text_and_image():\n \"\"\"生成字符对应的验证码 \"\"\"\n image = ImageCaptcha() # 导入验证码包 生成一张空白图\n\n captcha_text = random_captcha_text() # 随机一个验证码内容\n captcha_text = ''.join(captcha_text) # 类型转换为字符串\n\n captcha = image.generate(captcha_text)\n # image.write(captcha_text, 'image/' + captcha_text + '.jpg') # 写到文件\n\n # rm = 'rm '+captcha_text + '.jpg'\n # os.system(rm)\n\n captcha_image = Image.open(captcha) # 转换为图片格式\n captcha_image = np.array(captcha_image) # 转换为 np数组类型\n\n return captcha_text, captcha_image\n\n\n# 把彩色图像转为灰度图像(色彩对识别验证码没有什么用)\ndef convert2gray(img):\n if len(img.shape) > 2:\n gray = np.mean(img, -1)\n # 上面的转法较快,正规转法如下\n # r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]\n # gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n else:\n return img\n\n\nif __name__ == '__main__':\n # 测试\n for i in range(10000):\n text, image = gen_captcha_text_and_image()\n print('begin ', time.ctime(), type(image))\n f = plt.figure()\n ax = f.add_subplot(111)\n ax.text(0.1, 0.9, text, ha='center', va='center', transform=ax.transAxes)\n plt.imshow(image)\n # plt.show() # 显示,,取消注释并在30行取消写到文件的注释即可保存为文件\n print('end ', time.ctime())\n print(\"over!\")\n sys.exit()\n\n" ]
[ [ "numpy.array", "matplotlib.pyplot.figure", "matplotlib.pyplot.imshow", "numpy.mean" ] ]
yoheikikuta/a-primer-on-adversarial-examples
[ "1f4bea303b01b140b3a022cc7448ad3daaae3447" ]
[ "data.py" ]
[ "import random\nfrom abc import ABC, abstractmethod\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as F\n\n\nclass Data(ABC):\n \"\"\"Data represents an abstract class providing interfaces.\n\n Attributes\n ----------\n base_dit str : base directory of data.\n self.batch_size int : batch size.\n self.num_workers int : number of workers used in multi-process data loding.\n \"\"\"\n base_dir = \"./data\"\n\n def __init__(self, batch_size, num_workers):\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n @abstractmethod\n def transform(self) -> torchvision.transforms.transforms.Compose:\n pass\n\n @abstractmethod\n def get_dataset(self) -> torchvision.datasets.vision.VisionDataset:\n pass\n\n def prepare_data(self):\n \"\"\"Get and return dataset with transformations.\n\n Returns\n -------\n trainloader torch.utils.data.DataLoader : train DataLoader.\n testloader torch.utils.data.DataLoader : test DataLoader.\n num_classes int : number of classes of dataset.\n \"\"\"\n trainset, testset = self.get_dataset()\n num_classes = len(trainset.classes)\n\n trainloader = torch.utils.data.DataLoader(trainset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers)\n testloader = torch.utils.data.DataLoader(testset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers)\n\n return trainloader, testloader, num_classes\n\n\nclass DataCIFAR10(Data):\n \"\"\"DataCIFAR10 represents cifar10 dataset.\n\n Attributes\n ----------\n name str : \"cifar10\".\n \"\"\"\n name = \"cifar10\"\n\n def __init__(self, batch_size=4, num_workers=2):\n \"\"\"\n Parameters\n ----------\n batch_size int : batch_size.\n num_workers int : number of workers used in multi-process data loding.\n \"\"\"\n super(DataCIFAR10, self).__init__(batch_size, num_workers)\n\n def transform(self):\n \"\"\"Only uses transforms.ToTensor().\"\"\"\n return transforms.Compose([transforms.ToTensor()])\n\n def get_dataset(self):\n \"\"\"Download and load cifar10 dataset.\n\n Returns\n -------\n trainset torchvision.datasets.CIFAR10 : train dataset.\n testset torchvision.datasets.CIFAR10 : test dataset.\n \"\"\"\n trainset = torchvision.datasets.CIFAR10(root=f\"{self.base_dir}/{self.name}\",\n train=True, download=True,\n transform=self.transform())\n testset = torchvision.datasets.CIFAR10(root=f\"{self.base_dir}/{self.name}\",\n train=False, download=True,\n transform=self.transform())\n\n return trainset, testset\n\n\nclass DataGTSRB(Data):\n \"\"\"DataGTSRB represents pre-processed GTSRB dataset.\n\n Attributes\n ----------\n name str : \"GTSRB_processed\".\n \"\"\"\n name = \"GTSRB_processed\"\n\n def __init__(self, batch_size=4, num_workers=2):\n super(DataGTSRB, self).__init__(batch_size, num_workers)\n\n def transform(self):\n \"\"\"Only uses transforms.ToTensor().\"\"\"\n return transforms.Compose([transforms.ToTensor()])\n\n def get_dataset(self):\n \"\"\"Load GTSRB dataset from directory that is prepared in advance.\n\n Returns\n -------\n trainset torchvision.datasets.ImageFolder : train dataset.\n testset torchvision.datasets.ImageFolder : test dataset.\n \"\"\"\n trainset = torchvision.datasets.ImageFolder(\n root=f\"{self.base_dir}/{self.name}/train\",\n transform=self.transform())\n\n testset = torchvision.datasets.ImageFolder(\n root=f\"{self.base_dir}/{self.name}/test\",\n transform=self.transform())\n\n return trainset, testset\n\n\nclass RandomResizePadding(object):\n \"\"\"DataGTSRB represents 
image and zero-pads it to a square of the given size.\n\n    Attributes\n    ----------\n    self.size int : image will be rescaled to [c, size, size].\n    \"\"\"\n    def __init__(self, size):\n        assert isinstance(size, int)\n        self.size = size\n\n    def __call__(self, img):\n        \"\"\"Randomly resize and 0-pad the given PIL image.\n\n        Parameters\n        ----------\n        img PIL.Image : input image.\n\n        Returns\n        -------\n        img PIL.Image : transformed image.\n        \"\"\"\n        # Randomly resize the image.\n        resize = random.randint(img.width, self.size)\n        resized_img = F.resize(img, resize)\n        # 0-pad the resized image. 0-pad to all left, right, top and bottom.\n        pad_size = self.size - resize\n        padded_img = F.pad(resized_img, pad_size, fill=0)\n        # Crop the padded image to get (size, size) image.\n        pos_top = random.randint(0, pad_size)\n        pos_left = random.randint(0, pad_size)\n        transformed_img = F.crop(padded_img, pos_top, pos_left, self.size, self.size)\n        return transformed_img\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
fenning-research-group/sentaurus_ddd
[ "e9e7a9b86c8b87cafff0b69c4a0f83c2fe292a45" ]
[ "DatAnalysis.py" ]
[ "# DatAnalysis.py\n# Module containing data analysis functions used for the DDD model\n\nimport os\nimport re # Regular Expression package\nimport pdb # debugger\nimport numpy as np\nimport matplotlib.pyplot as plt # plotting package\n#import readh5conc\nimport csv\nimport h5py\nfrom scipy import signal\nfrom scipy import interpolate\nimport matplotlib.animation as manimation\nfrom datetime import datetime\n\n\n# function to plot the IV curve\n# Arguments:\n#\t\tfilename: name of the .plt file containing the data. Example: \"n_t6600_light_des.plt\"\n#\t\tplotcond=1 to plot, 0 otherwise\ndef analyzedata(filename,plotcond):\n\n\t#if condition==\"dark\":\n\t#\tfid=open(\"nodnum1_dark_des.plt\",\"r\")\n\t#elif condition==\"light\":\n\t#\tfid=open(\"nodnum1_light_des.plt\",\"r\")\n\t#else:\n\t#\traise Exception('wrong argument entered. Enter \"dark\" or \"light\".')\t\n\t\n\tfid=open(filename,\"r\")\n\trawdata=\"\" #Create empty string\n\twhile 1:\n\t\tdataline=fid.readline()\n\t\tif dataline==\"\": # check if the string is empty, meaning end of file\n\t\t\tbreak\n\t\tlinestr=dataline.strip() # save dataline into a string and remove leading and trailing spaces\n\t\trawdata=rawdata+\" \"+linestr # concatenate each line with the line string\n\tfid.close()\n\t\n\t# find the indices of the { brackets\n\t# xstart=re.search(\"{\",data)\n\t# xstart.span()\n\tindstart=[m.span() for m in re.finditer(\"{\",rawdata)] # finditer finds all the iterations of '{'\n\t\n\t# find the indices of the } brackets\n\tindend=[m.span() for m in re.finditer(\"}\",rawdata)]\n\t\n\t# Make a dictionary 'data' containing the info section and the numerical data section as an array\n\trawinfo=rawdata[indstart[0][0]+1:indend[0][0]-1] # names of datasets and of functions (includes extra spaces)\n\trawvalues=rawdata[indstart[1][1]+1:indend[1][1]-1] # numerical values calculated by Sentaurus (includes extra spaces)\n\t\n\t# create the dictionary entries and remove spaces\n\tdata=dict() # define dictionary\n\tdata[\"info\"]=rawinfo.strip() \n\tdata[\"values\"]=rawvalues.strip()\n\t\n\t#pdb.set_trace()\n\t# Find indices of the [ and ] brackets in the info section\n\t# [ brackets\n\tsqindst=[m.span() for m in re.finditer(\"\\[\",rawinfo.strip())] # starting index of square bracket\n\t# ] brackets\n\tsqinden=[m.span() for m in re.finditer(\"\\]\",rawinfo.strip())] # ending index of square bracket\n\t\n\t# The dataset names are within the first brackets\n\trawdatasets=data[\"info\"][sqindst[0][0]+1:sqinden[0][0]-1]\n\tdata[\"datasets\"]=rawdatasets.strip()\n\t\n\t# Split the dataset field at the double quotes to find the number of output parameters (voltage, current, etc.)\n\tlist_datasets=re.split(\"\\\" \",data[\"datasets\"]) # split each time a double quote followed by a space in encountered\n\tnboutputs=len(list_datasets) #number of output parameters from sdevice\n\t\n\t# Split the value string list\n\t#list_values=re.split(\" \",data[\"values\"]) # separate with two spaces as this is the minimum spacing between consecutive values\n\tlist_values=data[\"values\"].split() # split without argument splits at white spaces\n\t\n\t# Convert the value string list into a numpy string array (vector)\n\tvalvect_str=np.array(list_values)\n\t\n\t# convert numpy string array into a numpy float array\n\tvalvect=valvect_str.astype(np.float)\n\tlen_valvect=len(valvect)\n\t\n\t# Reshape the data array\n\tnblines=int(len_valvect/nboutputs)# nb of lines for the 
matrix\n\t#pdb.set_trace()\n\tvalarray=np.reshape(valvect,(nblines,nboutputs))\n\t#pdb.set_trace()\n\t\n\t# Get current and voltage (column number depends on the defined outputs in the sdevice file)\n\tV=valarray[:,1] # voltage in col. index 1, ie 2nd column\n\tI=valarray[:,7] # current in col. index 7, ie 8th column\n\t\n\tif plotcond:\n\t\tplt.ion()\n\t\tplt.plot(V,I,'-+')\n\t\tplt.ylabel(\"Total current (mA/cm2)\")\n\t\tplt.xlabel(\"Voltage (V)\")\n\t\tplt.ylim(-50, 20)\n\t\tplt.show()\n\t\n\treturn [I, V]\n\t\nNaprofilename=\"single_layer_D1=4E-16cm2ps_D2=1E-15cm2ps_Cs1E+20cm3_T85_time96hr_h1.0e-04_m1.0e+00_pnp.h5\"\n#Naprofilename=\"two_layers_D1=4E-16cm2ps_D2=1E-14cm2ps_Cs1E+20cm3_T85_time96hr_h1.0e-10_m1.0e+00_pnp.h5\"\ndef batchanalysis(directory='./2019-11-1_backup',sdevicetemplate=\"sdevice_light_des\",h5File=Naprofilename,startstep=0,endstep=0):\n\t# analyzes data from several plt files\n\t# assumes that the .plt files are saved in directory\n\t#sdevicetemplate is the name of the Sentaurus device template file without the extension, for instance \"sdevice_light_des\"\n\t# example: runSentaurus.batchanalysis(6,\"sdevice_light_des\")\n\t\n\t# obtain depth of the shunt as a function of time\n\t#sigma=shuntcond(nbsteps)\n\t\n\t# Find time point corresponding to each time\n\t# Open h5 file\n\t#h5File=\"FOR_JV_85C.h5\"\n\thf= h5py.File(h5File, 'r')\n\ttme= hf.get('time')\n\tti=tme[:]\n\t\n\tLti=len(ti) # length of the time list\n\t\n\t# In case endstep is not defined by the user or is wrongly defined\n\tif endstep==0 or endstep<startstep:\n\t\tendstep=Lti\n\t\tprint(\"Last simulation step set to the total nb of steps\")\n\t\t\n\t#nbsteps=len(ti) # number of time steps\n\tnbsteps=endstep-startstep\n\n\t# Save h5py data\n\ttime=[0]*nbsteps\n\ttime[:]=ti[:]\n\t\n\thf.close()\n\t\t\n\t# Define dictionary containing IV data. Each field contains a list.\n\tresults=dict()\n\t# Create lists of NaN (so that 0's won't be plotted if some points are skipped)\n\tresults[\"I\"]=NaNlist(Lti)\n\tresults[\"sdevicename\"]=NaNlist(Lti)\n\tresults[\"time\"]=NaNlist(Lti)\n\tresults[\"V\"]=NaNlist(Lti)\n\tresults[\"shdepth\"]=NaNlist(Lti)\n\tresults[\"Efficiency\"]=NaNlist(Lti)\n\t\n\t#pdb.set_trace()\n\t# results[\"I\"]=[0]*nbsteps # initialize the list to save current data\n\t# results[\"V\"]=[0]*nbsteps\n\t# results[\"sdevicename\"]=[0]*nbsteps\n\t# results[\"time\"]=[0]*nbsteps\n\t# results[\"shdepth\"]=[0]*nbsteps\n\t# results[\"Efficiency\"]=[0]*nbsteps\n\t\n\tshdepth=[] # List containing depth of the shunt at each iteration\n\t\n\tplt.ion()\n\tplt.figure()\n\tfor i in range(startstep,endstep):\n\t\t# Analyze data and store generated IV curve in a dictionary\n\t\t# The name of the file to analyze is obtained by adding the node name to the sdevice file name without the string \"sdevice\".\n\t\t# eg: sdevice_light_des.cmd becomes n_t1_light_des.plt\n\t\t\n\t\t#shdepth=sigma[i,1] # append shunt depth\n\t\t#timestamp=\"_t\"+str(int(sigma[i,0]))\n\t\t\n\t\tbasename=re.split(\"sdevice_\",sdevicetemplate) # yields \"light_des\" from \"sdevice_light_des\", case of a template file \"sdevice_light_des.cmd\".\n\t\tplotfile=directory+\"//\"+\"n_t\"+str(int(time[i]))+\"_\"+basename[1]+\".plt\" # by default the .plt files containing the extracted data are saved with a name in the form \"n_tk_basename.plt\" where k is the time and basename is the string after \"sdevice\" in the sdevice file. Example: \"n_t4_light_des.plt\" based on the file \"sdevice_light_des_tk.cmd\".\n\t\t\n\t\t# Try opening the filename. 
Pass if it does not exist (in case some points were skipped in the simulations)\n\t\tplt_flag=1\n\t\ttry:\n\t\t\tfid=open(plotfile,\"r\")\n\t\t\tfid.close()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\twarning_str='Could not open file '+plotfile+', it may not exist.\\nSkipping the file in the analysis.\\n'\n\t\t\tprint(warning_str)\n\t\t\tplt_flag=0 # set the flag to 0 so the file won't be used in the analysis\n\n\t\tif plt_flag: # if the .plt file was found, extract data\n\t\t\t# Extract resulting IV curve\n\t\t\tprint(plotfile)\n\t\t\t#pdb.set_trace()\n\t\t\t\n\t\t\ttry: # try extracting data from the .plt file\n\t\t\t\t[I,V]=analyzedata(plotfile,0)\n\t\t\texcept Exception: # if the file is corrupted, skip this .plt file\n\t\t\t\t# (happens for instance if the simulations do not converge).\n\t\t\t\t# The corresponding dictionary value will remain a NaN.\n\t\t\t\tprint('*****File %s is corrupted, skipped in analysis\\n' % plotfile)\n\t\t\telse: # if no exception is found, save the extracted data in the dictionary\n\t\t\t\t# Extract efficiency as a function of time step\n\t\t\t\t# save results in dictionary\n\t\t\t\tprint('File %s is ok\\n' % plotfile)\n\t\t\t\tresults[\"I\"][i]=I\n\t\t\t\tresults[\"V\"][i]=V\n\t\t\t\tresults[\"sdevicename\"][i]=plotfile\n\t\t\t\t#results[\"time\"][i]=i\n\t\t\t\tresults[\"time\"][i]=time[i]\n\t\t\t\tresults[\"Efficiency\"][i]=findeff(V,I)\n\t\t\t\t#results[\"shdepth\"][i]=shdepth\n\t\t\t\t\n\t\t\t\t# Plot the IV curves\n\t\t\t\t#plt.figure()\n\t\t\t\tplt.subplot(1,2,1)\n\t\t\t\tplt.plot(V,I)\n\t\t\t\n\tplt.ylabel(\"Total current (mA/cm$^2$)\")\n\tplt.xlabel(\"Voltage (V)\")\n\tplt.ylim(-35, 0)\n\tplt.xlim(0, 0.75)\n\tplt.rcParams.update({'font.size':16})\n\t\n\t# pdb.set_trace()\n\t\n\t# Convert lists to numpy arrays to apply a mask\n\ttimearr=np.asarray(results[\"time\"])\n\teffarr=np.asarray(results[\"Efficiency\"])\n\t# Find where to mask NaN in the array\n\teffmask=np.isfinite(effarr)\n\t#pdb.set_trace()\n\teffarr_mask=effarr[effmask] # apply mask\n\ttimearr_mask=timearr[effmask] # apply mask\n\t\n\t#pdb.set_trace()\n\t\n\tplt.subplot(1,2,2)\n\tplt.plot(timearr_mask/3600,effarr_mask,marker='s',linestyle='',color='k',markersize=12,fillstyle='none') # plot simulated points\n\t\n\t# interpolate only if there are more than 3 points\n\ttime_new=float('NaN')\n\teff_smoothed=float('NaN')\n\tif len(effarr_mask)>3:\n\t\t# Spline interpolation of the efficiency curve\n\t\ttck=interpolate.splrep(timearr_mask,effarr_mask,s=0,k=3) # spline interpolation coefficients (order k)\n\t\ttime_new=np.arange(timearr_mask[0],timearr_mask[-1],300) # create a finer time vector\n\t\teffarr_interp=interpolate.splev(time_new,tck,der=0) # evaluate the interpolated curve\n\t\n\t\t# smooth interpolated curve\n\t\twdw=11\n\t\tpolord=3\n\t\teff_smoothed=signal.savgol_filter(effarr_interp,wdw,polord) # window length wdw, poly order polord\n\t\n\t\tplt.plot(time_new/3600,eff_smoothed,'--b',linewidth=1.3,dashes=(5,6)) # plot interpolated curve\n\t\t\n\t\tplt.legend(['Simulated\\npoints','Guide to\\nthe eye'],loc='upper right',prop={'size':9})\n\t\n\tplt.rcParams.update({'font.size':16})\n\tplt.subplots_adjust(left=0.15,top=0.98,wspace=0.4) # adjust space between subplots\n\t\n\tplt.ylabel(\"Efficiency (%)\")\n\tplt.xlabel(\"Time (h)\")\n\t#plt.ylim(-35, 0)\n\t#plt.xlim(0, 0.65)\n\t\n\ttimepts=SimTimePts(results) # Time points that have been run by the 
simulations\n\t\n\t#plt.show()\n\treturn results,timearr_mask,effarr_mask,time_new,eff_smoothed\n\t\n\t\ndef plotcurves():\n\t# plot dark and light IV curves; analyzedata takes a .plt file name and a plot flag\n\t# (file names taken from the commented-out block at the top of analyzedata)\n\t[Idark,Vdark]=analyzedata(\"nodnum1_dark_des.plt\",0)\n\t[Ilight,Vlight]=analyzedata(\"nodnum1_light_des.plt\",0)\n\tplt.plot(Vdark,Idark,label='dark')\n\tplt.plot(Vlight,Ilight,label='light') # successive plt.plot calls draw on the same axes (plt.hold was removed from matplotlib)\n\tplt.rcParams.update({'font.size': 20}) # increase fontsize\n\tplt.legend(loc='upper left')\n\tplt.show()\n\t\ndef findeff(V,I):\n# Function returning the efficiency of the cell assuming 1 sun illumination\n\t\n\tI=-I # to work with positive currents\n\tP=V*I\n\t# Find max powerpoint\n\tP_L=list(P)\n\tpm=max(P_L)\n\timax=P_L.index(pm)\n\t\n\t#[k for k,j in enumerate(P) if j==pm]\n\t#pdb.set_trace()\n\tVmax=V[imax]\n\tImax=I[imax]\n\t\n\tnu=Imax*Vmax*10/1e3*100 # Imax*Vmax in mW/cm2 equals 10x that value in W/m2; divide by 1000 W/m2 (1 sun) and convert to percent\n\t\n\treturn nu\n\n# function to plot Na profile\ndef ploth5(h5file=\"FOR_newNaprofile.h5\", folderpath=\"/home/linux/ieng6/na299x/na299x/DB/Guillaume/Solar_cell_Al_BSF\"):\n\t\n\t#Open h5 file (file for full stack file)\n\thf\t\t= h5py.File(h5file, 'r')\n\ttime \t= hf.get('time')\n\tct\t\t= hf.get('si/concentration')\n\t# ct_10=hf.get('si/concentration/ct_10')\n\tx\t\t= hf.get('si/x')\n\t\n\t#pdb.set_trace() # debugging leftover, kept commented out so the function runs unattended\n\tplt.ion()\n\tfig,ax=plt.subplots()\n\tfor t in enumerate(time):\n\t\tct_field='ct_'+str(t[0]) # do not use all curves since they are high resolution\n\t\t#pdb.set_trace()\n\t\tplt.plot(x-x[0],ct[ct_field])\n\t\tplt.ylim(1e10,1e16)\n\t\tplt.xlim(0,0.8)\n\t\tplt.yscale('log')\n\t\t\n\tfor item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n\t\tax.get_xticklabels() + ax.get_yticklabels()):\n\t\titem.set_fontsize(19)\n\t\t\n\t\n\thf.close()\n\t\n# creates a list of length L filled with NaN\ndef NaNlist(L): \n\tarr=np.zeros(L,dtype=float) # create an array of floats because an array of int cannot be filled with NaN (np.float is deprecated)\n\tarr.fill(np.nan) # fill with NaN\n\treturn arr.tolist() # convert to list\n\t\n# Function returning the indices of the time points that have been simulated in the result list output from batchanalysis.py\ndef SimTimePts(results):\n\ttme=results[\"time\"]\n\ttme_array=np.asarray(tme)\n\ttme_mask=np.isfinite(tme_array)\n\ttimepts=np.where(tme_mask)\n\t\n\treturn timepts[0] # array\n\t\n# Function to make video of efficiency as a function of time\n# Takes in argument the images generated at each time\n## not currently working, see module videoPID.py\ndef makePIDvideo(time_new,eff_smoothed):\n\n\t#Writer=manimation.FFMpegWriter(fps=30)\n\t#['ffmpeg']\n\t#writer=Writer(fps=20, metadata=dict(title='PID simulated efficiency', artist='G.',bitrate=1800)) #record at 20 fps\n\t#pdb.set_trace()\n\t# create figure\n\tfigPID,ax=plt.subplots()\n\tline,=ax.plot(time_new,eff_smoothed)\n\tplt.xlim(0,12)\n\tplt.ylim(0,19)\n\t\n\tdef animate_PID(i,time_new,eff_smoothed,line):\n\t\tline.set_data(time_new[:i],eff_smoothed[:i])\n\t\treturn line,\n\t\n\tplt.xlabel('Time (hrs)')\n\tplt.ylabel('Efficiency (%)')\n\t\n\t#plt.show()\n\t#pdb.set_trace()\n\tani = manimation.FuncAnimation(figPID, animate_PID, len(time_new), fargs=[time_new,eff_smoothed,line], blit=False)\n\tani.save('Efficiencyplot.mp4', writer='ffmpeg')\n\t# import images and make video\n\n# Function used to create a log of the parameters and files used in the simulations\ndef createlog(batchdir,Temp,mseg,clathrate_file, h5file, startstep, endstep, skipNB, sdetemplate, sdevicetemplate):\n\t# Also add the name of the optical generation file used? 
(although it is in the sde file)\n\targuments=locals() # get all function arguments as a dictionary (currently unused)\n\tnow=datetime.now()\n\tfilename=batchdir+\"/AA_logfile_PID_\"+now.strftime(\"%Y%d%m_%H_%M_%S\")+\".txt\"\n\tfid=open(filename,\"w+\")\n\t\n\tfid.write(\"Simulation starting time:\t\t\t\t\"+now.strftime(\"%Y/%d/%m %H:%M:%S\")+\"\\n\")\n\tfid.write(\"\\n\")\n\tfid.write(\"Temperature:\t\t\t\t\"+str(Temp)+\" °C\\n\")\n\tfid.write(\"Shunt segregation coefficient:\t\t\t\t\"+str(mseg)+\"\\n\")\n\tfid.write(\"Clathrate conductivity file:\t\t\t\t\"+clathrate_file+\"\\n\")\n\tfid.write(\"Sodium profiles from h5py file:\t\t\t\t\"+h5file+\"\\n\")\n\tfid.write(\"Simulation starting step:\t\t\t\t\"+str(startstep)+\"\\n\")\n\tfid.write(\"Simulation ending step:\t\t\t\t\"+str(endstep)+\"\\n\")\n\tfid.write(\"Step used to skip sodium profiles in the h5py file:\t\t\t\t\"+str(skipNB)+\"\\n\")\n\tfid.write(\"Sentaurus editor template file:\t\t\t\t\"+sdetemplate+\"\\n\")\n\tfid.write(\"Sentaurus device template file:\t\t\t\t\"+sdevicetemplate+\"\\n\")\n\tfid.close() # close the log file\n\n# Function replacing expression \"expr_search\" by \"expr_replace\" in file \"templatename.ext\" (in the base directory)\n# and saving the updated template \"templatename_t<time>.ext\" in directory \"newfolderpath\".\ndef replace_line(templatename,ext, expr_search,expr_replace,newfolderpath,time,identifier):\n# templatename:\t\t\tname of the template file, eg \"sdetemplate\"\n# ext:\t\t\t\t\textension of the template file, eg \"cmd\"\n# expr_search:\t\t\tname of the expression to be replaced, e.g. \"nodnum1\"\n# expr_replace:\t\t\tname of the new line, e.g. newnodename as newnodename=\"n_t\"+str(int(time))\n# newfolderpath:\t\tpath of the directory where the simulation data is saved. e.g. \"testdir2\"\n# time:\t\t\t\t\ttime of the Na profile (int or float)\n# identifier:\t\t\ttype of line being searched. 
eg: \"mesh line\"\n \n# open sdevice file to find number of lines\n\twith open(templatename+\".\"+ext,'r') as fp:\n\t\tcount_temp = len(fp.readlines( ))\n\t\tfp.close\n\t\n\t\tlinelist=[0]*count_temp # preallocate list memory\n\t\tnewlinelist=[0]*count_temp # preallocate list memory for file with modified line\n\n\n\twith open(templatename+\".\"+ext,'r') as fp:\n\t\tk=0\n\t\tsuccessdevice=0\n\t\t# save each line into a list\n\t\twhile 1:\n\t\t\tdataline=fp.readline()\n\t\t\tif dataline==\"\": # check if the string is empty, meaning end of file\n\t\t\t\tbreak\n\t\t\tlinelist[k]=dataline # create of list containing one line at each index\n\t\t\tnewlinelist[k]=dataline # copy the file into a new list array\n\t\t\t# find line where mesh is defined and change the name of the mesh file\n\t\t\tif re.search(expr_search,linelist[k]): # if the mesh definition is found\n\t\t\t\tprint(identifier + \" found\")\n\t\t\t\tnewlinelist[k]=re.sub(expr_search,expr_replace,linelist[k]) # replace the nodenumber \"nodnum1\" in the template by n_t1, n_t2, etc.\n\t\t\t\tsuccessdevice=successdevice+1\n\t\t\t\t#pdb.set_trace()\n\t\t\tk=k+1\n\t\tfp.close()\n\t\t\n\t\tif successdevice==0:\n\t\t\tprint(\"*********\\n\"+identifier + \" not found\\n**********\")\n\t\t\n\t\t# save the files under a different name (_t0, _t1, _t2, etc)\n\t\t# file path\n\t\tfilepath=os.path.join(newfolderpath,templatename) # path to save file in the right simulation folder\n\n\t\tnew_datafile_name=filepath+\"_t\"+str(int(time))+\".\"+ext\n\t\tf=open(new_datafile_name,\"w+\")\n\t\tfor i in range(len(newlinelist)):\n\t\t\tf.write(newlinelist[i])\n\t\tf.close()\n\t\tprint(\"File \"+new_datafile_name+\" created.\\n\")\n\t\t\n\t\treturn new_datafile_name\n\t\t\n\t\t\n" ]
[ [ "matplotlib.pyplot.yscale", "numpy.asarray", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.isfinite", "matplotlib.pyplot.figure", "numpy.reshape", "matplotlib.pyplot.xlim", "matplotlib.pyplot.rcParams.update", "scipy.interpolate.splev", "numpy.where", "numpy.zeros", "matplotlib.pyplot.hold", "matplotlib.pyplot.subplots", "numpy.arange", "scipy.interpolate.splrep", "matplotlib.pyplot.ylim", "matplotlib.pyplot.ion", "scipy.signal.savgol_filter", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
bdice/pymbar
[ "22c327bcdde20f7c6256b3eb0de2efc4939e77c8" ]
[ "pymbar/tests/test_covariance.py" ]
[ "import numpy as np\nimport pymbar\nfrom pymbar.utils_for_testing import eq, suppress_derivative_warnings_for_tests\n\ndef load_oscillators(n_states, n_samples):\n name = \"%dx%d oscillators\" % (n_states, n_samples)\n O_k = np.linspace(1, 5, n_states)\n k_k = np.linspace(1, 3, n_states)\n N_k = (np.ones(n_states) * n_samples).astype('int')\n test = pymbar.testsystems.harmonic_oscillators.HarmonicOscillatorsTestCase(O_k, k_k)\n x_n, u_kn, N_k_output, s_n = test.sample(N_k, mode='u_kn')\n return name, u_kn, N_k_output, s_n\n\n\ndef load_exponentials(n_states, n_samples):\n name = \"%dx%d exponentials\" % (n_states, n_samples)\n rates = np.linspace(1, 3, n_states)\n N_k = (np.ones(n_states) * n_samples).astype('int')\n test = pymbar.testsystems.exponential_distributions.ExponentialTestCase(rates)\n x_n, u_kn, N_k_output, s_n = test.sample(N_k, mode='u_kn')\n return name, u_kn, N_k_output, s_n\n\n\ndef _test(data_generator):\n name, U, N_k, s_n = data_generator()\n print(name)\n mbar = pymbar.MBAR(U, N_k)\n results1 = mbar.getFreeEnergyDifferences(uncertainty_method=\"svd\", return_dict=True)\n fij1_t, dfij1_t = mbar.getFreeEnergyDifferences(uncertainty_method=\"svd\", return_dict=False)\n results2 = mbar.getFreeEnergyDifferences(uncertainty_method=\"svd-ew\", return_dict=True)\n fij1 = results1['Delta_f']\n dfij1 = results1['dDelta_f']\n fij2 = results2['Delta_f']\n dfij2 = results2['dDelta_f']\n\n # Check to make sure the returns from with and w/o dict are the same\n eq(fij1, fij1_t)\n eq(dfij1, dfij1_t)\n\n eq(pymbar.mbar_solvers.mbar_gradient(U, N_k, mbar.f_k), np.zeros(N_k.shape), decimal=8)\n eq(np.exp(mbar.Log_W_nk).sum(0), np.ones(len(N_k)), decimal=10)\n eq(np.exp(mbar.Log_W_nk).dot(N_k), np.ones(U.shape[1]), decimal=10)\n eq(pymbar.mbar_solvers.self_consistent_update(U, N_k, mbar.f_k), mbar.f_k, decimal=10)\n\n # Test against old MBAR code.\n with suppress_derivative_warnings_for_tests():\n mbar0 = pymbar.old_mbar.MBAR(U, N_k)\n fij0, dfij0 = mbar0.getFreeEnergyDifferences(uncertainty_method=\"svd\")\n eq(mbar.f_k, mbar0.f_k, decimal=8)\n eq(np.exp(mbar.Log_W_nk), np.exp(mbar0.Log_W_nk), decimal=5)\n\n eq(fij0, fij1, decimal=8)\n eq(dfij0, dfij1, decimal=8)\n\n eq(fij0, fij2, decimal=8)\n eq(dfij0, dfij2, decimal=8)\n\n\ndef test_100x100_oscillators():\n data_generator = lambda: load_oscillators(100, 100)\n _test(data_generator)\n\n\ndef test_200x50_oscillators():\n data_generator = lambda: load_oscillators(200, 50)\n _test(data_generator)\n\n\ndef test_200x50_exponentials():\n data_generator = lambda: load_exponentials(200, 50)\n _test(data_generator)\n" ]
[ [ "numpy.ones", "numpy.exp", "numpy.linspace", "numpy.zeros" ] ]
xunhen/HRNet-Object-Detection
[ "44f641da00810c61e217c1080ef1b45d39df484f" ]
[ "mmdet/ops/nms/setup.py" ]
[ "import os.path as osp\nfrom setuptools import setup, Extension\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\next_args = dict(\n include_dirs=[np.get_include()],\n language='c++',\n extra_compile_args={\n 'cc': ['-Wno-unused-function', '-Wno-write-strings'],\n 'nvcc': ['-c', '--compiler-options', '-fPIC'],\n },\n)\n\nextensions = [\n Extension('soft_nms_cpu', ['src/soft_nms_cpu.pyx'], **ext_args),\n]\n\n\ndef customize_compiler_for_nvcc(self):\n \"\"\"inject deep into distutils to customize how the dispatch\n to cc/nvcc works.\n If you subclass UnixCCompiler, it's not trivial to get your subclass\n injected in, and still have the right customizations (i.e.\n distutils.sysconfig.customize_compiler) run on it. So instead of going\n the OO route, I have this. Note, it's kindof like a wierd functional\n subclassing going on.\"\"\"\n\n # tell the compiler it can processes .cu\n self.src_extensions.append('.cu')\n\n # save references to the default compiler_so and _comple methods\n #add by wjc\n if hasattr(self, 'compiler_so'): # add by hwx at 20180408\n default_compiler_so = self.compiler_so\n super = self._compile\n\n # now redefine the _compile method. This gets executed for each\n # object but distutils doesn't have the ability to change compilers\n # based on source extension: we add it.\n def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n if osp.splitext(src)[1] == '.cu':\n # use the cuda for .cu files\n self.set_executable('compiler_so', 'nvcc')\n # use only a subset of the extra_postargs, which are 1-1 translated\n # from the extra_compile_args in the Extension class\n postargs = extra_postargs['nvcc']\n else:\n postargs = extra_postargs['cc']\n\n super(obj, src, ext, cc_args, postargs, pp_opts)\n # reset the default compiler_so, which we might have changed for cuda\n self.compiler_so = default_compiler_so\n\n # inject our redefined _compile method into the class\n self._compile = _compile\n\n\nclass custom_build_ext(build_ext):\n\n def build_extensions(self):\n customize_compiler_for_nvcc(self.compiler)\n build_ext.build_extensions(self)\n\n\nsetup(\n name='soft_nms',\n cmdclass={'build_ext': custom_build_ext},\n ext_modules=cythonize(extensions),\n)\n\nsetup(\n name='nms_cuda',\n ext_modules=[\n CUDAExtension('nms_cuda', [\n 'src/nms_cuda.cpp',\n 'src/nms_kernel.cu',\n ]),\n CUDAExtension('nms_cpu', [\n 'src/nms_cpu.cpp',\n ]),\n ],\n cmdclass={'build_ext': BuildExtension})\n" ]
[ [ "torch.utils.cpp_extension.CUDAExtension", "numpy.get_include" ] ]
mrsempress/stereo
[ "c7465e92d9d03f73c13011125bdd02c33def6c19" ]
[ "epilines.py" ]
[ "\"\"\"\nIt is for Epipolar geometry\n\"\"\"\n\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\ndef Epipolar_geometry(leftpath, rightpath):\n \"\"\"\n :param leftpath: The path of left images\n :param rightpath: The path of right images\n :return:\n \"\"\"\n # objP = np.zeros((6 * 7, 3), np.float32)\n # objP[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)\n # patternSize = (7, 6)\n imgl = cv2.imread(leftpath, 0) # queryimage # left image\n imgr = cv2.imread(rightpath, 0) # trainimage # right image\n # id = leftpath[16:]\n id = leftpath[42:]\n # # The origin image is gray\n # grayl = cv2.cvtColor(imgl, cv2.COLOR_BGR2GRAY)\n # grayr = cv2.cvtColor(imgr,cv2.COLOR_BGR2GRAY)\n # criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n # retl, cornersl = cv2.findChessboardCorners(grayl, patternSize, None)\n # retr, cornersr = cv2.findChessboardCorners(grayr, patternSize, None)\n # if not retl or not retr:\n # return\n\n # cornersl2 = cv2.cornerSubPix(grayl, cornersl, (11, 11), (-1, -1), criteria)\n # cornersr2 = cv2.cornerSubPix(grayr, cornersr, (11, 11), (-1, -1), criteria)\n\n # imgl = cv2.drawChessboardCorners(grayl, patternSize, cornersl2, retl)\n # imgr = cv2.drawChessboardCorners(grayr, patternSize, cornersr2, retr)\n\n # FLANN: Fast Library for Approximate Nearest Neighbors\n (pts1, pts2) = findMatches(imgl, imgr, id)\n F, pts1, pts2 = findFundamentalMatrix(pts1, pts2)\n findEpilines(imgl, imgr, pts1, pts2, F, id)\n\n # # Brute Force\n # sift = cv2.xfeatures2d.SIFT_create(100)\n # kp1, des1 = sift.detectAndCompute(imgl, None)\n # kp2, des2 = sift.detectAndCompute(imgr, None)\n # bf = cv2.BFMatcher()\n # # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = False)\n # matches = bf.knnMatch(des1, des2, k=2)\n # goodMatches = []\n # minRatio = 1/3\n # for m,n in matches:\n # if m.distance / n.distance < minRatio:\n # goodMatches.append([m])\n # sorted(goodMatches,key=lambda x:x[0].distance)\n # # draw the best match points\n # img3 = None\n # img3 = cv2.drawMatchesKnn(imgl, kp1, imgr, kp2, matches, img3, flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)\n # img3 = cv2.resize(img3,(1000, 400))\n # cv2.imwrite('output/epilines/epilines_' + id, img3)\n\n\ndef findMatches(img1, img2, id):\n \"\"\"\n :param img1: The left image\n :param img2: The right image\n :param id: The name of image\n :return: The list of symmetric point\n \"\"\"\n # vgg = cv2.xfeatures2d.VGG_create()\n # brisk = cv2.BRISK_create()\n # gms = cv2.xfeatures2d.matchGMS()\n # sift = cv2.xfeatures2d.SIFT_create(100)\n sift = cv2.xfeatures2d.SIFT_create()\n\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(img1, None)\n kp2, des2 = sift.detectAndCompute(img2, None)\n\n # FLANN parameters\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n\n matchesMask = [[0, 0] for i in range(len(matches))]\n good = []\n pts1 = []\n pts2 = []\n # ratio test as per Lowe's paper\n for i, (m, n) in enumerate(matches):\n if m.distance < 0.8 * n.distance:\n good.append(m)\n pts2.append(kp2[m.trainIdx].pt)\n pts1.append(kp1[m.queryIdx].pt)\n matchesMask[i] = [1, 0]\n\n # draw matches\n drawParams = dict( # singlePointColor=(255,0,0), matchColor=(0,255,0),\n matchesMask=matchesMask,\n flags=0)\n resultImage = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **drawParams)\n\n # # Univariate transformation\n # matchesMask = Univariatetrans(good, kp1, kp2, img1, img2)\n # # draw matches\n # drawParams = dict(matchColor = (0,255,0), # draw matches in green color\n # singlePointColor = None, matchesMask = matchesMask, flags = 2)\n # resultImage = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **drawParams)\n\n # cv2.imwrite('output/epilines/epilines_' + id, resultImage)\n cv2.imwrite('output/calibration_binocular/epilines_' + id, resultImage)\n\n return pts1, pts2\n\n\ndef Univariatetrans(goodMatches, kp1, kp2, img1, img2):\n \"\"\"\n :param goodMatches: The matched points\n :param kp1: keypoints 1\n :param kp2: keypoints 2\n :param img1: image 1\n :param img2: image 2\n :return: matchesMask\n \"\"\"\n MIN_MATCH_COUNT = 10\n\n if len(goodMatches) > MIN_MATCH_COUNT:\n\n src_pts = np.float32([kp1[m.queryIdx].pt for m in goodMatches]).reshape(-1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in goodMatches]).reshape(-1, 2)\n\n # Get the projection matrix\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist() # used to configure the match plot: only draw match lines for the homography inlier keypoints\n\n h, w = img1.shape[:2]\n\n # four corner\n pts = np.float32([[55, 74], [695, 45], [727, 464], [102, 548]]).reshape(-1, 1, 2)\n dst = cv2.perspectiveTransform(pts, M)\n\n # Draw the framework\n img2 = cv2.polylines(img2, [np.int32(dst)], True, (0, 255, 0), 2, cv2.LINE_AA)\n\n else:\n print(\"Not enough matches are found - %d/%d\" % (len(goodMatches), MIN_MATCH_COUNT))\n matchesMask = None\n\n return matchesMask\n\n\ndef findFundamentalMatrix(pts1, pts2):\n \"\"\"\n :param pts1: Symmetric point list 1\n :param pts2: Symmetric point list 2\n :return: Fundamental matrix and inlier points\n \"\"\"\n pts1 = np.int32(pts1)\n pts2 = np.int32(pts2)\n F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)\n # F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.RANSAC, 5.0)\n\n # We select only inlier points\n pts1 = pts1[mask.ravel() == 1]\n pts2 = pts2[mask.ravel() == 1]\n\n return F, pts1, pts2\n\n\ndef findEpilines(img1, img2, pts1, pts2, F, id):\n \"\"\"\n :param img1: The left image\n :param img2: The right image\n :param pts1: Symmetric point 1\n :param pts2: Symmetric point 2\n :param F: Fundamental matrix\n :param id: The id of raw picture\n :return:\n \"\"\"\n # Find epilines corresponding to points in right image (second image) [img6] and\n # drawing its lines on left image [img5]\n lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)\n lines1 = lines1.reshape(-1, 3)\n img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)\n\n # Find epilines corresponding to points in left image (first image) [img4] and\n # drawing its lines on right image [img3]\n lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)\n lines2 = lines2.reshape(-1, 3)\n img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)\n # cv2.imwrite('output/epilines/epilines_left' + id, img5)\n # cv2.imwrite('output/epilines/epilines_right' + id, img3)\n cv2.imwrite('output/calibration_binocular/epilines_left' + id, img5)\n cv2.imwrite('output/calibration_binocular/epilines_right' + id, img3)\n # plt.subplot(121), plt.imshow(img5)\n # plt.subplot(122), plt.imshow(img3)\n\n plt.subplot(221), plt.imshow(img5)\n plt.subplot(222), plt.imshow(img6)\n plt.subplot(223), plt.imshow(img3)\n plt.subplot(224), plt.imshow(img4)\n plt.show()\n\n\ndef drawlines(img1, img2, lines, pts1, pts2):\n \"\"\"\n :param img1: The image on which we draw the epilines for the points in img2\n :param img2: The other image\n :param lines: corresponding epilines\n :param pts1: Inlier point 1\n :param pts2: Inlier point 2\n :return: The new left and right image\n \"\"\"\n r, c = img1.shape\n img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)\n img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)\n for r, pt1, pt2 in zip(lines, pts1, pts2):\n color = tuple(np.random.randint(0, 255, 3).tolist())\n x0, y0 = map(int, [0, -r[2] / r[1]])\n x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])\n img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)\n img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)\n img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)\n return img1, img2\n\n\ndef main():\n # for id in range(1, 15):\n # if id == 10:\n # continue\n # # leftpath = './data/left/left' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # # rightpath = './data/right/right' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # leftpath = 'output/calibration_binocular/rectifiedleft' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # rightpath = 'output/calibration_binocular/rectifiedright' + ('0' if (id < 10) else '') + str(id) + '.jpg'\n # print(leftpath)\n # print(rightpath)\n # Epipolar_geometry(leftpath, rightpath)\n leftpath = 'output/calibration_binocular/rectifiedleft04.jpg'\n rightpath = 'output/calibration_binocular/rectifiedright04.jpg'\n print(leftpath)\n print(rightpath)\n Epipolar_geometry(leftpath, rightpath)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.float32", "numpy.int32", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "numpy.random.randint" ] ]
vednatnaik/sip_calculator
[ "84da3c5c314f4da27dea2bccc0c05620cd18ec6c" ]
[ "Yearly_Sip_calculator.py" ]
[ "import matplotlib\r\nmatplotlib.use('WebAgg')\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef sip_calculator (sip_amount, years, IntrestRate):\r\n\r\n current_amount = sip_amount\r\n current_amount = sip_amount + (current_amount * IntrestRate) / 100\r\n\r\n print(f\"first year return {current_amount}\")\r\n\r\n for n in range(0, years - 1):\r\n RR = sip_amount + current_amount\r\n Nextmonthreturn = RR + (RR * IntrestRate) / 100\r\n # print(RR)\r\n print(f\"your {n + 2} years return is {round(Nextmonthreturn, 2)} Rs/-\")\r\n current_amount = Nextmonthreturn\r\n\r\n print(\"\")\r\n Invested_amount = sip_amount * years\r\n total_value = current_amount\r\n est_return = total_value - Invested_amount\r\n print(f\"Invested amount is = {round(Invested_amount, 2)}Rs\")\r\n print(\"\")\r\n print(f\"Estimated return = {round(est_return, 2)}Rs\")\r\n print(\"\")\r\n print(f\"Total Value = {round(total_value, 2)}Rs\")\r\n print(\"\")\r\n\r\n\r\n list_data_name = [\"Invested Amount\", \"Est. Returns\"]\r\n list_data = [round(Invested_amount, 2), round(est_return, 2)]\r\n my_circle = plt.Circle((0, 0), 0.7, color='white')\r\n fig = plt.figure()\r\n plt.pie(list_data, labels=list_data_name)\r\n p = plt.gcf()\r\n p.gca().add_artist(my_circle)\r\n plt.show()\r\n\r\n\r\nprint(\"enter the amount you would like to invest per month:- \")\r\nsip_amount =12*int(input())\r\n\r\nprint(\"No. of years:-\")\r\nyears = int(input())\r\n\r\nprint(\"expected rate of return:-\")\r\nIntrestRate = int(input())\r\n\r\n\r\nsip_calculator(sip_amount,years,IntrestRate)" ]
[ [ "matplotlib.pyplot.Circle", "matplotlib.pyplot.figure", "matplotlib.pyplot.gcf", "matplotlib.pyplot.pie", "matplotlib.pyplot.show", "matplotlib.use" ] ]
vita-epfl/openpifpaf_posetrack
[ "282ba063450d523728637167420d9ade4d9c1e65" ]
[ "openpifpaf_posetrack/transforms/scale.py" ]
[ "import logging\n\nimport numpy as np\nimport PIL\n\nimport openpifpaf\nfrom openpifpaf.transforms.scale import _scale\n\nLOG = logging.getLogger(__name__)\n\n\nclass ScaleMix(openpifpaf.transforms.Preprocess):\n def __init__(self, scale_threshold, *,\n upscale_factor=2.0,\n downscale_factor=0.5,\n resample=PIL.Image.BILINEAR):\n self.scale_threshold = scale_threshold\n self.upscale_factor = upscale_factor\n self.downscale_factor = downscale_factor\n self.resample = resample\n\n def __call__(self, images, all_anns, metas):\n scales = np.array([\n np.sqrt(ann['bbox'][2] * ann['bbox'][3])\n for anns in all_anns\n for ann in anns if (not ann.get('iscrowd', False)\n and np.any(ann['keypoints'][:, 2] > 0.0))\n ])\n LOG.debug('scale threshold = %f, scales = %s', self.scale_threshold, scales)\n if not scales.shape[0]:\n return images, all_anns, metas\n\n all_above_threshold = np.all(scales > self.scale_threshold)\n all_below_threshold = np.all(scales < self.scale_threshold)\n if not all_above_threshold and \\\n not all_below_threshold:\n return images, all_anns, metas\n\n new_images = []\n new_all_anns = []\n new_metas = []\n for image, anns, meta in zip(images, all_anns, metas):\n w, h = image.size\n\n if all_above_threshold:\n target_w, target_h = int(w * self.downscale_factor), int(h * self.downscale_factor)\n else:\n target_w, target_h = int(w * self.upscale_factor), int(h * self.upscale_factor)\n\n new_image, new_anns, new_meta = \\\n _scale(image, anns, meta, target_w, target_h, self.resample)\n new_images.append(new_image)\n new_all_anns.append(new_anns)\n new_metas.append(new_meta)\n\n return new_images, new_all_anns, new_metas\n" ]
[ [ "numpy.sqrt", "numpy.all", "numpy.any" ] ]
FunByJohn/QaDiL
[ "9e22bb061c5a2c32473c7ab3aa9b9cce4e98c963" ]
[ "Notes/IMO21/img/Matplotlib/sephp.py" ]
[ "#import numpy as np\nimport matplotlib\n#matplotlib.rcParams['text.usetex'] = True\nimport matplotlib.pyplot as plt\n\nplt.plot([1.35, 1.42, 1.45, 1.52], [35, 50, 40, 45], 'ro')\n\nplt.plot([1.68, 1.70, 1.73, 1.73], [65, 70, 60, 80], 'bo')\n\nplt.axis([1.3, 1.8, 30, 90])\n\nplt.xlabel(\"height (m)\")\n\nplt.ylabel(\"weight (kg)\")\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
timudk/probability
[ "8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103" ]
[ "tensorflow_probability/python/internal/special_math.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Functions \"ndtr\" and \"ndtri\" are derived from calculations made in:\n# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n# In the following email exchange, the author gives his consent to redistribute\n# derived works under an Apache 2.0 license.\n#\n# From: Stephen Moshier <[email protected]>\n# Date: Sat, Jun 9, 2018 at 2:36 PM\n# Subject: Re: Licensing cephes under Apache (BSD-like) license.\n# To: rif <[email protected]>\n#\n#\n#\n# Hello Rif,\n#\n# Yes, Google may distribute Cephes files under the Apache 2 license.\n#\n# If clarification is needed, I do not favor BSD over other free licenses.\n# I would agree that Apache 2 seems to cover the concern you mentioned\n# about sublicensees.\n#\n# Best wishes for good luck with your projects!\n# Steve Moshier\n#\n#\n#\n# On Thu, 31 May 2018, rif wrote:\n#\n# > Hello Steve.\n# > My name is Rif. I work on machine learning software at Google.\n# >\n# > Your cephes software continues to be incredibly useful and widely used. I\n# > was wondering whether it would be permissible for us to use the Cephes code\n# > under the Apache 2.0 license, which is extremely similar in permissions to\n# > the BSD license (Wikipedia comparisons). This would be quite helpful to us\n# > in terms of avoiding multiple licenses on software.\n# >\n# > I'm sorry to bother you with this (I can imagine you're sick of hearing\n# > about this by now), but I want to be absolutely clear we're on the level and\n# > not misusing your important software. 
In former conversation with Eugene\n# > Brevdo ([email protected]), you wrote \"If your licensing is similar to BSD,\n# > the formal way that has been handled is simply to add a statement to the\n# > effect that you are incorporating the Cephes software by permission of the\n# > author.\" I wanted to confirm that (a) we could use the Apache license, (b)\n# > that we don't need to (and probably you don't want to) keep getting\n# > contacted about individual uses, because your intent is generally to allow\n# > this software to be reused under \"BSD-like\" license, and (c) you're OK\n# > letting incorporators decide whether a license is sufficiently BSD-like?\n# >\n# > Best,\n# >\n# > rif\n# >\n# >\n# >\n\n\"\"\"Special Math Ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import dtype_util\n\n__all__ = [\n \"erfinv\",\n \"ndtr\",\n \"ndtri\",\n \"log_ndtr\",\n \"log_cdf_laplace\",\n]\n\n\n# log_ndtr uses different functions over the ranges\n# (-infty, lower](lower, upper](upper, infty)\n# Lower bound values were chosen by examining where the support of ndtr\n# appears to be zero, relative to scipy's (which is always 64bit). They were\n# then made more conservative just to be safe. (Conservative means use the\n# expansion more than we probably need to.) See `NdtrTest` in\n# special_math_test.py.\nLOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64)\nLOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32)\n\n# Upper bound values were chosen by examining for which values of 'x'\n# Log[cdf(x)] is 0, after which point we need to use the approximation\n# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly\n# conservative, meaning we use the approximation earlier than needed.\nLOGNDTR_FLOAT64_UPPER = np.array(8, np.float64)\nLOGNDTR_FLOAT32_UPPER = np.array(5, np.float32)\n\n\ndef ndtr(x, name=\"ndtr\"):\n \"\"\"Normal distribution function.\n\n Returns the area under the Gaussian probability density function, integrated\n from minus infinity to x:\n\n ```\n 1 / x\n ndtr(x) = ---------- | exp(-0.5 t**2) dt\n sqrt(2 pi) /-inf\n\n = 0.5 (1 + erf(x / sqrt(2)))\n = 0.5 erfc(x / sqrt(2))\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"ndtr\").\n\n Returns:\n ndtr: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:\n raise TypeError(\n \"x.dtype=%s is not handled, see docstring for supported types.\"\n % x.dtype)\n return _ndtr(x)\n\n\ndef _ndtr(x):\n \"\"\"Implements ndtr core logic.\"\"\"\n half_sqrt_2 = tf.constant(\n 0.5 * np.sqrt(2.), dtype=x.dtype, name=\"half_sqrt_2\")\n w = x * half_sqrt_2\n z = tf.abs(w)\n y = tf1.where(\n tf.less(z, half_sqrt_2), 1. + tf.math.erf(w),\n tf1.where(tf.greater(w, 0.), 2. 
- tf.math.erfc(z), tf.math.erfc(z)))\n return 0.5 * y\n\n\ndef ndtri(p, name=\"ndtri\"):\n \"\"\"The inverse of the CDF of the Normal distribution function.\n\n Returns x such that the area under the pdf from minus infinity to x is equal\n to p.\n\n A piece-wise rational approximation is done for the function.\n This is a port of the implementation in netlib.\n\n Args:\n p: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"ndtri\").\n\n Returns:\n x: `Tensor` with `dtype=p.dtype`.\n\n Raises:\n TypeError: if `p` is not floating-type.\n \"\"\"\n\n with tf.name_scope(name):\n p = tf.convert_to_tensor(value=p, name=\"p\")\n if dtype_util.as_numpy_dtype(p.dtype) not in [np.float32, np.float64]:\n raise TypeError(\n \"p.dtype=%s is not handled, see docstring for supported types.\"\n % p.dtype)\n return _ndtri(p)\n\n\ndef _ndtri(p):\n \"\"\"Implements ndtri core logic.\"\"\"\n\n # Constants used in piece-wise rational approximations. Taken from the cephes\n # library:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n p0 = list(reversed([-5.99633501014107895267E1,\n 9.80010754185999661536E1,\n -5.66762857469070293439E1,\n 1.39312609387279679503E1,\n -1.23916583867381258016E0]))\n q0 = list(reversed([1.0,\n 1.95448858338141759834E0,\n 4.67627912898881538453E0,\n 8.63602421390890590575E1,\n -2.25462687854119370527E2,\n 2.00260212380060660359E2,\n -8.20372256168333339912E1,\n 1.59056225126211695515E1,\n -1.18331621121330003142E0]))\n p1 = list(reversed([4.05544892305962419923E0,\n 3.15251094599893866154E1,\n 5.71628192246421288162E1,\n 4.40805073893200834700E1,\n 1.46849561928858024014E1,\n 2.18663306850790267539E0,\n -1.40256079171354495875E-1,\n -3.50424626827848203418E-2,\n -8.57456785154685413611E-4]))\n q1 = list(reversed([1.0,\n 1.57799883256466749731E1,\n 4.53907635128879210584E1,\n 4.13172038254672030440E1,\n 1.50425385692907503408E1,\n 2.50464946208309415979E0,\n -1.42182922854787788574E-1,\n -3.80806407691578277194E-2,\n -9.33259480895457427372E-4]))\n p2 = list(reversed([3.23774891776946035970E0,\n 6.91522889068984211695E0,\n 3.93881025292474443415E0,\n 1.33303460815807542389E0,\n 2.01485389549179081538E-1,\n 1.23716634817820021358E-2,\n 3.01581553508235416007E-4,\n 2.65806974686737550832E-6,\n 6.23974539184983293730E-9]))\n q2 = list(reversed([1.0,\n 6.02427039364742014255E0,\n 3.67983563856160859403E0,\n 1.37702099489081330271E0,\n 2.16236993594496635890E-1,\n 1.34204006088543189037E-2,\n 3.28014464682127739104E-4,\n 2.89247864745380683936E-6,\n 6.79019408009981274425E-9]))\n\n def _create_polynomial(var, coeffs):\n \"\"\"Compute n_th order polynomial via Horner's method.\"\"\"\n coeffs = np.array(coeffs, dtype_util.as_numpy_dtype(var.dtype))\n if not coeffs.size:\n return tf.zeros_like(var)\n return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var\n\n maybe_complement_p = tf1.where(p > -np.expm1(-2.), 1. - p, p)\n # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs\n # later on. The result from the computation when p == 0 is not used so any\n # number that doesn't result in NaNs is fine.\n sanitized_mcp = tf1.where(\n maybe_complement_p <= 0.,\n tf.fill(tf.shape(input=p),\n dtype_util.as_numpy_dtype(p.dtype)(0.5)), maybe_complement_p)\n\n # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).\n w = sanitized_mcp - 0.5\n ww = w ** 2\n x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)\n / _create_polynomial(ww, q0))\n x_for_big_p *= -np.sqrt(2. 
* np.pi)\n\n # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),\n # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different\n # arrays based on whether p < exp(-32).\n z = tf.sqrt(-2. * tf.math.log(sanitized_mcp))\n first_term = z - tf.math.log(z) / z\n second_term_small_p = (\n _create_polynomial(1. / z, p2) /\n _create_polynomial(1. / z, q2) / z)\n second_term_otherwise = (\n _create_polynomial(1. / z, p1) /\n _create_polynomial(1. / z, q1) / z)\n x_for_small_p = first_term - second_term_small_p\n x_otherwise = first_term - second_term_otherwise\n\n x = tf1.where(sanitized_mcp > np.exp(-2.), x_for_big_p,\n tf1.where(z >= 8.0, x_for_small_p, x_otherwise))\n\n x = tf1.where(p > 1. - np.exp(-2.), x, -x)\n infinity_scalar = tf.constant(np.inf, dtype=p.dtype)\n infinity = tf.fill(tf.shape(input=p), infinity_scalar)\n x_nan_replaced = tf1.where(p <= 0.0, -infinity,\n tf1.where(p >= 1.0, infinity, x))\n return x_nan_replaced\n\n\ndef log_ndtr(x, series_order=3, name=\"log_ndtr\"):\n \"\"\"Log Normal distribution function.\n\n For details of the Normal distribution function see `ndtr`.\n\n This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or\n using an asymptotic series. Specifically:\n - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on\n `log(1-x) ~= -x, x << 1`.\n - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique\n and take a log.\n - For `x <= lower_segment`, we use the series approximation of erf to compute\n the log CDF directly.\n\n The `lower_segment` is set based on the precision of the input:\n\n ```\n lower_segment = { -20, x.dtype=float64\n { -10, x.dtype=float32\n upper_segment = { 8, x.dtype=float64\n { 5, x.dtype=float32\n ```\n\n When `x < lower_segment`, the `ndtr` asymptotic series approximation is:\n\n ```\n ndtr(x) = scale * (1 + sum) + R_N\n scale = exp(-0.5 x**2) / (-x sqrt(2 pi))\n sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}\n R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})\n ```\n\n where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a\n [double-factorial](https://en.wikipedia.org/wiki/Double_factorial).\n\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n series_order: Positive Python `integer`. Maximum depth to\n evaluate the asymptotic expansion. This is the `N` above.\n name: Python string. 
A name for the operation (default=\"log_ndtr\").\n\n Returns:\n log_ndtr: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n TypeError: if `series_order` is a not Python `integer.`\n ValueError: if `series_order` is not in `[0, 30]`.\n \"\"\"\n if not isinstance(series_order, int):\n raise TypeError(\"series_order must be a Python integer.\")\n if series_order < 0:\n raise ValueError(\"series_order must be non-negative.\")\n if series_order > 30:\n raise ValueError(\"series_order must be <= 30.\")\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n\n if dtype_util.base_equal(x.dtype, tf.float64):\n lower_segment = LOGNDTR_FLOAT64_LOWER\n upper_segment = LOGNDTR_FLOAT64_UPPER\n elif dtype_util.base_equal(x.dtype, tf.float32):\n lower_segment = LOGNDTR_FLOAT32_LOWER\n upper_segment = LOGNDTR_FLOAT32_UPPER\n else:\n raise TypeError(\"x.dtype=%s is not supported.\" % x.dtype)\n\n # The basic idea here was ported from:\n # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html\n # We copy the main idea, with a few changes\n # * For x >> 1, and X ~ Normal(0, 1),\n # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],\n # which extends the range of validity of this function.\n # * We use one fixed series_order for all of 'x', rather than adaptive.\n # * Our docstring properly reflects that this is an asymptotic series, not a\n # Taylor series. We also provided a correct bound on the remainder.\n # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when\n # x=0. This happens even though the branch is unchosen because when x=0\n # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan\n # regardless of whether dy is finite. Note that the minimum is a NOP if\n # the branch is chosen.\n return tf1.where(\n tf.greater(x, upper_segment),\n -_ndtr(-x), # log(1-x) ~= -x, x << 1\n tf1.where(\n tf.greater(x, lower_segment),\n tf.math.log(_ndtr(tf.maximum(x, lower_segment))),\n _log_ndtr_lower(tf.minimum(x, lower_segment), series_order)))\n\n\ndef _log_ndtr_lower(x, series_order):\n \"\"\"Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.\"\"\"\n x_2 = tf.square(x)\n # Log of the term multiplying (1 + sum)\n log_scale = -0.5 * x_2 - tf.math.log(-x) - 0.5 * np.log(2. * np.pi)\n return log_scale + tf.math.log(_log_ndtr_asymptotic_series(x, series_order))\n\n\ndef _log_ndtr_asymptotic_series(x, series_order):\n \"\"\"Calculates the asymptotic series used in log_ndtr.\"\"\"\n npdt = dtype_util.as_numpy_dtype(x.dtype)\n if series_order <= 0:\n return npdt(1)\n x_2 = tf.square(x)\n even_sum = tf.zeros_like(x)\n odd_sum = tf.zeros_like(x)\n x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.\n for n in range(1, series_order + 1):\n y = npdt(_double_factorial(2 * n - 1)) / x_2n\n if n % 2:\n odd_sum += y\n else:\n even_sum += y\n x_2n *= x_2\n return 1. + even_sum - odd_sum\n\n\ndef erfinv(x, name=\"erfinv\"):\n \"\"\"The inverse function for erf, the error function.\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. 
A name for the operation (default=\"erfinv\").\n\n Returns:\n x: `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x` is not floating-type.\n \"\"\"\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:\n raise TypeError(\"x.dtype={} is not handled, see docstring for supported \"\n \"types.\".format(dtype_util.name(x.dtype)))\n return ndtri((x + 1.) / 2.) / np.sqrt(2.)\n\n\ndef _double_factorial(n):\n \"\"\"The double factorial function for small Python integer `n`.\"\"\"\n return np.prod(np.arange(n, 1, -2))\n\n\ndef log_cdf_laplace(x, name=\"log_cdf_laplace\"):\n \"\"\"Log Laplace distribution function.\n\n This function calculates `Log[L(x)]`, where `L(x)` is the cumulative\n distribution function of the Laplace distribution, i.e.\n\n ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```\n\n For numerical accuracy, `L(x)` is computed in different ways depending on `x`,\n\n ```\n x <= 0:\n Log[L(x)] = Log[0.5] + x, which is exact\n\n 0 < x:\n Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact\n ```\n\n Args:\n x: `Tensor` of type `float32`, `float64`.\n name: Python string. A name for the operation (default=\"log_ndtr\").\n\n Returns:\n `Tensor` with `dtype=x.dtype`.\n\n Raises:\n TypeError: if `x.dtype` is not handled.\n \"\"\"\n\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x, name=\"x\")\n\n # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.\n lower_solution = -np.log(2.) + x\n\n # safe_exp_neg_x = exp{-x} for x > 0, but is\n # bounded above by 1, which avoids\n # log[1 - 1] = -inf for x = log(1/2), AND\n # exp{-x} --> inf, for x << -1\n safe_exp_neg_x = tf.exp(-tf.abs(x))\n\n # log1p(z) = log(1 + z) approx z for |z| << 1. This approxmation is used\n # internally by log1p, rather than being done explicitly here.\n upper_solution = tf.math.log1p(-0.5 * safe_exp_neg_x)\n\n return tf1.where(x < 0., lower_solution, upper_solution)\n" ]
[ [ "tensorflow.compat.v2.square", "numpy.log", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.math.log", "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.less", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v1.where", "tensorflow.compat.v2.math.log1p", "tensorflow.compat.v2.greater", "numpy.expm1", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.math.erfc", "numpy.arange", "tensorflow.compat.v2.minimum", "tensorflow.compat.v2.abs", "tensorflow.compat.v2.math.erf", "numpy.array", "tensorflow.compat.v2.maximum", "numpy.exp", "numpy.sqrt" ] ]
suytingwan/models
[ "ccdbfe77d071cc19b55fb9f4b738912e35d982ef", "ccdbfe77d071cc19b55fb9f4b738912e35d982ef" ]
[ "PaddleCV/video/metrics/youtube8m/eval_util.py", "dygraph/bmn/model.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Provides functions to help with evaluating models.\"\"\"\nimport datetime\nimport numpy\n\nfrom . import mean_average_precision_calculator as map_calculator\nfrom . import average_precision_calculator as ap_calculator\n\n\ndef flatten(l):\n \"\"\" Merges a list of lists into a single list. \"\"\"\n return [item for sublist in l for item in sublist]\n\n\ndef calculate_hit_at_one(predictions, actuals):\n \"\"\"Performs a local (numpy) calculation of the hit at one.\n\n Args:\n predictions: Matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n actuals: Matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n\n Returns:\n float: The average hit at one across the entire batch.\n \"\"\"\n top_prediction = numpy.argmax(predictions, 1)\n hits = actuals[numpy.arange(actuals.shape[0]), top_prediction]\n return numpy.average(hits)\n\n\ndef calculate_precision_at_equal_recall_rate(predictions, actuals):\n \"\"\"Performs a local (numpy) calculation of the PERR.\n\n Args:\n predictions: Matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n actuals: Matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n\n Returns:\n float: The average precision at equal recall rate across the entire batch.\n \"\"\"\n aggregated_precision = 0.0\n num_videos = actuals.shape[0]\n for row in numpy.arange(num_videos):\n num_labels = int(numpy.sum(actuals[row]))\n top_indices = numpy.argpartition(predictions[row],\n -num_labels)[-num_labels:]\n item_precision = 0.0\n for label_index in top_indices:\n if predictions[row][label_index] > 0:\n item_precision += actuals[row][label_index]\n item_precision /= top_indices.size\n aggregated_precision += item_precision\n aggregated_precision /= num_videos\n return aggregated_precision\n\n\ndef calculate_gap(predictions, actuals, top_k=20):\n \"\"\"Performs a local (numpy) calculation of the global average precision.\n\n Only the top_k predictions are taken for each of the videos.\n\n Args:\n predictions: Matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n actuals: Matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n top_k: How many predictions to use per video.\n\n Returns:\n float: The global average precision.\n \"\"\"\n gap_calculator = ap_calculator.AveragePrecisionCalculator()\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(\n predictions, actuals, top_k)\n gap_calculator.accumulate(\n flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))\n return gap_calculator.peek_ap_at_n()\n\n\ndef top_k_by_class(predictions, labels, k=20):\n \"\"\"Extracts the top k predictions for each video, sorted by class.\n\n Args:\n predictions: A numpy matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n k: the top k non-zero 
entries to preserve in each prediction.\n\n Returns:\n A tuple (predictions,labels, true_positives). 'predictions' and 'labels'\n are lists of lists of floats. 'true_positives' is a list of scalars. The\n length of the lists are equal to the number of classes. The entries in the\n predictions variable are probability predictions, and\n the corresponding entries in the labels variable are the ground truth for\n those predictions. The entries in 'true_positives' are the number of true\n positives for each class in the ground truth.\n\n Raises:\n ValueError: An error occurred when the k is not a positive integer.\n \"\"\"\n if k <= 0:\n raise ValueError(\"k must be a positive integer.\")\n k = min(k, predictions.shape[1])\n num_classes = predictions.shape[1]\n prediction_triplets = []\n for video_index in range(predictions.shape[0]):\n prediction_triplets.extend(\n top_k_triplets(predictions[video_index], labels[video_index], k))\n out_predictions = [[] for v in range(num_classes)]\n out_labels = [[] for v in range(num_classes)]\n for triplet in prediction_triplets:\n out_predictions[triplet[0]].append(triplet[1])\n out_labels[triplet[0]].append(triplet[2])\n out_true_positives = [numpy.sum(labels[:, i]) for i in range(num_classes)]\n\n return out_predictions, out_labels, out_true_positives\n\n\ndef top_k_triplets(predictions, labels, k=20):\n \"\"\"Get the top_k for a 1-d numpy array. Returns a sparse list of tuples in\n (prediction, class) format\"\"\"\n m = len(predictions)\n k = min(k, m)\n indices = numpy.argpartition(predictions, -k)[-k:]\n return [(index, predictions[index], labels[index]) for index in indices]\n\n\nclass EvaluationMetrics(object):\n \"\"\"A class to store the evaluation metrics.\"\"\"\n\n def __init__(self, num_class, top_k):\n \"\"\"Construct an EvaluationMetrics object to store the evaluation metrics.\n\n Args:\n num_class: A positive integer specifying the number of classes.\n top_k: A positive integer specifying how many predictions are considered per video.\n\n Raises:\n ValueError: An error occurred when MeanAveragePrecisionCalculator cannot\n not be constructed.\n \"\"\"\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(\n num_class)\n self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()\n self.top_k = top_k\n self.num_examples = 0\n\n #def accumulate(self, predictions, labels, loss):\n def accumulate(self, loss, predictions, labels):\n \"\"\"Accumulate the metrics calculated locally for this mini-batch.\n\n Args:\n predictions: A numpy matrix containing the outputs of the model.\n Dimensions are 'batch' x 'num_classes'.\n labels: A numpy matrix containing the ground truth labels.\n Dimensions are 'batch' x 'num_classes'.\n loss: A numpy array containing the loss for each sample.\n\n Returns:\n dictionary: A dictionary storing the metrics for the mini-batch.\n\n Raises:\n ValueError: An error occurred when the shape of predictions and actuals\n does not match.\n \"\"\"\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions,\n labels)\n mean_loss = numpy.mean(loss)\n\n # Take the top 20 predictions.\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(\n predictions, labels, self.top_k)\n self.map_calculator.accumulate(sparse_predictions, sparse_labels,\n num_positives)\n self.global_ap_calculator.accumulate(\n 
flatten(sparse_predictions),\n flatten(sparse_labels), sum(num_positives))\n\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_loss += mean_loss * batch_size\n\n return {\n \"hit_at_one\": mean_hit_at_one,\n \"perr\": mean_perr,\n \"loss\": mean_loss\n }\n\n def get(self):\n \"\"\"Calculate the evaluation metrics for the whole epoch.\n\n Raises:\n ValueError: If no examples were accumulated.\n\n Returns:\n dictionary: a dictionary storing the evaluation metrics for the epoch. The\n dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and\n aps (default nan).\n \"\"\"\n if self.num_examples <= 0:\n raise ValueError(\"total_sample must be positive.\")\n avg_hit_at_one = self.sum_hit_at_one / self.num_examples\n avg_perr = self.sum_perr / self.num_examples\n avg_loss = self.sum_loss / self.num_examples\n\n aps = self.map_calculator.peek_map_at_n()\n gap = self.global_ap_calculator.peek_ap_at_n()\n\n epoch_info_dict = {}\n return {\n \"avg_hit_at_one\": avg_hit_at_one,\n \"avg_perr\": avg_perr,\n \"avg_loss\": avg_loss,\n \"aps\": aps,\n \"gap\": gap\n }\n\n def clear(self):\n \"\"\"Clear the evaluation metrics and reset the EvaluationMetrics object.\"\"\"\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import ParamAttr\nimport numpy as np\nimport math\n\nfrom bmn_utils import get_interp1d_mask\n\nDATATYPE = 'float32'\n\n\n# Net\nclass Conv1D(fluid.dygraph.Layer):\n def __init__(self,\n prefix,\n num_channels=256,\n num_filters=256,\n size_k=3,\n padding=1,\n groups=1,\n act=\"relu\"):\n super(Conv1D, self).__init__()\n fan_in = num_channels * size_k * 1\n k = 1. 
/ math.sqrt(fan_in)\n param_attr = ParamAttr(\n name=prefix + \"_w\",\n initializer=fluid.initializer.Uniform(\n low=-k, high=k))\n bias_attr = ParamAttr(\n name=prefix + \"_b\",\n initializer=fluid.initializer.Uniform(\n low=-k, high=k))\n\n self._conv2d = fluid.dygraph.Conv2D(\n num_channels=num_channels,\n num_filters=num_filters,\n filter_size=(1, size_k),\n stride=1,\n padding=(0, padding),\n groups=groups,\n act=act,\n param_attr=param_attr,\n bias_attr=bias_attr)\n\n def forward(self, x):\n x = fluid.layers.unsqueeze(input=x, axes=[2])\n x = self._conv2d(x)\n x = fluid.layers.squeeze(input=x, axes=[2])\n return x\n\n\nclass BMN(fluid.dygraph.Layer):\n def __init__(self, cfg):\n super(BMN, self).__init__()\n\n #init config\n self.tscale = cfg.MODEL.tscale\n self.dscale = cfg.MODEL.dscale\n self.prop_boundary_ratio = cfg.MODEL.prop_boundary_ratio\n self.num_sample = cfg.MODEL.num_sample\n self.num_sample_perbin = cfg.MODEL.num_sample_perbin\n\n self.hidden_dim_1d = 256\n self.hidden_dim_2d = 128\n self.hidden_dim_3d = 512\n\n # Base Module\n self.b_conv1 = Conv1D(\n prefix=\"Base_1\",\n num_channels=400,\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n self.b_conv2 = Conv1D(\n prefix=\"Base_2\",\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n\n # Temporal Evaluation Module\n self.ts_conv1 = Conv1D(\n prefix=\"TEM_s1\",\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n self.ts_conv2 = Conv1D(\n prefix=\"TEM_s2\", num_filters=1, size_k=1, padding=0, act=\"sigmoid\")\n self.te_conv1 = Conv1D(\n prefix=\"TEM_e1\",\n num_filters=self.hidden_dim_1d,\n size_k=3,\n padding=1,\n groups=4,\n act=\"relu\")\n self.te_conv2 = Conv1D(\n prefix=\"TEM_e2\", num_filters=1, size_k=1, padding=0, act=\"sigmoid\")\n\n #Proposal Evaluation Module\n self.p_conv1 = Conv1D(\n prefix=\"PEM_1d\",\n num_filters=self.hidden_dim_2d,\n size_k=3,\n padding=1,\n act=\"relu\")\n\n # init to speed up\n sample_mask = get_interp1d_mask(self.tscale, self.dscale,\n self.prop_boundary_ratio,\n self.num_sample, self.num_sample_perbin)\n self.sample_mask = fluid.dygraph.base.to_variable(sample_mask)\n self.sample_mask.stop_gradient = True\n\n self.p_conv3d1 = fluid.dygraph.Conv3D(\n num_channels=128,\n num_filters=self.hidden_dim_3d,\n filter_size=(self.num_sample, 1, 1),\n stride=(self.num_sample, 1, 1),\n padding=0,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_3d1_w\"),\n bias_attr=ParamAttr(name=\"PEM_3d1_b\"))\n\n self.p_conv2d1 = fluid.dygraph.Conv2D(\n num_channels=512,\n num_filters=self.hidden_dim_2d,\n filter_size=1,\n stride=1,\n padding=0,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_2d1_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d1_b\"))\n self.p_conv2d2 = fluid.dygraph.Conv2D(\n num_channels=128,\n num_filters=self.hidden_dim_2d,\n filter_size=3,\n stride=1,\n padding=1,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_2d2_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d2_b\"))\n self.p_conv2d3 = fluid.dygraph.Conv2D(\n num_channels=128,\n num_filters=self.hidden_dim_2d,\n filter_size=3,\n stride=1,\n padding=1,\n act=\"relu\",\n param_attr=ParamAttr(name=\"PEM_2d3_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d3_b\"))\n self.p_conv2d4 = fluid.dygraph.Conv2D(\n num_channels=128,\n num_filters=2,\n filter_size=1,\n stride=1,\n padding=0,\n act=\"sigmoid\",\n param_attr=ParamAttr(name=\"PEM_2d4_w\"),\n bias_attr=ParamAttr(name=\"PEM_2d4_b\"))\n\n def forward(self, x):\n #Base Module\n x = 
self.b_conv1(x)\n x = self.b_conv2(x)\n\n #TEM\n xs = self.ts_conv1(x)\n xs = self.ts_conv2(xs)\n xs = fluid.layers.squeeze(xs, axes=[1])\n xe = self.te_conv1(x)\n xe = self.te_conv2(xe)\n xe = fluid.layers.squeeze(xe, axes=[1])\n\n #PEM\n xp = self.p_conv1(x)\n #BM layer\n xp = fluid.layers.matmul(xp, self.sample_mask)\n xp = fluid.layers.reshape(\n xp, shape=[0, 0, -1, self.dscale, self.tscale])\n\n xp = self.p_conv3d1(xp)\n xp = fluid.layers.squeeze(xp, axes=[2])\n xp = self.p_conv2d1(xp)\n xp = self.p_conv2d2(xp)\n xp = self.p_conv2d3(xp)\n xp = self.p_conv2d4(xp)\n return xp, xs, xe\n\n\ndef bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end,\n cfg):\n def _get_mask(cfg):\n dscale = cfg.MODEL.dscale\n tscale = cfg.MODEL.tscale\n bm_mask = []\n for idx in range(dscale):\n mask_vector = [1 for i in range(tscale - idx)\n ] + [0 for i in range(idx)]\n bm_mask.append(mask_vector)\n bm_mask = np.array(bm_mask, dtype=np.float32)\n self_bm_mask = fluid.layers.create_global_var(\n shape=[dscale, tscale], value=0, dtype=DATATYPE, persistable=True)\n fluid.layers.assign(bm_mask, self_bm_mask)\n self_bm_mask.stop_gradient = True\n return self_bm_mask\n\n def tem_loss_func(pred_start, pred_end, gt_start, gt_end):\n def bi_loss(pred_score, gt_label):\n pred_score = fluid.layers.reshape(\n x=pred_score, shape=[-1], inplace=False)\n gt_label = fluid.layers.reshape(\n x=gt_label, shape=[-1], inplace=False)\n gt_label.stop_gradient = True\n pmask = fluid.layers.cast(x=(gt_label > 0.5), dtype=DATATYPE)\n num_entries = fluid.layers.cast(\n fluid.layers.shape(pmask), dtype=DATATYPE)\n num_positive = fluid.layers.cast(\n fluid.layers.reduce_sum(pmask), dtype=DATATYPE)\n ratio = num_entries / num_positive\n coef_0 = 0.5 * ratio / (ratio - 1)\n coef_1 = 0.5 * ratio\n epsilon = 0.000001\n temp = fluid.layers.log(pred_score + epsilon)\n loss_pos = fluid.layers.elementwise_mul(\n fluid.layers.log(pred_score + epsilon), pmask)\n loss_pos = coef_1 * fluid.layers.reduce_mean(loss_pos)\n loss_neg = fluid.layers.elementwise_mul(\n fluid.layers.log(1.0 - pred_score + epsilon), (1.0 - pmask))\n loss_neg = coef_0 * fluid.layers.reduce_mean(loss_neg)\n loss = -1 * (loss_pos + loss_neg)\n return loss\n\n loss_start = bi_loss(pred_start, gt_start)\n loss_end = bi_loss(pred_end, gt_end)\n loss = loss_start + loss_end\n return loss\n\n def pem_reg_loss_func(pred_score, gt_iou_map, mask):\n\n gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)\n\n u_hmask = fluid.layers.cast(x=gt_iou_map > 0.7, dtype=DATATYPE)\n u_mmask = fluid.layers.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)\n u_mmask = fluid.layers.cast(x=u_mmask, dtype=DATATYPE)\n u_lmask = fluid.layers.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.)\n u_lmask = fluid.layers.cast(x=u_lmask, dtype=DATATYPE)\n u_lmask = fluid.layers.elementwise_mul(u_lmask, mask)\n\n num_h = fluid.layers.cast(\n fluid.layers.reduce_sum(u_hmask), dtype=DATATYPE)\n num_m = fluid.layers.cast(\n fluid.layers.reduce_sum(u_mmask), dtype=DATATYPE)\n num_l = fluid.layers.cast(\n fluid.layers.reduce_sum(u_lmask), dtype=DATATYPE)\n\n r_m = num_h / num_m\n u_smmask = fluid.layers.uniform_random(\n shape=[gt_iou_map.shape[1], gt_iou_map.shape[2]],\n dtype=DATATYPE,\n min=0.0,\n max=1.0)\n u_smmask = fluid.layers.elementwise_mul(u_mmask, u_smmask)\n u_smmask = fluid.layers.cast(x=(u_smmask > (1. 
- r_m)), dtype=DATATYPE)\n\n r_l = num_h / num_l\n u_slmask = fluid.layers.uniform_random(\n shape=[gt_iou_map.shape[1], gt_iou_map.shape[2]],\n dtype=DATATYPE,\n min=0.0,\n max=1.0)\n u_slmask = fluid.layers.elementwise_mul(u_lmask, u_slmask)\n u_slmask = fluid.layers.cast(x=(u_slmask > (1. - r_l)), dtype=DATATYPE)\n\n weights = u_hmask + u_smmask + u_slmask\n weights.stop_gradient = True\n loss = fluid.layers.square_error_cost(pred_score, gt_iou_map)\n loss = fluid.layers.elementwise_mul(loss, weights)\n loss = 0.5 * fluid.layers.reduce_sum(loss) / fluid.layers.reduce_sum(\n weights)\n\n return loss\n\n def pem_cls_loss_func(pred_score, gt_iou_map, mask):\n gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)\n gt_iou_map.stop_gradient = True\n pmask = fluid.layers.cast(x=(gt_iou_map > 0.9), dtype=DATATYPE)\n nmask = fluid.layers.cast(x=(gt_iou_map <= 0.9), dtype=DATATYPE)\n nmask = fluid.layers.elementwise_mul(nmask, mask)\n\n num_positive = fluid.layers.reduce_sum(pmask)\n num_entries = num_positive + fluid.layers.reduce_sum(nmask)\n ratio = num_entries / num_positive\n coef_0 = 0.5 * ratio / (ratio - 1)\n coef_1 = 0.5 * ratio\n epsilon = 0.000001\n loss_pos = fluid.layers.elementwise_mul(\n fluid.layers.log(pred_score + epsilon), pmask)\n loss_pos = coef_1 * fluid.layers.reduce_sum(loss_pos)\n loss_neg = fluid.layers.elementwise_mul(\n fluid.layers.log(1.0 - pred_score + epsilon), nmask)\n loss_neg = coef_0 * fluid.layers.reduce_sum(loss_neg)\n loss = -1 * (loss_pos + loss_neg) / num_entries\n return loss\n\n pred_bm_reg = fluid.layers.squeeze(\n fluid.layers.slice(\n pred_bm, axes=[1], starts=[0], ends=[1]), axes=[1])\n pred_bm_cls = fluid.layers.squeeze(\n fluid.layers.slice(\n pred_bm, axes=[1], starts=[1], ends=[2]), axes=[1])\n\n bm_mask = _get_mask(cfg)\n\n pem_reg_loss = pem_reg_loss_func(pred_bm_reg, gt_iou_map, bm_mask)\n pem_cls_loss = pem_cls_loss_func(pred_bm_cls, gt_iou_map, bm_mask)\n\n tem_loss = tem_loss_func(pred_start, pred_end, gt_start, gt_end)\n\n loss = tem_loss + 10 * pem_reg_loss + pem_cls_loss\n return loss, tem_loss, pem_reg_loss, pem_cls_loss\n" ]
[ [ "numpy.sum", "numpy.argpartition", "numpy.argmax", "numpy.arange", "numpy.average", "numpy.mean" ], [ "numpy.array" ] ]
Faiz99khan/ISL_hand_gesture_recognition_in_real-time
[ "dade99478e9b37440ebe7fb7842d451582132f0a" ]
[ "models/resnet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\n\n__all__ = [\n 'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet200'\n]\n\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n\n\ndef downsample_basic_block(x, planes, stride):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.Tensor(\n out.size(0), planes - out.size(1), out.size(2), out.size(3),\n out.size(4)).zero_()\n if isinstance(out.data, torch.cuda.FloatTensor):\n zero_pads = zero_pads.cuda()\n\n out = Variable(torch.cat([out.data, zero_pads], dim=1))\n\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3x3(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n self.conv2 = nn.Conv3d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self,\n block,\n layers,\n sample_size,\n sample_duration,\n shortcut_type='B',\n num_classes=400):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv3d(\n 3,\n 64,\n kernel_size=7,\n stride=(1, 2, 2),\n padding=(3, 3, 3),\n bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)\n self.layer2 = self._make_layer(\n block, 128, layers[1], shortcut_type, stride=2)\n self.layer3 = self._make_layer(\n block, 256, layers[2], shortcut_type, stride=2)\n self.layer4 = self._make_layer(\n block, 512, layers[3], shortcut_type, stride=2)\n last_duration = int(math.ceil(sample_duration / 16))\n last_size = int(math.ceil(sample_size / 32))\n self.avgpool = nn.AvgPool3d(\n (last_duration, last_size, last_size), stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, 
nn.Conv3d):\n m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(\n downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n nn.Conv3d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef get_fine_tuning_parameters(model, ft_portion):\n if ft_portion == \"complete\":\n return model.parameters()\n\n elif ft_portion == \"last_layer\":\n ft_module_names = []\n ft_module_names.append('classifier')\n\n parameters = []\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ft_module in k:\n parameters.append({'params': v})\n break\n else:\n parameters.append({'params': v, 'lr': 0.0})\n return parameters\n\n else:\n raise ValueError(\"Unsupported ft_portion: 'complete' or 'last_layer' expected\")\n\n\ndef resnet10(**kwargs):\n \"\"\"Constructs a ResNet-10 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model\n\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef resnet152(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\n\ndef resnet200(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model\n" ]
[ [ "torch.nn.init.kaiming_normal", "torch.nn.MaxPool3d", "torch.nn.BatchNorm3d", "torch.nn.Linear", "torch.nn.Sequential", "torch.nn.AvgPool3d", "torch.nn.Conv3d", "torch.nn.functional.avg_pool3d", "torch.nn.ReLU", "torch.cat" ] ]
ghmagazine/python_ml_book
[ "57e874fd4fa86abaa2e2d032d18946942cf50c42" ]
[ "03/evaluation.py" ]
[ "from sklearn import tree\nfrom sklearn.datasets import load_wine\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nwine = load_wine()\ndata = wine.data\ntarget = wine.target\nX_train, X_test, Y_train, Y_test = train_test_split(data, target, test_size=0.2, random_state=0)\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X_train, Y_train)\n\n# Predict the labels of the test data\nY_pred = clf.predict(X_test)\n\n# Display the precision and recall for each class\nprint (classification_report(Y_test, Y_pred, target_names=wine.target_names))\n" ]
[ [ "sklearn.tree.DecisionTreeClassifier", "sklearn.model_selection.train_test_split", "sklearn.metrics.classification_report", "sklearn.datasets.load_wine" ] ]
cdbethune/d3m-primitives
[ "5530da1b8efba7de8cec6890401c5d4091acd45a" ]
[ "scripts/plot_forecasting_comparison.py" ]
[ "from typing import List\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style=\"whitegrid\")\n\nfrom compare_forecasting_methods import pred_lengths\n\ndef to_query(\n elements: List[str],\n):\n if len(elements) == 1:\n return elements[0]\n else:\n return '(' + ' or '.join(elements) + ')'\n\ndef plot(\n metrics: str = 'ts_metrics.csv',\n datasets: str = 'Sorghum',\n horizon: str = 'Short',\n metric: str = 'MAPE',\n predictors: List[str] = [\n 'DeepAR',\n 'DeepFactor',\n 'DeepState',\n 'NBEATS',\n 'NBEATS-Interp',\n 'MQCNN',\n 'MQRNN',\n 'WaveNet', \n 'NPTS',\n ]\n):\n\n metrics = pd.read_csv(metrics)\n\n if datasets == 'Sorghum':\n dataset_names = [\n 'LL1_terra_canopy_height_long_form_s4_70_MIN_METADATA',\n 'LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA',\n 'LL1_terra_canopy_height_long_form_s4_90_MIN_METADATA',\n 'LL1_terra_canopy_height_long_form_s4_100_MIN_METADATA',\n 'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA',\n ]\n elif datasets == 'Malnutrition':\n dataset_names = [\n 'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA', \n 'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA'\n ]\n else:\n raise ValueError(\"'Datasets' must be one of 'Sorghum' or 'Malnutrition'\") \n\n if horizon == 'Short':\n pred_ls = [pred_lengths[dataset_name][0] for dataset_name in dataset_names]\n elif horizon == 'Long':\n pred_ls = [pred_lengths[dataset_name][1] for dataset_name in dataset_names]\n else:\n raise ValueError(\"'Horizon' must be one of 'Short' or 'Long'\")\n\n pred_list = to_query(\n [f'Pred_Length==\"{pred_l}\"' for pred_l in pred_ls]\n )\n dataset_list = to_query(\n [f'Dataset==\"{dataset_name}\"' for dataset_name in dataset_names]\n )\n predictor_list = to_query(\n [f'Predictor==\"{predictor}\"' for predictor in predictors]\n )\n query_list = pred_list + ' and ' + dataset_list + ' and ' + predictor_list\n\n df_slice = metrics.query(query_list)\n plt.clf()\n sns.barplot(x=\"Predictor\", y=metric, data=df_slice)\n plt.xticks(rotation=45)\n plt.subplots_adjust(bottom=0.3)\n plt.xlabel('Forecasting Method')\n plt.title(f'Average {metric} on {datasets} Datasets with {horizon} Horizon')\n plt.savefig(f'{datasets}_{horizon}.png')\n\nplot(\n datasets='Sorghum', \n horizon='Short', \n metric = 'MAPE', \n)\nplot(\n datasets='Sorghum', \n horizon='Long', \n metric = 'MAPE', \n)\nplot(\n datasets='Malnutrition', \n horizon='Short', \n metric = 'MAPE', \n)\nplot(\n datasets='Malnutrition', \n horizon='Long', \n metric = 'MAPE', \n)\n" ]
[ [ "matplotlib.pyplot.xticks", "pandas.read_csv", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel" ] ]
YZ-Zheng/AI-Learns-Handwritten-Digits
[ "6ce2dcce7ed6e4689b3f7d0da3ddcf8ad06ce6ce" ]
[ "train_data.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\n# check if gpu is available\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass Flatten(nn.Module):\n    \"\"\"\n    performs the flatten operation\n    \"\"\"\n    def forward(self, input):\n        return input.view(input.size(0), -1)\n\n\ndef batchify_data(x_data, y_data, batch_size):\n    \"\"\"\n    Takes a set of data points and labels and groups them into batches\n    \"\"\"\n\n    N = int(len(x_data) / batch_size) * batch_size\n    batches = []\n    for i in range(0, N, batch_size):\n        batches.append({\n            'x': torch.tensor(x_data[i:i + batch_size],\n                              dtype=torch.float32),\n            'y': torch.tensor([y_data[0][i:i + batch_size],\n                               y_data[1][i:i + batch_size]],\n                              dtype=torch.int64)\n        })\n    return batches\n\n\ndef compute_accuracy(predictions, y):\n    \"\"\"\n    Computes the accuracy of predictions against actual label y\n    \"\"\"\n    return np.mean(np.equal(predictions.to('cpu').numpy(), y.to('cpu').numpy()))\n\n\ndef train_model(train_data, dev_data, model, lr=0.001, n_epochs=50):\n    \"\"\"\n    Train a model for N epochs given data and hyper-params\n    \"\"\"\n    # Optimize with Adam\n    optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n    print(\"Start training...\")\n    acc_train_upper = []\n    acc_train_lower = []\n    acc_val_upper = []\n    acc_val_lower = []\n\n    for epoch in range(1, n_epochs + 1):\n        print(\"Epoch {}:\\n\".format(epoch))\n\n        # Run training\n        loss, acc = run_epoch(train_data, model.train(), optimizer)\n        print('Train | loss1: {:.6f} accuracy1: {:.6f} | loss2: {:.6f} accuracy2: {:.6f}'.format(loss[0], acc[0], loss[1], acc[1]))\n        acc_train_upper.append(acc[0])\n        acc_train_lower.append(acc[1])\n\n        # Run validation\n        val_loss, val_acc = run_epoch(dev_data, model.eval(), optimizer)\n        print('Valid | loss1: {:.6f} accuracy1: {:.6f} | loss2: {:.6f} accuracy2: {:.6f}\\n'.format(val_loss[0], val_acc[0], val_loss[1], val_acc[1]))\n        acc_val_upper.append(val_acc[0])\n        acc_val_lower.append(val_acc[1])\n\n    return acc_train_upper, acc_train_lower, acc_val_upper, acc_val_lower\n\n\ndef run_epoch(data, model, optimizer):\n    \"\"\"\n    Train model for one pass of train data, and return loss, accuracy\n    \"\"\"\n    # Gather losses\n    losses_first_label = []\n    losses_second_label = []\n    batch_accuracies_first = []\n    batch_accuracies_second = []\n\n    # If model is in train mode, use optimizer.\n    is_training = model.training\n\n    # Iterate through batches\n    for batch in data:\n        x, y = batch['x'].to(device), batch['y'].to(device)\n\n        # Get output predictions for both the upper and lower numbers\n        out1, out2 = model(x)\n\n        # Predict and store accuracy\n        predictions_first_label = torch.argmax(out1, dim=1)\n        predictions_second_label = torch.argmax(out2, dim=1)\n        batch_accuracies_first.append(compute_accuracy(predictions_first_label, y[0]))\n        batch_accuracies_second.append(compute_accuracy(predictions_second_label, y[1]))\n\n        # Compute both losses\n        loss1 = F.cross_entropy(out1, y[0])\n        loss2 = F.cross_entropy(out2, y[1])\n        losses_first_label.append(loss1.data.item())\n        losses_second_label.append(loss2.data.item())\n\n        # If training, do an update.\n        if is_training:\n            optimizer.zero_grad()\n            joint_loss = 0.5 * (loss1 + loss2)\n            joint_loss.backward()\n            optimizer.step()\n\n    # Calculate epoch level scores\n    avg_loss = np.mean(losses_first_label), np.mean(losses_second_label)\n    avg_accuracy = np.mean(batch_accuracies_first), np.mean(batch_accuracies_second)\n    return avg_loss, avg_accuracy\n" ]
[ [ "torch.argmax", "torch.tensor", "torch.cuda.is_available", "torch.nn.functional.cross_entropy", "numpy.mean" ] ]
ai2cm/fv3net
[ "e62038aee0a97d6207e66baabd8938467838cf51" ]
[ "workflows/diagnostics/fv3net/diagnostics/prognostic_run/computed_diagnostics.py" ]
[ "\"\"\"Utilities for loading computed diagnostics\n\n\"\"\"\nimport json\nfrom typing import Iterable, Hashable, Sequence, Tuple, Any, Set, Mapping\nimport os\nimport xarray as xr\nimport numpy as np\nimport fsspec\nimport pandas as pd\nfrom pathlib import Path\nfrom dataclasses import dataclass\nimport tempfile\n\nfrom .metrics import metrics_registry\nfrom .derived_diagnostics import derived_registry\nfrom .constants import MovieUrls\n\n\n__all__ = [\"ComputedDiagnosticsList\", \"RunDiagnostics\"]\n\n\nGRID_VARS = [\"area\", \"lonb\", \"latb\", \"lon\", \"lat\", \"land_sea_mask\"]\n\nDiagnostics = Sequence[xr.Dataset]\nMetadata = Any\n\n\n@dataclass\nclass ComputedDiagnosticsList:\n    folders: Mapping[str, \"DiagnosticFolder\"]\n\n    @staticmethod\n    def from_directory(url: str) -> \"ComputedDiagnosticsList\":\n        \"\"\"Open a directory of computed diagnostics\n\n        Args:\n            url: URL to directory containing rundirs as subdirectories.\n                Each rundir contains diags.nc, metrics.json, and .mp4 files.\n        \"\"\"\n        fs, _, _ = fsspec.get_fs_token_paths(url)\n        return ComputedDiagnosticsList(detect_folders(url, fs))\n\n    @staticmethod\n    def from_urls(urls: Sequence[str]) -> \"ComputedDiagnosticsList\":\n        \"\"\"Open computed diagnostics at the specified urls\n        \"\"\"\n\n        def url_to_folder(url):\n            fs, _, path = fsspec.get_fs_token_paths(url)\n            return DiagnosticFolder(fs, path[0])\n\n        return ComputedDiagnosticsList(\n            {str(k): url_to_folder(url) for k, url in enumerate(urls)}\n        )\n\n    @staticmethod\n    def from_json(\n        url: str, urls_are_rundirs: bool = False\n    ) -> \"ComputedDiagnosticsList\":\n        \"\"\"Open labeled computed diagnostics at urls specified in given JSON.\"\"\"\n\n        def url_to_folder(url):\n            fs, _, path = fsspec.get_fs_token_paths(url)\n            return DiagnosticFolder(fs, path[0])\n\n        with fsspec.open(url) as f:\n            rundirs = json.load(f)\n\n        if urls_are_rundirs:\n            for item in rundirs:\n                item[\"url\"] += \"_diagnostics\"\n\n        return ComputedDiagnosticsList(\n            {item[\"name\"]: url_to_folder(item[\"url\"]) for item in rundirs}\n        )\n\n    def load_metrics(self) -> \"RunMetrics\":\n        return RunMetrics(load_metrics(self.folders))\n\n    def load_diagnostics(self) -> Tuple[Metadata, \"RunDiagnostics\"]:\n        metadata, xarray_diags = load_diagnostics(self.folders)\n        return metadata, RunDiagnostics(xarray_diags)\n\n    def load_metrics_from_diagnostics(self) -> \"RunMetrics\":\n        \"\"\"Compute metrics on the fly from the pre-computed diagnostics.\"\"\"\n        return RunMetrics(load_metrics_from_diagnostics(self.folders))\n\n    def find_movie_urls(self) -> MovieUrls:\n        return {name: folder.movie_urls for name, folder in self.folders.items()}\n\n\n@dataclass\nclass RunDiagnostics:\n    \"\"\"A collection of diagnostics from different runs, not all of which have\n    the same variables\n\n    \"\"\"\n\n    diagnostics: Diagnostics\n\n    def __post_init__(self):\n        # indexes for faster lookup\n        self._attrs = {ds.run: ds.attrs for ds in self.diagnostics}\n        self._varnames = {ds.run: set(ds) for ds in self.diagnostics}\n        self._run_index = {ds.run: k for k, ds in enumerate(self.diagnostics)}\n\n    @property\n    def runs(self) -> Sequence[str]:\n        \"\"\"The available runs\"\"\"\n        return list(self._run_index)\n\n    @property\n    def variables(self) -> Set[str]:\n        \"\"\"The available variables\"\"\"\n        return set.union(*[set(d) for d in self.diagnostics])\n\n    @property\n    def long_names(self) -> Mapping[str, str]:\n        \"\"\"Mapping from variable name to long names\"\"\"\n        vars = self.variables\n        run = self.runs[0]\n        return {v: self.get_variable(run, v).attrs.get(\"long_name\", v) for v in vars}\n\n    def _get_run(self, run: str) -> xr.Dataset:\n        return self.diagnostics[self._run_index[run]]\n\n    def get_variable(self, run: str, varname: Hashable) -> xr.DataArray:\n        \"\"\"Query a collection of diagnostics for a given run and variable\n\n        Args:\n            run: name of the run to query\n            varname: variable to extract from that run\n\n        Returns:\n            varname of run if present, otherwise nans with the expected\n            metadata\n\n        \"\"\"\n        if varname in self._varnames[run]:\n            return self._get_run(run)[varname]\n        else:\n            for run in self._varnames:\n                if varname in self._varnames[run]:\n                    template = self._get_run(run)[varname]\n                    return xr.full_like(template, np.nan)\n            raise ValueError(f\"{varname} not found.\")\n\n    def get_variables(self, run: str, varnames: Sequence[Hashable]) -> xr.Dataset:\n        \"\"\"Query a collection of diagnostics and return dataset of variables.\"\"\"\n        variables = [self.get_variable(run, v) for v in varnames]\n        return xr.merge(variables)\n\n    def matching_variables(self, varfilter: str) -> Set[str]:\n        \"\"\"The available variables that include varfilter in their names.\"\"\"\n        return set(v for v in self.variables if varfilter in v)\n\n    def is_baseline(self, run: str) -> bool:\n        return self._attrs[run][\"baseline\"]\n\n    @staticmethod\n    def is_verification(run: str) -> bool:\n        return run == \"verification\"\n\n\n@dataclass\nclass RunMetrics:\n    \"\"\"A collection of metrics from different runs, not all of which have the\n    same metrics\"\"\"\n\n    metrics: pd.DataFrame\n\n    @property\n    def empty(self) -> bool:\n        return self.metrics.empty\n\n    @property\n    def runs(self) -> Sequence[str]:\n        \"\"\"The available runs\"\"\"\n        return list(self.metrics.run.unique())\n\n    @property\n    def types(self) -> Set[str]:\n        \"\"\"The available types of metrics\"\"\"\n        metric_names = [self._prefix(m) for m in self.metrics.metric]\n        return set(metric_names)\n\n    def get_metric_variables(self, metric_type: str) -> Set[str]:\n        \"\"\"The available variables for given metric_type\"\"\"\n        metric_names = [\n            m for m in self.metrics.metric if self._prefix(m) == metric_type\n        ]\n        return set([self._suffix(m) for m in metric_names])\n\n    def get_metric_value(self, metric_type: str, variable: str, run: str) -> float:\n        m = self._get_metric(metric_type, variable, run)\n        if m.empty:\n            return np.nan\n        else:\n            return m.value.item()\n\n    def get_metric_units(self, metric_type: str, variable: str, run: str) -> str:\n        m = self._get_metric(metric_type, variable, run)\n        if m.empty:\n            return \"\"\n        else:\n            return m.units.item()\n\n    def get_metric_all_runs(self, metric_type: str, variable: str) -> pd.Series:\n        metric_name = self.metric_name(metric_type, variable)\n        return self.metrics[self.metrics.metric == metric_name]\n\n    @staticmethod\n    def _prefix(metric: str) -> str:\n        return metric.split(\"/\")[0]\n\n    @staticmethod\n    def _suffix(metric: str) -> str:\n        return metric.split(\"/\")[1]\n\n    @staticmethod\n    def metric_name(metric_type: str, variable: str) -> str:\n        return f\"{metric_type}/{variable}\"\n\n    def _get_metric(self, metric_type: str, variable: str, run: str) -> pd.Series:\n        _metrics = self.get_metric_all_runs(metric_type, variable)\n        return _metrics[_metrics.run == run]\n\n\ndef load_metrics(rundirs) -> pd.DataFrame:\n    \"\"\"Load the metrics from a bucket\"\"\"\n    metrics = _load_metrics(rundirs)\n    return _metrics_dataframe_from_dict(metrics)\n\n\ndef load_metrics_from_diagnostics(rundirs) -> pd.DataFrame:\n    \"\"\"Load the diagnostics from a bucket and compute metrics\"\"\"\n    metrics = {}\n    _, diagnostics = load_diagnostics(rundirs)\n    for ds in diagnostics:\n        metrics[ds.run] = metrics_registry.compute(ds, n_jobs=1)\n    return _metrics_dataframe_from_dict(metrics)\n\n\ndef _metrics_dataframe_from_dict(metrics) -> pd.DataFrame:\n    metric_table = pd.DataFrame.from_records(_yield_metric_rows(metrics))\n    run_table = parse_rundirs(list(metrics.keys()))\n    return pd.merge(run_table, metric_table, on=\"run\")\n\n\ndef load_diagnostics(rundirs) -> Tuple[Metadata, Diagnostics]:\n    \"\"\"Load metadata and merged diagnostics from a bucket\"\"\"\n    diags = _load_diags(rundirs)\n    run_table_lookup = parse_rundirs(rundirs)\n    diagnostics = [\n        ds.assign_attrs(run=key, **run_table_lookup.loc[key])\n        for key, ds in diags.items()\n    ]\n    diagnostics = [convert_index_to_datetime(ds, \"time\") for ds in diagnostics]\n    longest_run_ds = _longest_run(diagnostics)\n    diagnostics.append(_get_verification_diagnostics(longest_run_ds))\n    diagnostics = [_add_derived_diagnostics(ds) for ds in diagnostics]\n    return get_metadata(diags), diagnostics\n\n\ndef _add_derived_diagnostics(ds):\n    merged = xr.merge([ds, derived_registry.compute(ds, n_jobs=1)])\n    return merged.assign_attrs(ds.attrs)\n\n\ndef _longest_run(diagnostics: Iterable[xr.Dataset]) -> xr.Dataset:\n    max_length = 0\n    for ds in diagnostics:\n        if ds.sizes[\"time\"] > max_length:\n            longest_ds = ds\n            max_length = ds.sizes[\"time\"]\n    return longest_ds\n\n\n@dataclass\nclass DiagnosticFolder:\n    \"\"\"Represents the output of compute diagnostics\"\"\"\n\n    fs: fsspec.AbstractFileSystem\n    path: str\n\n    @property\n    def metrics(self):\n        path = os.path.join(self.path, \"metrics.json\")\n        return json.loads(self.fs.cat(path))\n\n    @property\n    def diagnostics(self) -> xr.Dataset:\n        path = os.path.join(self.path, \"diags.nc\")\n        with tempfile.NamedTemporaryFile() as f:\n            self.fs.get(path, f.name)\n            return xr.open_dataset(f.name, engine=\"h5netcdf\").compute()\n\n    @property\n    def movie_urls(self) -> Sequence[str]:\n        movie_paths = self.fs.glob(os.path.join(self.path, \"*.mp4\"))\n        if \"gs\" in self.fs.protocol:\n            movie_paths = [\"gs://\" + path for path in movie_paths]\n        return movie_paths\n\n\ndef detect_folders(\n    bucket: str, fs: fsspec.AbstractFileSystem,\n) -> Mapping[str, DiagnosticFolder]:\n    diag_ncs = fs.glob(os.path.join(bucket, \"*\", \"diags.nc\"))\n    return {\n        Path(url).parent.name: DiagnosticFolder(fs, Path(url).parent.as_posix())\n        for url in diag_ncs\n    }\n\n\ndef _load_diags(rundirs: Mapping[str, DiagnosticFolder]):\n    metrics = {}\n    for rundir, diag_folder in rundirs.items():\n        metrics[rundir] = diag_folder.diagnostics\n    return metrics\n\n\ndef _yield_metric_rows(metrics):\n    \"\"\"yield rows to be combined into a dataframe\n    \"\"\"\n    for run in metrics:\n        for name in metrics[run]:\n            yield {\n                \"run\": run,\n                \"metric\": name,\n                \"value\": metrics[run][name][\"value\"],\n                \"units\": metrics[run][name][\"units\"],\n            }\n\n\ndef _load_metrics(rundirs):\n    metrics = {}\n    for rundir, diag_folder in rundirs.items():\n        metrics[rundir] = diag_folder.metrics\n    return metrics\n\n\ndef parse_rundirs(rundirs) -> pd.DataFrame:\n    run_table = pd.DataFrame.from_records(_parse_metadata(run) for run in rundirs)\n    return run_table.set_index(\"run\")\n\n\ndef _parse_metadata(run: str):\n\n    if \"baseline\" in run:\n        baseline = True\n    else:\n        baseline = False\n\n    return {\"run\": run, \"baseline\": baseline}\n\n\ndef _get_verification_diagnostics(ds: xr.Dataset) -> xr.Dataset:\n    \"\"\"Back out verification diagnostics from prognostic run values and biases\"\"\"\n    verif_diagnostics = {}\n    verif_attrs = {\"run\": \"verification\", \"baseline\": True}\n    mean_bias_pairs = {\n        \"spatial_mean\": \"mean_bias\",\n        \"diurn_component\": \"diurn_bias\",\n        \"zonal_and_time_mean\": \"zonal_bias\",\n        \"zonal_mean_value\": \"zonal_mean_bias\",\n        \"time_mean_value\": \"time_mean_bias\",\n        \"histogram\": \"hist_bias\",\n        \"hist_2d\": \"hist2d_bias\",\n        \"pressure_level_zonal_time_mean\": \"pressure_level_zonal_bias\",\n    }\n    for mean_filter, bias_filter in mean_bias_pairs.items():\n        mean_vars = [var for var in ds if mean_filter in var]\n        for var in mean_vars:\n            matching_bias_var = var.replace(mean_filter, bias_filter)\n            if matching_bias_var in ds:\n                # verification = prognostic - bias\n                verif_diagnostics[var] = ds[var] - ds[matching_bias_var]\n                verif_diagnostics[var].attrs = ds[var].attrs\n    # special handling for histogram bin widths\n    bin_width_vars = [var for var in ds if \"bin_width_histogram\" in var]\n    bin_width_vars += [var for var in ds if \"bin_width_hist_2d\" in var]\n    for var in bin_width_vars:\n        verif_diagnostics[var] = ds[var]\n    verif_dataset = xr.Dataset(verif_diagnostics)\n    return xr.merge([ds[GRID_VARS], verif_dataset]).assign_attrs(verif_attrs)\n\n\ndef get_metadata(diags):\n    run_urls = {key: ds.attrs[\"url\"] for key, ds in diags.items()}\n    verification_datasets = [ds.attrs[\"verification\"] for ds in diags.values()]\n    if any([verification_datasets[0] != item for item in verification_datasets]):\n        raise ValueError(\n            \"Report cannot be generated with diagnostics computed against \"\n            \"different verification datasets.\"\n        )\n    verification_label = {\"verification dataset\": verification_datasets[0]}\n    return {**verification_label, **run_urls}\n\n\ndef convert_index_to_datetime(ds, dim):\n    return ds.assign_coords({dim: ds.indexes[dim].to_datetimeindex()})\n" ]
[ [ "pandas.merge" ] ]
karen-pal/scikit-learn
[ "2a67d88258264eb2b6dfad221be8f8d61684dcba" ]
[ "sklearn/cross_decomposition/tests/test_pls.py" ]
[ "import pytest\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose\n\nfrom sklearn.datasets import load_linnerud\nfrom sklearn.cross_decomposition._pls import (\n    _center_scale_xy,\n    _get_first_singular_vectors_power_method,\n    _get_first_singular_vectors_svd,\n    _svd_flip_1d,\n)\nfrom sklearn.cross_decomposition import CCA\nfrom sklearn.cross_decomposition import PLSSVD, PLSRegression, PLSCanonical\nfrom sklearn.datasets import make_regression\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.extmath import svd_flip\nfrom sklearn.exceptions import ConvergenceWarning\n\n\ndef assert_matrix_orthogonal(M):\n    K = np.dot(M.T, M)\n    assert_array_almost_equal(K, np.diag(np.diag(K)))\n\n\ndef test_pls_canonical_basics():\n    # Basic checks for PLSCanonical\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n\n    pls = PLSCanonical(n_components=X.shape[1])\n    pls.fit(X, Y)\n\n    assert_matrix_orthogonal(pls.x_weights_)\n    assert_matrix_orthogonal(pls.y_weights_)\n    assert_matrix_orthogonal(pls._x_scores)\n    assert_matrix_orthogonal(pls._y_scores)\n\n    # Check X = TP' and Y = UQ'\n    T = pls._x_scores\n    P = pls.x_loadings_\n    U = pls._y_scores\n    Q = pls.y_loadings_\n    # Need to scale first\n    Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(\n        X.copy(), Y.copy(), scale=True\n    )\n    assert_array_almost_equal(Xc, np.dot(T, P.T))\n    assert_array_almost_equal(Yc, np.dot(U, Q.T))\n\n    # Check that rotations on training data lead to scores\n    Xt = pls.transform(X)\n    assert_array_almost_equal(Xt, pls._x_scores)\n    Xt, Yt = pls.transform(X, Y)\n    assert_array_almost_equal(Xt, pls._x_scores)\n    assert_array_almost_equal(Yt, pls._y_scores)\n\n    # Check that inverse_transform works\n    X_back = pls.inverse_transform(Xt)\n    assert_array_almost_equal(X_back, X)\n\n\ndef test_sanity_check_pls_regression():\n    # Sanity check for PLSRegression\n    # The results were checked against the R-packages plspm, mixOmics and pls\n\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n\n    pls = PLSRegression(n_components=X.shape[1])\n    pls.fit(X, Y)\n\n    expected_x_weights = np.array(\n        [\n            [-0.61330704, -0.00443647, 0.78983213],\n            [-0.74697144, -0.32172099, -0.58183269],\n            [-0.25668686, 0.94682413, -0.19399983],\n        ]\n    )\n\n    expected_x_loadings = np.array(\n        [\n            [-0.61470416, -0.24574278, 0.78983213],\n            [-0.65625755, -0.14396183, -0.58183269],\n            [-0.51733059, 1.00609417, -0.19399983],\n        ]\n    )\n\n    expected_y_weights = np.array(\n        [\n            [+0.32456184, 0.29892183, 0.20316322],\n            [+0.42439636, 0.61970543, 0.19320542],\n            [-0.13143144, -0.26348971, -0.17092916],\n        ]\n    )\n\n    expected_y_loadings = np.array(\n        [\n            [+0.32456184, 0.29892183, 0.20316322],\n            [+0.42439636, 0.61970543, 0.19320542],\n            [-0.13143144, -0.26348971, -0.17092916],\n        ]\n    )\n\n    assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))\n    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))\n    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))\n    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))\n\n    # The R / Python difference in the signs should be consistent across\n    # loadings, weights, etc.\n    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)\n    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)\n    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)\n    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)\n    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)\n    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)\n\n\ndef test_sanity_check_pls_regression_constant_column_Y():\n    # Check behavior when the first column of Y is constant\n    # The results are checked against a modified version of plsreg2\n    # from the R-package plsdepot\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n    Y[:, 0] = 1\n    pls = PLSRegression(n_components=X.shape[1])\n    pls.fit(X, Y)\n\n    expected_x_weights = np.array(\n        [\n            [-0.6273573, 0.007081799, 0.7786994],\n            [-0.7493417, -0.277612681, -0.6011807],\n            [-0.2119194, 0.960666981, -0.1794690],\n        ]\n    )\n\n    expected_x_loadings = np.array(\n        [\n            [-0.6273512, -0.22464538, 0.7786994],\n            [-0.6643156, -0.09871193, -0.6011807],\n            [-0.5125877, 1.01407380, -0.1794690],\n        ]\n    )\n\n    expected_y_loadings = np.array(\n        [\n            [0.0000000, 0.0000000, 0.0000000],\n            [0.4357300, 0.5828479, 0.2174802],\n            [-0.1353739, -0.2486423, -0.1810386],\n        ]\n    )\n\n    assert_array_almost_equal(np.abs(expected_x_weights), np.abs(pls.x_weights_))\n    assert_array_almost_equal(np.abs(expected_x_loadings), np.abs(pls.x_loadings_))\n    # For the PLSRegression with default parameters, y_loadings == y_weights\n    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))\n    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_loadings))\n\n    x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)\n    x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)\n    # we ignore the first full-zeros row for y\n    y_loadings_sign_flip = np.sign(expected_y_loadings[1:] / pls.y_loadings_[1:])\n\n    assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)\n    assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip)\n\n\ndef test_sanity_check_pls_canonical():\n    # Sanity check for PLSCanonical\n    # The results were checked against the R-package plspm\n\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n\n    pls = PLSCanonical(n_components=X.shape[1])\n    pls.fit(X, Y)\n\n    expected_x_weights = np.array(\n        [\n            [-0.61330704, 0.25616119, -0.74715187],\n            [-0.74697144, 0.11930791, 0.65406368],\n            [-0.25668686, -0.95924297, -0.11817271],\n        ]\n    )\n\n    expected_x_rotations = np.array(\n        [\n            [-0.61330704, 0.41591889, -0.62297525],\n            [-0.74697144, 0.31388326, 0.77368233],\n            [-0.25668686, -0.89237972, -0.24121788],\n        ]\n    )\n\n    expected_y_weights = np.array(\n        [\n            [+0.58989127, 0.7890047, 0.1717553],\n            [+0.77134053, -0.61351791, 0.16920272],\n            [-0.23887670, -0.03267062, 0.97050016],\n        ]\n    )\n\n    expected_y_rotations = np.array(\n        [\n            [+0.58989127, 0.7168115, 0.30665872],\n            [+0.77134053, -0.70791757, 0.19786539],\n            [-0.23887670, -0.00343595, 0.94162826],\n        ]\n    )\n\n    assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))\n    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))\n    assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))\n    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))\n\n    x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)\n    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)\n    y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)\n    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)\n    assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)\n    assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)\n\n    assert_matrix_orthogonal(pls.x_weights_)\n    assert_matrix_orthogonal(pls.y_weights_)\n\n    assert_matrix_orthogonal(pls._x_scores)\n    assert_matrix_orthogonal(pls._y_scores)\n\n\ndef test_sanity_check_pls_canonical_random():\n    # Sanity check for PLSCanonical on random data\n    # The results were checked against the R-package plspm\n    n = 500\n    p_noise = 10\n    q_noise = 5\n    # 2 latent vars:\n    rng = check_random_state(11)\n    l1 = rng.normal(size=n)\n    l2 = rng.normal(size=n)\n    latents = np.array([l1, l1, l2, l2]).T\n    X = latents + rng.normal(size=4 * n).reshape((n, 4))\n    Y = latents + rng.normal(size=4 * n).reshape((n, 4))\n    X = np.concatenate((X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)\n    Y = np.concatenate((Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)\n\n    pls = PLSCanonical(n_components=3)\n    pls.fit(X, Y)\n\n    expected_x_weights = np.array(\n        [\n            [0.65803719, 0.19197924, 0.21769083],\n            [0.7009113, 0.13303969, -0.15376699],\n            [0.13528197, -0.68636408, 0.13856546],\n            [0.16854574, -0.66788088, -0.12485304],\n            [-0.03232333, -0.04189855, 0.40690153],\n            [0.1148816, -0.09643158, 0.1613305],\n            [0.04792138, -0.02384992, 0.17175319],\n            [-0.06781, -0.01666137, -0.18556747],\n            [-0.00266945, -0.00160224, 0.11893098],\n            [-0.00849528, -0.07706095, 0.1570547],\n            [-0.00949471, -0.02964127, 0.34657036],\n            [-0.03572177, 0.0945091, 0.3414855],\n            [0.05584937, -0.02028961, -0.57682568],\n            [0.05744254, -0.01482333, -0.17431274],\n        ]\n    )\n\n    expected_x_loadings = np.array(\n        [\n            [0.65649254, 0.1847647, 0.15270699],\n            [0.67554234, 0.15237508, -0.09182247],\n            [0.19219925, -0.67750975, 0.08673128],\n            [0.2133631, -0.67034809, -0.08835483],\n            [-0.03178912, -0.06668336, 0.43395268],\n            [0.15684588, -0.13350241, 0.20578984],\n            [0.03337736, -0.03807306, 0.09871553],\n            [-0.06199844, 0.01559854, -0.1881785],\n            [0.00406146, -0.00587025, 0.16413253],\n            [-0.00374239, -0.05848466, 0.19140336],\n            [0.00139214, -0.01033161, 0.32239136],\n            [-0.05292828, 0.0953533, 0.31916881],\n            [0.04031924, -0.01961045, -0.65174036],\n            [0.06172484, -0.06597366, -0.1244497],\n        ]\n    )\n\n    expected_y_weights = np.array(\n        [\n            [0.66101097, 0.18672553, 0.22826092],\n            [0.69347861, 0.18463471, -0.23995597],\n            [0.14462724, -0.66504085, 0.17082434],\n            [0.22247955, -0.6932605, -0.09832993],\n            [0.07035859, 0.00714283, 0.67810124],\n            [0.07765351, -0.0105204, -0.44108074],\n            [-0.00917056, 0.04322147, 0.10062478],\n            [-0.01909512, 0.06182718, 0.28830475],\n            [0.01756709, 0.04797666, 0.32225745],\n        ]\n    )\n\n    expected_y_loadings = np.array(\n        [\n            [0.68568625, 0.1674376, 0.0969508],\n            [0.68782064, 0.20375837, -0.1164448],\n            [0.11712173, -0.68046903, 0.12001505],\n            [0.17860457, -0.6798319, -0.05089681],\n            [0.06265739, -0.0277703, 0.74729584],\n            [0.0914178, 0.00403751, -0.5135078],\n            [-0.02196918, -0.01377169, 0.09564505],\n            [-0.03288952, 0.09039729, 0.31858973],\n            [0.04287624, 0.05254676, 0.27836841],\n        ]\n    )\n\n    assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))\n    assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))\n    assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))\n    assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))\n\n    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)\n    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)\n    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)\n    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)\n    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)\n    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)\n\n    assert_matrix_orthogonal(pls.x_weights_)\n    assert_matrix_orthogonal(pls.y_weights_)\n\n    assert_matrix_orthogonal(pls._x_scores)\n    assert_matrix_orthogonal(pls._y_scores)\n\n\ndef test_convergence_fail():\n    # Make sure ConvergenceWarning is raised if max_iter is too small\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n    pls_nipals = PLSCanonical(n_components=X.shape[1], max_iter=2)\n    with pytest.warns(ConvergenceWarning):\n        pls_nipals.fit(X, Y)\n\n\n@pytest.mark.filterwarnings(\"ignore:.*scores_ was deprecated\")  # 1.1\n@pytest.mark.parametrize(\"Est\", (PLSSVD, PLSRegression, PLSCanonical))\ndef test_attibutes_shapes(Est):\n    # Make sure attributes are of the correct shape depending on n_components\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n    n_components = 2\n    pls = Est(n_components=n_components)\n    pls.fit(X, Y)\n    assert all(\n        attr.shape[1] == n_components\n        for attr in (pls.x_scores_, pls.y_scores_, pls.x_weights_, pls.y_weights_)\n    )\n\n\n@pytest.mark.parametrize(\"Est\", (PLSRegression, PLSCanonical, CCA))\ndef test_univariate_equivalence(Est):\n    # Ensure 2D Y with 1 column is equivalent to 1D Y\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n\n    est = Est(n_components=1)\n    one_d_coeff = est.fit(X, Y[:, 0]).coef_\n    two_d_coeff = est.fit(X, Y[:, :1]).coef_\n\n    assert one_d_coeff.shape == two_d_coeff.shape\n    assert_array_almost_equal(one_d_coeff, two_d_coeff)\n\n\n@pytest.mark.parametrize(\"Est\", (PLSRegression, PLSCanonical, CCA, PLSSVD))\ndef test_copy(Est):\n    # check that the \"copy\" keyword works\n    d = load_linnerud()\n    X = d.data\n    Y = d.target\n    X_orig = X.copy()\n\n    # copy=True won't modify inplace\n    pls = Est(copy=True).fit(X, Y)\n    assert_array_equal(X, X_orig)\n\n    # copy=False will modify inplace\n    with pytest.raises(AssertionError):\n        Est(copy=False).fit(X, Y)\n        assert_array_almost_equal(X, X_orig)\n\n    if Est is PLSSVD:\n        return  # PLSSVD does not support copy param in predict or transform\n\n    X_orig = X.copy()\n    with pytest.raises(AssertionError):\n        pls.transform(X, Y, copy=False),\n        assert_array_almost_equal(X, X_orig)\n\n    X_orig = X.copy()\n    with pytest.raises(AssertionError):\n        pls.predict(X, copy=False),\n        assert_array_almost_equal(X, X_orig)\n\n    # Make sure copy=True gives same transform and predictions as predict=False\n    assert_array_almost_equal(\n        pls.transform(X, Y, copy=True), pls.transform(X.copy(), Y.copy(), copy=False)\n    )\n    assert_array_almost_equal(\n        pls.predict(X, copy=True), pls.predict(X.copy(), copy=False)\n    )\n\n\ndef _generate_test_scale_and_stability_datasets():\n    \"\"\"Generate dataset for test_scale_and_stability\"\"\"\n    # dataset for non-regression 7818\n    rng = np.random.RandomState(0)\n    n_samples = 1000\n    n_targets = 5\n    n_features = 10\n    Q = rng.randn(n_targets, n_features)\n    Y = rng.randn(n_samples, n_targets)\n    X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1\n    X *= 1000\n    yield X, Y\n\n    # Data set where one of the features is constant\n    X, Y = load_linnerud(return_X_y=True)\n    # causes X[:, -1].std() to be zero\n    X[:, -1] = 1.0\n    yield X, Y\n\n    X = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [3.0, 5.0, 4.0]])\n    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])\n    yield X, Y\n\n    # Seeds that provide a non-regression test for #18746, where CCA fails\n    seeds = [530, 741]\n    for seed in seeds:\n        rng = np.random.RandomState(seed)\n        X = rng.randn(4, 3)\n        Y = rng.randn(4, 2)\n        yield X, Y\n\n\n@pytest.mark.parametrize(\"Est\", (CCA, PLSCanonical, PLSRegression, PLSSVD))\n@pytest.mark.parametrize(\"X, Y\", _generate_test_scale_and_stability_datasets())\ndef test_scale_and_stability(Est, X, Y):\n    \"\"\"scale=True is equivalent to scale=False on centered/scaled data\n    This allows checking numerical stability over platforms as well\"\"\"\n\n    X_s, Y_s, *_ = _center_scale_xy(X, Y)\n\n    X_score, Y_score = Est(scale=True).fit_transform(X, Y)\n    X_s_score, Y_s_score = Est(scale=False).fit_transform(X_s, Y_s)\n\n    assert_allclose(X_s_score, X_score, atol=1e-4)\n    assert_allclose(Y_s_score, Y_score, atol=1e-4)\n\n\n@pytest.mark.parametrize(\"Est\", (PLSSVD, PLSCanonical, CCA))\n@pytest.mark.parametrize(\"n_components\", (0, 4))\ndef test_n_components_bounds(Est, n_components):\n    # n_components should be in [1, min(n_samples, n_features, n_targets)]\n    # TODO: catch error instead of warning in 1.1\n    rng = np.random.RandomState(0)\n    X = rng.randn(10, 5)\n    Y = rng.randn(10, 3)\n    est = Est(n_components=n_components)\n    with pytest.warns(FutureWarning, match=\"n_components=3 will be used instead\"):\n        est.fit(X, Y)\n    # make sure upper bound of rank is used as a fallback\n    assert est.transform(X).shape[1] == 3\n\n\n@pytest.mark.parametrize(\"n_components\", (0, 6))\ndef test_n_components_bounds_pls_regression(n_components):\n    # For PLSRegression, the upper bound for n_components is n_features\n    # TODO: catch error instead of warning in 1.1\n    rng = np.random.RandomState(0)\n    X = rng.randn(10, 5)\n    Y = rng.randn(10, 3)\n    est = PLSRegression(n_components=n_components)\n    with pytest.warns(FutureWarning, match=\"n_components=5 will be used instead\"):\n        est.fit(X, Y)\n    # make sure upper bound of rank is used as a fallback\n    assert est.transform(X).shape[1] == 5\n\n\n@pytest.mark.parametrize(\"Est\", (PLSSVD, CCA, PLSCanonical))\ndef test_scores_deprecations(Est):\n    # Make sure x_scores_ and y_scores_ are deprecated.\n    # It's not deprecated for PLSRegression because y_score_ is different from\n    # transform(Y_train)\n    # TODO: remove attributes and test in 1.1\n    rng = np.random.RandomState(0)\n    X = rng.randn(10, 5)\n    Y = rng.randn(10, 3)\n    est = Est().fit(X, Y)\n    with pytest.warns(FutureWarning, match=\"x_scores_ was deprecated\"):\n        assert_allclose(est.x_scores_, est.transform(X))\n    with pytest.warns(FutureWarning, match=\"y_scores_ was deprecated\"):\n        assert_allclose(est.y_scores_, est.transform(X, Y)[1])\n\n\n@pytest.mark.parametrize(\"Est\", (PLSRegression, PLSCanonical, CCA))\ndef test_norm_y_weights_deprecation(Est):\n    rng = np.random.RandomState(0)\n    X = rng.randn(10, 5)\n    Y = rng.randn(10, 3)\n    est = Est().fit(X, Y)\n    with pytest.warns(FutureWarning, match=\"norm_y_weights was deprecated\"):\n        est.norm_y_weights\n\n\n# TODO: Remove test in 1.1\n@pytest.mark.parametrize(\"Estimator\", (PLSRegression, PLSCanonical, CCA, PLSSVD))\n@pytest.mark.parametrize(\"attribute\", (\"x_mean_\", \"y_mean_\", \"x_std_\", \"y_std_\"))\ndef test_mean_and_std_deprecation(Estimator, attribute):\n    rng = np.random.RandomState(0)\n    X = rng.randn(10, 5)\n    Y = rng.randn(10, 3)\n    estimator = Estimator().fit(X, Y)\n    with pytest.warns(FutureWarning, match=f\"{attribute} was deprecated\"):\n        getattr(estimator, attribute)\n\n\n@pytest.mark.parametrize(\"n_samples, n_features\", [(100, 10), (100, 200)])\n@pytest.mark.parametrize(\"seed\", range(10))\ndef test_singular_value_helpers(n_samples, n_features, seed):\n    # Make sure SVD and power method give approximately the same results\n    X, Y = make_regression(n_samples, n_features, n_targets=5, random_state=seed)\n    u1, v1, _ = _get_first_singular_vectors_power_method(X, Y, norm_y_weights=True)\n    u2, v2 = _get_first_singular_vectors_svd(X, Y)\n\n    _svd_flip_1d(u1, v1)\n    _svd_flip_1d(u2, v2)\n\n    rtol = 1e-1\n    assert_allclose(u1, u2, rtol=rtol)\n    assert_allclose(v1, v2, rtol=rtol)\n\n\ndef test_one_component_equivalence():\n    # PLSSVD, PLSRegression and PLSCanonical should all be equivalent when\n    # n_components is 1\n    X, Y = make_regression(100, 10, n_targets=5, random_state=0)\n    svd = PLSSVD(n_components=1).fit(X, Y).transform(X)\n    reg = PLSRegression(n_components=1).fit(X, Y).transform(X)\n    canonical = PLSCanonical(n_components=1).fit(X, Y).transform(X)\n\n    assert_allclose(svd, reg, rtol=1e-2)\n    assert_allclose(svd, canonical, rtol=1e-2)\n\n\ndef test_svd_flip_1d():\n    # Make sure svd_flip_1d is equivalent to svd_flip\n    u = np.array([1, -4, 2])\n    v = np.array([1, 2, 3])\n\n    u_expected, v_expected = svd_flip(u.reshape(-1, 1), v.reshape(1, -1))\n    _svd_flip_1d(u, v)  # inplace\n\n    assert_allclose(u, u_expected.ravel())\n    assert_allclose(u, [-1, 4, -2])\n\n    assert_allclose(v, v_expected.ravel())\n    assert_allclose(v, [-1, -2, -3])\n\n\ndef test_loadings_converges():\n    \"\"\"Test that CCA converges. Non-regression test for #19549.\"\"\"\n    X, y = make_regression(n_samples=200, n_features=20, n_targets=20, random_state=20)\n\n    cca = CCA(n_components=10, max_iter=500)\n\n    with pytest.warns(None) as record:\n        cca.fit(X, y)\n    # ConvergenceWarning should not be raised\n    if len(record) > 0:\n        pytest.fail(f\"Unexpected warning: {str(record[0].message)}\")\n\n    # Loadings converges to reasonable values\n    assert np.all(np.abs(cca.x_loadings_) < 1)\n\n\ndef test_pls_constant_y():\n    \"\"\"Checks warning when y is constant. Non-regression test for #19831\"\"\"\n    rng = np.random.RandomState(42)\n    x = rng.rand(100, 3)\n    y = np.zeros(100)\n\n    pls = PLSRegression()\n\n    msg = \"Y residual is constant at iteration\"\n    with pytest.warns(UserWarning, match=msg):\n        pls.fit(x, y)\n\n    assert_allclose(pls.x_rotations_, 0)\n" ]
[ [ "numpy.diag", "numpy.random.RandomState", "sklearn.datasets.make_regression", "sklearn.datasets.load_linnerud", "sklearn.cross_decomposition._pls._center_scale_xy", "numpy.abs", "numpy.testing.assert_array_equal", "sklearn.cross_decomposition._pls._get_first_singular_vectors_power_method", "sklearn.utils.check_random_state", "numpy.zeros", "sklearn.cross_decomposition.CCA", "numpy.testing.assert_array_almost_equal", "sklearn.cross_decomposition._pls._svd_flip_1d", "sklearn.cross_decomposition.PLSRegression", "sklearn.cross_decomposition.PLSCanonical", "numpy.sign", "sklearn.cross_decomposition._pls._get_first_singular_vectors_svd", "sklearn.cross_decomposition.PLSSVD", "numpy.testing.assert_allclose", "numpy.array", "numpy.dot" ] ]
rohithdesikan/evprediction
[ "3ea5a2b3db350397385c9c9835483eb7dfb2773b" ]
[ "app/app.py" ]
[ "# %%\nimport os\nimport numpy as np\nimport pandas as pd\nimport flask\nfrom flask import Flask, jsonify, request, make_response\nimport tensorflow as tf\n\nfrom evprediction import convert_to_array\n\n# %%\n# Load saved model\n# model_path = os.path.abspath(os.path.join(os.getcwd(), 'models'))\nmodel_name = 'evmodel.h5'\nmodel = tf.keras.models.load_model(model_name)\n\n# %%\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n    return \"Welcome to EV Prediction\"\n\n\n# Works for any number of test points\n@app.route('/predict', methods=['POST'])\ndef make_prediction():\n\n    # Make the request in json format\n    json = request.get_json()\n\n    # It comes in as a list of lists where the 2nd element is the meter data; convert it to a numpy array\n    data = json[1]\n    arr = np.array(data)\n\n    # If there is only 1 point to be tested, reshape it as necessary (1, 2880)\n    if len(arr.shape) == 1:\n        arr = np.reshape(arr, (-1, arr.shape[0]))\n\n    # The House_ID may or may not be included in the data, so drop the first column if it is present\n    if arr.shape[1] == 2881:\n        arr = np.array(arr[:, 1:])\n\n    # Reshape array to the required shape for predictions\n    arr_reshaped = np.reshape(arr, (arr.shape[0], 60, -1))\n\n    # Use the saved model to make a prediction\n    out = model.predict(arr_reshaped)\n\n    # Reshape the output into a single dimension, convert to list and then to int (for boolean prediction)\n    out_reshaped = np.reshape(out, (out.shape[0], ))\n    out_pred = np.round(out_reshaped).tolist()\n    out_int = [int(p) for p in out_pred]\n\n    # Return predictions as a dictionary, works for both single and multi input prediction\n    return make_response({'Predictions': out_int})\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=5000, debug=True)\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.round", "tensorflow.keras.models.load_model" ] ]
prateekiiest/interpret
[ "b5530a587251a77516ab443037fc37f71708564c" ]
[ "python/interpret-core/interpret/glassbox/ebm/test/test_internal.py" ]
[ "# Copyright (c) 2019 Microsoft Corporation\n# Distributed under the MIT software license\n\nfrom ..internal import Native, NativeEBMBooster\n\nimport numpy as np\nimport ctypes as ct\nfrom contextlib import closing\n\ndef test_booster_internals():\n with closing(\n NativeEBMBooster(\n model_type=\"classification\",\n n_classes=2,\n features_categorical=np.array([0], dtype=ct.c_int64, order=\"C\"), \n features_bin_count=np.array([2], dtype=ct.c_int64, order=\"C\"),\n feature_groups=[[0]],\n X_train=np.array([[0]], dtype=ct.c_int64, order=\"C\"),\n y_train=np.array([0], dtype=ct.c_int64, order=\"C\"),\n scores_train=None,\n X_val=np.array([[0]], dtype=ct.c_int64, order=\"C\"),\n y_val=np.array([0], dtype=ct.c_int64, order=\"C\"),\n scores_val=None,\n n_inner_bags=0,\n random_state=42,\n optional_temp_params=None,\n )\n ) as native_ebm_booster:\n gain = native_ebm_booster.generate_model_update(\n feature_group_index=0,\n generate_update_options=Native.GenerateUpdateOptions_Default,\n learning_rate=0.01,\n min_samples_leaf=2,\n max_leaves=np.array([2], dtype=ct.c_int64, order=\"C\"),\n )\n assert gain == 0\n\n cuts = native_ebm_booster.get_model_update_cuts()\n assert len(cuts) == 1\n assert len(cuts[0]) == 0\n\n model_update = native_ebm_booster.get_model_update_expanded()\n assert len(model_update.shape) == 1\n assert model_update.shape[0] == 2\n assert model_update[0] < 0\n\n native_ebm_booster.set_model_update_expanded(0, model_update)\n\n metric = native_ebm_booster.apply_model_update()\n assert 0 < metric\n\n model = native_ebm_booster.get_best_model()\n assert len(model) == 1\n assert len(model[0].shape) == 1\n assert model[0].shape[0] == 2\n assert model[0][0] < 0\n\n\ndef test_one_class():\n with closing(\n NativeEBMBooster(\n model_type=\"classification\",\n n_classes=1,\n features_categorical=np.array([0], dtype=ct.c_int64, order=\"C\"), \n features_bin_count=np.array([2], dtype=ct.c_int64, order=\"C\"),\n feature_groups=[[0]],\n X_train=np.array([[0, 1, 0]], dtype=ct.c_int64, order=\"C\"),\n y_train=np.array([0, 0, 0], dtype=ct.c_int64, order=\"C\"),\n scores_train=None,\n X_val=np.array([[1, 0, 1]], dtype=ct.c_int64, order=\"C\"),\n y_val=np.array([0, 0, 0], dtype=ct.c_int64, order=\"C\"),\n scores_val=None,\n n_inner_bags=0,\n random_state=42,\n optional_temp_params=None,\n )\n ) as native_ebm_booster:\n gain = native_ebm_booster.generate_model_update(\n feature_group_index=0,\n generate_update_options=Native.GenerateUpdateOptions_Default,\n learning_rate=0.01,\n min_samples_leaf=2,\n max_leaves=np.array([2], dtype=ct.c_int64, order=\"C\"),\n )\n assert gain == 0\n\n cuts = native_ebm_booster.get_model_update_cuts()\n assert len(cuts) == 1\n assert len(cuts[0]) == 0\n\n model_update = native_ebm_booster.get_model_update_expanded()\n assert model_update is None\n\n native_ebm_booster.set_model_update_expanded(0, model_update)\n\n metric = native_ebm_booster.apply_model_update()\n assert metric == 0\n\n model = native_ebm_booster.get_best_model()\n assert len(model) == 1\n assert model[0] is None\n" ]
[ [ "numpy.array" ] ]
mohammadpz/Associative_LSTM
[ "5094829ed8432be738c79c6a87396e0edf63b008" ]
[ "holographic_memory.py" ]
[ "import numpy as np\nimport theano\nimport theano.tensor as T\n# import matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nB = 10\nF = 110 * 110 * 3\nC = 20\n\n# shape: C x F/2 (use integer division so the indices stay ints under Python 3)\npermutations = []\nindices = np.arange(F // 2)\nfor i in range(C):\n    np.random.shuffle(indices)\n    permutations.append(np.concatenate(\n        [indices,\n         [ind + F // 2 for ind in indices]]))\n# C x F (numpy)\nPERMUTATIONS = np.vstack(permutations)\n\n\n# input: B x F\n# output: C x B x F\ndef permute(input):\n    inputs_permuted = []\n    for i in range(PERMUTATIONS.shape[0]):\n        inputs_permuted.append(\n            input[:, PERMUTATIONS[i]].dimshuffle('x', 0, 1))\n    return T.concatenate(inputs_permuted, axis=0)\n\n\n# r: C x B x F\n# u: if mem: C x 1 x F\n# u: if value: 1 x B x F\ndef complex_mult(r, u, inverse_r=False, moduli_1=False):\n    _, _, F = u.shape\n    r_rl = r[:, :, :F // 2]\n    r_im = r[:, :, F // 2:]\n    if inverse_r:\n        if moduli_1:\n            r_im = -r_im\n        else:\n            tmp = r_rl / (r_rl ** 2 + r_im ** 2)\n            r_im = -r_im / (r_rl ** 2 + r_im ** 2)\n            r_rl = tmp\n    u_rl = u[:, :, :F // 2]\n    u_im = u[:, :, F // 2:]\n    res_rl = r_rl * u_rl - r_im * u_im\n    res_im = r_rl * u_im + r_im * u_rl\n    res = T.concatenate([res_rl, res_im], axis=2)\n    # C x B x F\n    return res\n\n\n# key: C x B x F\n# mem: C x F\ndef read(key, mem):\n    value = complex_mult(\n        permute(key),\n        mem.dimshuffle(0, 'x', 1),\n        inverse_r=True, moduli_1=True)\n    return value.mean(axis=0)\n\n\n# key: C x B x F\n# value: B x F\n# mem: C x F\ndef write(key, value):\n    coded_value = complex_mult(permute(key), value.dimshuffle('x', 0, 1))\n    # C x F\n    return coded_value.sum(axis=1)\n\nif __name__ == \"__main__\":\n    # B x F\n    key = T.matrix('key')\n    # B x F\n    value = T.matrix('value')\n    # C x F\n    mem = T.matrix('mem')\n\n    read_func = theano.function([key, mem], read(key, mem))\n    write_func = theano.function([key, value], write(key, value))\n\n    # shape: 20 x 110 x 110 x 3\n    data = np.load('20_images_from_imagenet.npy')[:B]\n    VALUES = data.reshape(B, F) - np.mean(data.reshape(B, F),\n                                          axis=1, keepdims=True)\n\n    phis = np.random.random((B, F // 2)) * 2 * np.pi\n    KEYS = np.concatenate([np.cos(phis), np.sin(phis)], axis=1)\n\n    MEM = write_func(KEYS, VALUES)\n\n    all_imgs = read_func(KEYS, MEM)\n\n    VALUES = VALUES + np.mean(data.reshape(B, F), axis=1, keepdims=True)\n    VALUES = VALUES.reshape(B, 110, 110, 3)\n    VALUES = np.swapaxes(VALUES, 0, 1)\n    VALUES = np.reshape(VALUES, (110, 110 * B, 3))\n    plt.imshow(VALUES[:, :110 * B])\n    plt.show()\n\n    all_imgs = all_imgs + np.mean(data.reshape(B, F), axis=1, keepdims=True)\n    all_imgs = all_imgs.reshape(B, 110, 110, 3)\n    all_imgs = np.swapaxes(all_imgs, 0, 1)\n    all_imgs = np.reshape(all_imgs, (110, 110 * B, 3))\n    plt.imshow(all_imgs[:, :110 * B])\n    plt.show()\n" ]
[ [ "numpy.vstack", "numpy.load", "numpy.random.shuffle", "numpy.swapaxes", "numpy.reshape", "numpy.cos", "numpy.arange", "numpy.random.random", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.sin", "numpy.concatenate" ] ]