Dataset schema (per row):
  repo_name   string (8-130 chars)
  hexsha      sequence
  file_path   sequence
  code        sequence
  apis        sequence
JiaheXu/CIS
[ "01881beb8db270ecda917af07c2588ed66dddbc9" ]
[ "CIS1_PA3/CIS1-PA3/KD_tree.py" ]
[ "import numpy as np\nimport sys, os\nimport time\nimport glob\nfrom registration_3d import *\nfrom cartesian import *\nfrom collections import namedtuple\nfrom operator import itemgetter\nfrom pprint import pformat\nimport matplotlib.pyplot as plt\nfrom read_files import *\nimport argparse\nfrom ICPmatching import *\n\nclass KD_tree():\n def __init__(self, Np , mesh_points , Ntriangles , mesh_vertices , rectangles):\n \n self.Np = Np\n self.mesh_points = mesh_points\n self.mesh_vertices = mesh_vertices\n self.Ntriangles = Ntriangles\n self.rectangles = rectangles\n self.num = np.zeros(Ntriangles+10).astype(int)\n #self.D = mesh_points.shape[1]\n self.D = 3\n \n # ls saves the index of left son\n self.ls = np.zeros( Ntriangles+10 )\n # rs saves the index of right son\n self.rs = np.zeros( Ntriangles+10 )\n \n # self.Rectangle : [; , 0:3] represent min(x , y , z)\n # self.Rectangle : [; , 3:6] represent max(x , y , z) \n self.Rectangle = np.zeros( (Ntriangles+10,6) )\n \n # rectangles[:,6] save the index of triangle each rectangle represent\n self.tree = self.build(rectangles.tolist(), 1 , Ntriangles , depth =0 )\n self.nearest_point = []\n self.nearest_dist = 0\n \n def pushup(self, root):\n # updating current node from son nodes\n # root is the current index number\n ls = self.ls[root]\n rs = self.rs[root]\n if(ls!=0):\n for i in range(3):\n self.Rectangle[root,i] = min(self.Rectangle[root,i],self.Rectangle[int(ls),i])\n self.Rectangle[root,i+3] = max(self.Rectangle[root,i+3],self.Rectangle[int(ls),i+3]) \n if(rs!=0):\n for i in range(3):\n self.Rectangle[root,i] = min(self.Rectangle[root,i],self.Rectangle[int(rs),i])\n self.Rectangle[root,i+3] = max(self.Rectangle[root,i+3],self.Rectangle[int(rs),i+3]) \n \n def point_to_cube(self, start_point, root):\n # compute the shortest distant from a point to a cube\n dis = np.zeros(self.D)\n \n for i in range(self.D):\n if(start_point[i] < self.Rectangle[root,i]):\n dis[i] = self.Rectangle[root,i] - start_point[i]\n if(start_point[i] > self.Rectangle[root,i+3]):\n dis[i] = start_point[i] - self.Rectangle[root,i+3]\n dist = np.linalg.norm(dis)\n return dist\n \n def find(self, start_point , left,right,depth):\n # find the closest point from start_point in a tree\n # depth tell us which dimension we should look to\n # left and right means which nodes we are looking at from 1 <=left <=right <= n\n if(left>right):\n return \n \n middle = ((left + right) // 2) \n \n dist = self.point_to_cube(start_point , middle)\n \n # if the current optimal solution is better than the possible solution in the cube\n # just return\n if(dist > self.nearest_dist):\n return\n \n # check the distance from start_point to the current node's mesh triangle\n tmp = point_to_triangle(start_point , self.num[middle], self.mesh_points, self.mesh_vertices)\n dist = np.linalg.norm(start_point - tmp)\n \n if( dist < self.nearest_dist):\n self.nearest_dist = dist\n self.nearest_point = tmp\n \n # look into son nodes\n self.find( start_point , left , middle-1 ,depth)\n self.find( start_point , middle+1 , right,depth)\n \n def FindClosestPoint(self, start_point ):\n \n self.nearest_dist = np.finfo(np.float32).max\n \n self.find( start_point , 1 , self.Ntriangles , depth=0 ) \n \n return self.nearest_point\n \n def build( self, points, left,right,depth ):\n # build a KD-tree\n # left and right means which nodes we are looking at from 1 <=left <=right <= n\n if(left>right):\n return 0\n \n axis = depth % self.D\n \n # sort with axis, since the number of nodes is not too big\n # we 
directly use O(nlogn) sort in list, rather than a O(n) sort\n points.sort(key=itemgetter(axis))\n middle = ((left + right) // 2) \n #print(\"points: \",len(points))\n #print(\"middle: \",middle)\n self.Rectangle[middle] = np.array(points[ middle - left ][0:6]).astype(float)\n \n # self.num saves the index number of mesh triangle\n self.num[middle] = points[middle - left ][6]\n \n self.ls[ middle ] = self.build(points[:middle- left] ,left , middle-1 , depth+1 )\n self.rs[ middle ] = self.build(points[middle-left+1:] ,middle+1, right , depth+1 )\n\n # after finished building son nodes, we need update father node's info \n self.pushup(middle)\n \n return middle\n\ndef kd_matching(s_k , Np , mesh_points , Ntriangles , mesh_vertices ):\n \n rectangles = get_rectangles(Np , mesh_points , Ntriangles , mesh_vertices)\n\n kdtree = KD_tree(Np , mesh_points , Ntriangles , mesh_vertices , rectangles)\n \n Ns = s_k.shape[0]\n \n closest_p = []\n for i in range(Ns):\n min_dist = np.finfo(np.float32).max\n tmp = kdtree.FindClosestPoint( s_k[i] )\n closest_p.append(tmp)\n return closest_p" ]
[ [ "numpy.array", "numpy.finfo", "numpy.linalg.norm", "numpy.zeros" ] ]
waitingkuo/tensorflow
[ "ce3572a08b9ecfa5c8dd94921c2011f37b58e608" ]
[ "tensorflow/contrib/bayesflow/python/kernel_tests/variational_inference_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for variational inference.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nsg = tf.contrib.bayesflow.stochastic_graph\nvi = tf.contrib.bayesflow.variational_inference\ndistributions = tf.contrib.distributions\n\n\nclass NormalNoEntropy(distributions.Normal):\n\n def entropy(self):\n raise NotImplementedError(\"entropy not implemented\")\n\n\n# For mini-VAE\ndef inference_net(x, latent_size):\n return tf.contrib.layers.linear(x, latent_size)\n\n\ndef generative_net(z, data_size):\n return tf.contrib.layers.linear(z, data_size)\n\n\ndef mini_vae():\n x = [[-6., 3., 6.], [-8., 4., 8.]]\n prior = distributions.Normal(mu=0., sigma=1.)\n variational = sg.DistributionTensor(\n distributions.Normal, mu=inference_net(x, 1), sigma=1.)\n vi.register_prior(variational, prior)\n px = distributions.Normal(mu=generative_net(variational, 3), sigma=1.)\n log_likelihood = tf.reduce_sum(px.log_prob(x), 1)\n log_likelihood = tf.expand_dims(log_likelihood, -1)\n return x, prior, variational, px, log_likelihood\n\n\nclass VariationalInferenceTest(tf.test.TestCase):\n\n def testDefaultVariationalAndPrior(self):\n _, prior, variational, _, log_likelihood = mini_vae()\n elbo = vi.elbo(log_likelihood)\n expected_elbo = log_likelihood - tf.contrib.distributions.kl(\n variational.distribution, prior)\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(*sess.run([expected_elbo, elbo]))\n\n def testExplicitVariationalAndPrior(self):\n with self.test_session() as sess:\n _, _, variational, _, log_likelihood = mini_vae()\n prior = tf.contrib.distributions.Normal(mu=3., sigma=2.)\n elbo = vi.elbo(\n log_likelihood, variational_with_prior={variational: prior})\n expected_elbo = log_likelihood - tf.contrib.distributions.kl(\n variational.distribution, prior)\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(*sess.run([expected_elbo, elbo]))\n\n def testExplicitForms(self):\n _, prior, variational, _, log_likelihood = mini_vae()\n\n elbos = []\n forms = vi.ELBOForms\n for form in [forms.default, forms.analytic_kl, forms.sample,\n forms.analytic_entropy]:\n elbo = vi.elbo(\n log_likelihood=log_likelihood,\n variational_with_prior={variational: prior},\n form=form)\n elbos.append(elbo)\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n log_likelihood_shape = tf.shape(log_likelihood).eval()\n for elbo in elbos:\n elbo.eval()\n elbo_shape = tf.shape(elbo).eval()\n self.assertAllEqual(log_likelihood_shape, elbo_shape)\n self.assertEqual(elbo.dtype, log_likelihood.dtype)\n\n def testDefaultsSampleKLWithoutAnalyticKLOrEntropy(self):\n x = tf.constant([[-6., 3., 6.]])\n\n prior = distributions.Bernoulli(0.5)\n variational = 
sg.DistributionTensor(\n NormalNoEntropy, mu=inference_net(x, 1), sigma=1.)\n vi.register_prior(variational, prior)\n px = distributions.Normal(mu=generative_net(variational, 3), sigma=1.)\n log_likelihood = tf.reduce_sum(px.log_prob(x), 1)\n\n # No analytic KL available between prior and variational distributions.\n with self.assertRaisesRegexp(NotImplementedError, \"No KL\"):\n distributions.kl(variational.distribution, prior)\n\n elbo = vi.elbo(\n variational_with_prior={variational: prior},\n log_likelihood=log_likelihood)\n expected_elbo = log_likelihood + prior.log_prob(\n variational) - variational.distribution.log_prob(variational)\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(*sess.run([expected_elbo, elbo]))\n\n def testElboWithLogJoint(self):\n with self.test_session() as sess:\n _, prior, variational, _, log_likelihood = mini_vae()\n log_joint = log_likelihood + prior.log_prob(variational)\n elbo = vi.elbo_with_log_joint(log_joint)\n sess.run(tf.initialize_all_variables())\n elbo.eval()\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.initialize_all_variables", "tensorflow.shape", "tensorflow.contrib.distributions.kl", "tensorflow.expand_dims", "tensorflow.contrib.distributions.Normal", "tensorflow.constant", "tensorflow.contrib.layers.linear", "tensorflow.test.main" ] ]
iyerr3/sagemaker-python-sdk
[ "cfaa2c6aabb3860e722bf68b27e0f9c3b8fc5570" ]
[ "tests/integ/test_tf_script_mode.py" ]
[ "# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport os\nimport time\n\nimport pytest\n\nimport boto3\nfrom sagemaker.tensorflow import TensorFlow\nfrom six.moves.urllib.parse import urlparse\nfrom sagemaker.utils import unique_name_from_base\n\nimport tests.integ\nfrom tests.integ import timeout\n\nROLE = \"SageMakerRole\"\n\nRESOURCE_PATH = os.path.join(os.path.dirname(__file__), \"..\", \"data\")\nMNIST_RESOURCE_PATH = os.path.join(RESOURCE_PATH, \"tensorflow_mnist\")\nTFS_RESOURCE_PATH = os.path.join(RESOURCE_PATH, \"tfs\", \"tfs-test-entrypoint-with-handler\")\n\nSCRIPT = os.path.join(MNIST_RESOURCE_PATH, \"mnist.py\")\nPARAMETER_SERVER_DISTRIBUTION = {\"parameter_server\": {\"enabled\": True}}\nMPI_DISTRIBUTION = {\"mpi\": {\"enabled\": True}}\nTAGS = [{\"Key\": \"some-key\", \"Value\": \"some-value\"}]\n\n\[email protected](scope=\"session\", params=[\"ml.c4.xlarge\"])\ndef instance_type(request):\n return request.param\n\n\ndef test_mnist(sagemaker_session, instance_type):\n estimator = TensorFlow(\n entry_point=SCRIPT,\n role=\"SageMakerRole\",\n train_instance_count=1,\n train_instance_type=instance_type,\n sagemaker_session=sagemaker_session,\n script_mode=True,\n framework_version=TensorFlow.LATEST_VERSION,\n py_version=tests.integ.PYTHON_VERSION,\n metric_definitions=[{\"Name\": \"train:global_steps\", \"Regex\": r\"global_step\\/sec:\\s(.*)\"}],\n )\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/mnist\"\n )\n\n with tests.integ.timeout.timeout(minutes=tests.integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):\n estimator.fit(inputs=inputs, job_name=unique_name_from_base(\"test-tf-sm-mnist\"))\n _assert_s3_files_exist(\n estimator.model_dir, [\"graph.pbtxt\", \"model.ckpt-0.index\", \"model.ckpt-0.meta\"]\n )\n df = estimator.training_job_analytics.dataframe()\n assert df.size > 0\n\n\ndef test_server_side_encryption(sagemaker_session):\n boto_session = sagemaker_session.boto_session\n with tests.integ.kms_utils.bucket_with_encryption(boto_session, ROLE) as (\n bucket_with_kms,\n kms_key,\n ):\n output_path = os.path.join(\n bucket_with_kms, \"test-server-side-encryption\", time.strftime(\"%y%m%d-%H%M\")\n )\n\n estimator = TensorFlow(\n entry_point=SCRIPT,\n role=ROLE,\n train_instance_count=1,\n train_instance_type=\"ml.c5.xlarge\",\n sagemaker_session=sagemaker_session,\n script_mode=True,\n framework_version=TensorFlow.LATEST_VERSION,\n py_version=tests.integ.PYTHON_VERSION,\n code_location=output_path,\n output_path=output_path,\n model_dir=\"/opt/ml/model\",\n output_kms_key=kms_key,\n )\n\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/mnist\"\n )\n\n with tests.integ.timeout.timeout(minutes=tests.integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):\n estimator.fit(\n inputs=inputs, 
job_name=unique_name_from_base(\"test-server-side-encryption\")\n )\n\n\[email protected]_quick\ndef test_mnist_distributed(sagemaker_session, instance_type):\n estimator = TensorFlow(\n entry_point=SCRIPT,\n role=ROLE,\n train_instance_count=2,\n train_instance_type=instance_type,\n sagemaker_session=sagemaker_session,\n py_version=tests.integ.PYTHON_VERSION,\n script_mode=True,\n framework_version=TensorFlow.LATEST_VERSION,\n distributions=PARAMETER_SERVER_DISTRIBUTION,\n )\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/distributed_mnist\"\n )\n\n with tests.integ.timeout.timeout(minutes=tests.integ.TRAINING_DEFAULT_TIMEOUT_MINUTES):\n estimator.fit(inputs=inputs, job_name=unique_name_from_base(\"test-tf-sm-distributed\"))\n _assert_s3_files_exist(\n estimator.model_dir, [\"graph.pbtxt\", \"model.ckpt-0.index\", \"model.ckpt-0.meta\"]\n )\n\n\ndef test_mnist_async(sagemaker_session):\n estimator = TensorFlow(\n entry_point=SCRIPT,\n role=ROLE,\n train_instance_count=1,\n train_instance_type=\"ml.c5.4xlarge\",\n py_version=tests.integ.PYTHON_VERSION,\n sagemaker_session=sagemaker_session,\n script_mode=True,\n framework_version=TensorFlow.LATEST_VERSION,\n tags=TAGS,\n )\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(MNIST_RESOURCE_PATH, \"data\"), key_prefix=\"scriptmode/mnist\"\n )\n estimator.fit(inputs=inputs, wait=False, job_name=unique_name_from_base(\"test-tf-sm-async\"))\n training_job_name = estimator.latest_training_job.name\n time.sleep(20)\n endpoint_name = training_job_name\n _assert_training_job_tags_match(\n sagemaker_session.sagemaker_client, estimator.latest_training_job.name, TAGS\n )\n with tests.integ.timeout.timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):\n estimator = TensorFlow.attach(\n training_job_name=training_job_name, sagemaker_session=sagemaker_session\n )\n model_name = \"model-mnist-async\"\n predictor = estimator.deploy(\n initial_instance_count=1,\n instance_type=\"ml.c4.xlarge\",\n endpoint_name=endpoint_name,\n model_name=model_name,\n )\n\n result = predictor.predict(np.zeros(784))\n print(\"predict result: {}\".format(result))\n _assert_endpoint_tags_match(sagemaker_session.sagemaker_client, predictor.endpoint, TAGS)\n _assert_model_tags_match(sagemaker_session.sagemaker_client, model_name, TAGS)\n _assert_model_name_match(sagemaker_session.sagemaker_client, endpoint_name, model_name)\n\n\ndef test_deploy_with_input_handlers(sagemaker_session, instance_type):\n estimator = TensorFlow(\n entry_point=\"training.py\",\n source_dir=TFS_RESOURCE_PATH,\n role=ROLE,\n train_instance_count=1,\n train_instance_type=instance_type,\n py_version=tests.integ.PYTHON_VERSION,\n sagemaker_session=sagemaker_session,\n script_mode=True,\n framework_version=TensorFlow.LATEST_VERSION,\n tags=TAGS,\n )\n\n estimator.fit(job_name=unique_name_from_base(\"test-tf-tfs-deploy\"))\n\n endpoint_name = estimator.latest_training_job.name\n\n with timeout.timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):\n predictor = estimator.deploy(\n initial_instance_count=1,\n instance_type=instance_type,\n endpoint_name=endpoint_name,\n entry_point=os.path.join(TFS_RESOURCE_PATH, \"inference.py\"),\n )\n\n input_data = {\"instances\": [1.0, 2.0, 5.0]}\n expected_result = {\"predictions\": [4.0, 4.5, 6.0]}\n\n result = predictor.predict(input_data)\n assert expected_result == result\n\n\ndef _assert_s3_files_exist(s3_url, files):\n parsed_url = 
urlparse(s3_url)\n s3 = boto3.client(\"s3\")\n contents = s3.list_objects_v2(Bucket=parsed_url.netloc, Prefix=parsed_url.path.lstrip(\"/\"))[\n \"Contents\"\n ]\n for f in files:\n found = [x[\"Key\"] for x in contents if x[\"Key\"].endswith(f)]\n if not found:\n raise ValueError(\"File {} is not found under {}\".format(f, s3_url))\n\n\ndef _assert_tags_match(sagemaker_client, resource_arn, tags, retries=15):\n actual_tags = None\n for _ in range(retries):\n actual_tags = sagemaker_client.list_tags(ResourceArn=resource_arn)[\"Tags\"]\n if actual_tags:\n break\n else:\n # endpoint and training tags might take minutes to propagate. Sleeping.\n time.sleep(30)\n assert actual_tags == tags\n\n\ndef _assert_model_tags_match(sagemaker_client, model_name, tags):\n model_description = sagemaker_client.describe_model(ModelName=model_name)\n _assert_tags_match(sagemaker_client, model_description[\"ModelArn\"], tags)\n\n\ndef _assert_endpoint_tags_match(sagemaker_client, endpoint_name, tags):\n endpoint_description = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)\n\n _assert_tags_match(sagemaker_client, endpoint_description[\"EndpointArn\"], tags)\n\n\ndef _assert_training_job_tags_match(sagemaker_client, training_job_name, tags):\n training_job_description = sagemaker_client.describe_training_job(\n TrainingJobName=training_job_name\n )\n _assert_tags_match(sagemaker_client, training_job_description[\"TrainingJobArn\"], tags)\n\n\ndef _assert_model_name_match(sagemaker_client, endpoint_config_name, model_name):\n endpoint_config_description = sagemaker_client.describe_endpoint_config(\n EndpointConfigName=endpoint_config_name\n )\n assert model_name == endpoint_config_description[\"ProductionVariants\"][0][\"ModelName\"]\n" ]
[ [ "numpy.zeros" ] ]
lsst-uk/macauff
[ "02ce5caeaa1523957f914155dd433c7d1bf65869" ]
[ "macauff/tests/test_perturbation_auf.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE\n'''\nTests for the \"perturbation_auf\" module.\n'''\n\nimport pytest\nimport os\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom scipy.special import j0, j1\n\nfrom ..matching import CrossMatch\nfrom ..misc_functions_fortran import misc_functions_fortran as mff\nfrom ..perturbation_auf import (make_perturb_aufs, download_trilegal_simulation)\nfrom ..perturbation_auf_fortran import perturbation_auf_fortran as paf\n\nfrom .test_matching import _replace_line\n\n\nclass TestCreatePerturbAUF:\n def setup_class(self):\n os.makedirs('gaia_auf_folder', exist_ok=True)\n os.makedirs('wise_auf_folder', exist_ok=True)\n self.cm = CrossMatch(os.path.join(os.path.dirname(__file__), 'data/crossmatch_params.txt'),\n os.path.join(os.path.dirname(__file__), 'data/cat_a_params.txt'),\n os.path.join(os.path.dirname(__file__), 'data/cat_b_params.txt'))\n self.cm.a_auf_region_points = np.array([[0, 0], [50, 50]], dtype=float)\n self.cm.b_auf_region_points = np.array([[0, 0], [50, 50]], dtype=float)\n self.cm.mem_chunk_num = 4\n self.files_per_auf_sim = 7\n\n def test_no_perturb_outputs(self):\n # Randomly generate two catalogues (x3 files) between coordinates\n # 0, 0 and 50, 50.\n rng = np.random.default_rng()\n for path, Nf, size in zip([self.cm.a_cat_folder_path, self.cm.b_cat_folder_path], [3, 4],\n [25, 54]):\n cat = np.zeros((size, 3), float)\n rand_inds = rng.permutation(cat.shape[0])[:size // 2 - 1]\n cat[rand_inds, 0] = 50\n cat[rand_inds, 1] = 50\n cat += rng.uniform(-0.1, 0.1, cat.shape)\n np.save('{}/con_cat_astro.npy'.format(path), cat)\n\n cat = rng.uniform(10, 20, (size, Nf))\n np.save('{}/con_cat_photo.npy'.format(path), cat)\n\n cat = rng.choice(Nf, size=(size,))\n np.save('{}/magref.npy'.format(path), cat)\n\n self.cm.include_perturb_auf = False\n self.cm.run_auf = True\n self.cm.create_perturb_auf(self.files_per_auf_sim)\n lenr = len(self.cm.r)\n lenrho = len(self.cm.rho)\n for coord in ['0.0', '50.0']:\n for filt in ['W1', 'W2', 'W3', 'W4']:\n path = '{}/{}/{}/{}'.format(self.cm.b_auf_folder_path, coord, coord, filt)\n for filename, shape in zip(['frac', 'flux', 'offset', 'cumulative', 'fourier',\n 'N', 'mag'],\n [(1, 1), (1,), (lenr-1, 1), (lenr-1, 1), (lenrho-1, 1),\n (1, 1), (1, 1)]):\n assert os.path.isfile('{}/{}.npy'.format(path, filename))\n file = np.load('{}/{}.npy'.format(path, filename))\n assert np.all(file.shape == shape)\n assert np.all(np.load('{}/frac.npy'.format(path)) == 0)\n assert np.all(np.load('{}/cumulative.npy'.format(path)) == 1)\n assert np.all(np.load('{}/fourier.npy'.format(path)) == 1)\n assert np.all(np.load('{}/mag.npy'.format(path)) == 1)\n file = np.load('{}/offset.npy'.format(path))\n assert np.all(file[1:] == 0)\n assert file[0] == 1/(2 * np.pi * (self.cm.r[0] + self.cm.dr[0]/2) * self.cm.dr[0])\n\n file = np.load('{}/modelrefinds.npy'.format(self.cm.a_auf_folder_path))\n assert np.all(file[0, :] == 0)\n assert np.all(file[1, :] == np.load('{}/magref.npy'.format(self.cm.a_cat_folder_path)))\n\n # Select AUF pointing index based on a 0 vs 50 cut in longitude.\n cat = np.load('{}/con_cat_astro.npy'.format(self.cm.a_cat_folder_path))\n inds = np.ones(file.shape[1], int)\n inds[np.where(cat[:, 0] < 1)[0]] = 0\n assert np.all(file[2, :] == inds)\n\n def test_run_auf_file_number(self):\n # Reset any saved files from the above tests\n os.system(\"rm -rf {}/*\".format(self.cm.a_auf_folder_path))\n os.system(\"rm -rf {}/*\".format(self.cm.b_auf_folder_path))\n self.cm.run_auf = 
False\n with pytest.warns(UserWarning, match='Incorrect number of files in catalogue \"a\"'):\n self.cm.create_perturb_auf(self.files_per_auf_sim)\n\n # Now create fake files to simulate catalogue \"a\" having the right files.\n # For 2 AUF pointings this comes to 8 + 2*N_filt*files_per_auf_sim files.\n os.system(\"rm -rf {}/*\".format(self.cm.a_auf_folder_path))\n for i in range(6 + 2 + 2 * 3 * self.files_per_auf_sim):\n np.save('{}/random_file_{}.npy'.format(self.cm.a_auf_folder_path, i), np.zeros(1))\n\n # This should still return the same warning, just for catalogue \"b\" now.\n with pytest.warns(UserWarning) as record:\n self.cm.create_perturb_auf(self.files_per_auf_sim)\n assert len(record) == 1\n assert 'Incorrect number of files in catalogue \"b\"' in record[0].message.args[0]\n\n @pytest.mark.filterwarnings(\"ignore:Incorrect number of files in\")\n def test_load_auf_print(self, capsys):\n # Reset any saved files from the above tests\n os.system(\"rm -rf {}/*\".format(self.cm.a_auf_folder_path))\n os.system(\"rm -rf {}/*\".format(self.cm.b_auf_folder_path))\n\n # Generate new dummy data for catalogue \"b\"'s AUF folder.\n for i in range(6 + 2 + 2 * 4 * self.files_per_auf_sim):\n np.save('{}/random_file_{}.npy'.format(self.cm.b_auf_folder_path, i), np.zeros(1))\n capsys.readouterr()\n # This test will create catalogue \"a\" files because of the wrong\n # number of files (zero) in the folder.\n self.cm.create_perturb_auf(self.files_per_auf_sim)\n output = capsys.readouterr().out\n assert 'Loading empirical perturbation AUFs for catalogue \"a\"' not in output\n assert 'Loading empirical perturbation AUFs for catalogue \"b\"' in output\n\n os.system(\"rm -rf {}/*\".format(self.cm.a_auf_folder_path))\n os.system(\"rm -rf {}/*\".format(self.cm.b_auf_folder_path))\n # Generate new dummy data for each catalogue's AUF folder.\n for path, fn in zip([self.cm.a_auf_folder_path, self.cm.b_auf_folder_path], [3, 4]):\n for i in range(6 + 2 + 2 * fn * self.files_per_auf_sim):\n np.save('{}/random_file_{}.npy'.format(path, i), np.zeros(1))\n capsys.readouterr()\n self.cm.create_perturb_auf(self.files_per_auf_sim)\n output = capsys.readouterr().out\n assert 'Loading empirical perturbation AUFs for catalogue \"a\"' in output\n assert 'Loading empirical perturbation AUFs for catalogue \"b\"' in output\n\n\ndef test_perturb_aufs():\n # Poisson distribution with mean 0.08 gives 92.3% zero, 7.4% one, and 0.3% two draws.\n mean = 0.08\n prob_0_draw = mean**0 * np.exp(-mean) / np.math.factorial(0)\n prob_1_draw = mean**1 * np.exp(-mean) / np.math.factorial(1)\n prob_2_draw = mean**2 * np.exp(-mean) / np.math.factorial(2)\n\n N = np.array([1.0])\n m = np.array([0.0])\n R = 1.185*6.1\n r = np.linspace(0, R, 1500)\n dr = np.diff(r)\n rho = np.linspace(0, 100, 10000)\n drho = np.diff(rho)\n j0s = mff.calc_j0(r[:-1]+dr/2, rho[:-1]+drho/2)\n\n model_count = 1.0\n num_trials = 100000\n mag_cut = np.array([5.0])\n model_mags = np.array([0])\n model_mags_interval = np.array([1.0e-8])\n\n log10y = np.log10(mean / model_mags_interval / np.pi / (R / 3600)**2)\n\n track_sp_hist = np.zeros(len(r)-1, float)\n track_pa_hist = np.zeros(len(r)-1, float)\n # Have to keep track of the uncertainty in the counts, to compare fairly\n # with expected Poissonian counting statistics scatter.\n track_pa_hist_err_sq = np.zeros(len(r)-1, float)\n\n track_pa_fourier = np.zeros(len(rho)-1, float)\n\n seed_size = paf.get_random_seed_size()\n rng = np.random.default_rng(seed=123124)\n\n # Limit the size of each simulation, but run many 
to aggregate\n # better counting statistics.\n num = 100\n for _ in range(num):\n seed = rng.choice(1000000, size=(seed_size, 1))\n offsets, fracs, fluxs = paf.scatter_perturbers(np.array([mean]), m, R, 5, mag_cut,\n model_mags_interval, num_trials, seed[:, 0])\n hist, _ = np.histogram(offsets, bins=r)\n assert_allclose(fracs[0], 1-prob_0_draw, rtol=0.05)\n assert_allclose(np.mean(fluxs), prob_0_draw*0+prob_1_draw*1+prob_2_draw*2, rtol=0.05)\n\n Frac, Flux, fourieroffset, offsets, cumulative = paf.perturb_aufs(\n N, m, r[:-1]+dr/2, dr, r, j0s,\n model_mags+model_mags_interval/2, model_mags_interval, log10y, model_count,\n np.array([1]), mag_cut, R, num_trials, seed)\n\n assert_allclose(Frac, 1-prob_0_draw, rtol=0.05)\n assert_allclose(Flux, prob_0_draw*0+prob_1_draw*1+prob_2_draw*2, rtol=0.05)\n\n track_sp_hist += hist / np.sum(hist) / (np.pi * (r[1:]**2 - r[:-1]**2)) / num\n\n track_pa_hist += offsets[:, 0] / num\n track_pa_hist_err_sq += (np.sqrt(\n offsets[:, 0] * np.sum(hist) * (np.pi * (r[1:]**2 - r[:-1]**2))) / (\n np.sum(hist) * (np.pi * (r[1:]**2 - r[:-1]**2))) / num)**2\n\n track_pa_fourier += fourieroffset[:, 0] / num\n\n # Here we assume that the direct and wrapper calls had the same seed for\n # each call, and identical results, and hence should basically agree perfectly.\n assert_allclose(track_pa_hist, track_sp_hist, atol=1e-6)\n\n offsets_fake = np.zeros_like(r[:-1])\n offsets_fake[0] += prob_0_draw / (np.pi * (r[1]**2 - r[0]**2))\n # Given that we force equal-brightness \"binaries\", we expect a maximum\n # perturbation of half the total radius.\n q = np.where(r[1:] > R/2)[0]\n offsets_fake[:q[0]] += prob_1_draw / np.pi / (R/2)**2\n final_bin_frac = (R/2 - r[q[0]]) / (r[q[0]+1] - r[q[0]])\n offsets_fake[q[0]] += final_bin_frac * prob_1_draw / np.pi / (R/2)**2\n\n track_pa_hist_err_sq[track_pa_hist_err_sq == 0] = 1e-8\n for i in range(len(r)-1):\n assert_allclose(track_pa_hist[i], offsets_fake[i], rtol=0.05,\n atol=1e-5 + 4 * np.sqrt(track_pa_hist_err_sq[i]))\n\n # Take the average fourier representation of the offsets and compare. The\n # first expression is the integral of the constant 1/(pi (R/2)^2) probability\n # for the randomly placed, single-draw perturbations.\n fake_fourier = (1-prob_0_draw) * 2 / (np.pi * R * (rho[:-1]+drho/2)) * j1(\n np.pi * R * (rho[:-1]+drho/2))\n # The second necessary half of the fourier representation should be a delta\n # function, which becomes the unity function, but since we do numerical\n # integrations, is in practice just the fraction of zero-draw \"perturbations\"\n # in an annulus of the first radial bin, Fourier-Bessel transformed. 
This\n # gives f(r) = P / (2 pi r dr), which then cancels everything except J0.\n fake_fourier += prob_0_draw * j0(2 * np.pi * (r[0]+dr[0]/2) * (rho[:-1]+drho/2))\n\n # Quickly verify fourier_transform by comparing the theoretical\n # real-space distribution, fourier transformed, to the theoretical\n # fourier transformation as well.\n another_fake_fourier = paf.fourier_transform(offsets_fake, r[:-1]+dr/2, dr, j0s)\n\n assert_allclose(fake_fourier, another_fake_fourier, rtol=5e-3)\n assert_allclose(fake_fourier, track_pa_fourier, rtol=5e-3)\n\n\ndef test_histogram():\n rng = np.random.default_rng(11111)\n x = rng.uniform(0, 1, 10000)\n bins = np.linspace(0, 1, 15)\n\n _, counts_f = paf.histogram1d_dp(x, bins[0], bins[-1], len(bins)-1,\n np.array([True] * (len(x))), np.ones_like(x))\n counts_p, _ = np.histogram(x, bins=bins)\n assert np.all(counts_f == counts_p)\n\n\ndef test_circle_area():\n rng = np.random.default_rng(123897123)\n R = 0.1\n\n x_edges = [0, 1]\n y_edges = [0, 1]\n\n # If circle is inside rectangle, get full area:\n done = 0\n while done < 10:\n [x, y] = rng.uniform(0, 1, size=2)\n if (x - R >= x_edges[0] and x + R <= x_edges[1] and\n y - R >= y_edges[0] and y + R <= y_edges[1]):\n calc_area = paf.get_circle_area_overlap([x], [y], R, x_edges[0], x_edges[1],\n y_edges[0], y_edges[1])\n assert_allclose(calc_area, np.pi * R**2)\n done += 1\n\n # Now, if the circle is exactly on the corners of the rectangle\n # we should have a quarter the area:\n for x, y in zip([0, 0, 1, 1], [0, 1, 0, 1]):\n calc_area = paf.get_circle_area_overlap([x], [y], R, x_edges[0], x_edges[1],\n y_edges[0], y_edges[1])\n assert_allclose(calc_area, np.pi * R**2 / 4)\n\n # In the middle of an edge we should have half the circle area:\n for x, y in zip([0, 0.5, 1, 0.5], [0.5, 0, 0.5, 1]):\n calc_area = paf.get_circle_area_overlap([x], [y], R, x_edges[0], x_edges[1],\n y_edges[0], y_edges[1])\n assert_allclose(calc_area, np.pi * R**2 / 2)\n\n # Verify a few randomly placed circles too:\n done = 0\n xp = np.linspace(*x_edges, 100)\n yp = np.linspace(*y_edges, 100)\n dx, dy = xp[1] - xp[0], yp[1] - yp[0]\n while done < 20:\n [x, y] = rng.uniform(0, 1, size=2)\n if np.any([x - R < x_edges[0], x + R > x_edges[1],\n y - R < y_edges[0], y + R > y_edges[1]]):\n calc_area = paf.get_circle_area_overlap([x], [y], R, x_edges[0], x_edges[1],\n y_edges[0], y_edges[1])\n manual_area = 0\n for i in range(len(xp)):\n for j in range(len(yp)):\n if np.sqrt((xp[i] - x)**2 + (yp[j] - y)**2) <= R:\n manual_area += dx*dy\n assert_allclose(calc_area, manual_area, rtol=0.05)\n done += 1\n\n\nclass TestMakePerturbAUFs():\n def setup_class(self):\n self.auf_folder = 'auf_folder'\n self.cat_folder = 'cat_folder'\n os.system('rm -r {}'.format(self.auf_folder))\n os.system('rm -r {}'.format(self.cat_folder))\n os.makedirs(self.auf_folder)\n os.makedirs(self.cat_folder)\n\n self.filters = np.array(['W1'])\n self.tri_filt_names = np.copy(self.filters)\n self.auf_points = np.array([[0.0, 0.0]])\n self.ax_lims = np.array([0, 1, 0, 1])\n\n self.psf_fwhms = np.array([6.1])\n self.r = np.linspace(0, 1.185 * self.psf_fwhms[0], 2500)\n self.dr = np.diff(self.r)\n self.rho = np.linspace(0, 100, 10000)\n self.drho = np.diff(self.rho)\n self.which_cat = 'b'\n self.include_perturb_auf = True\n self.num_trials = 100000\n\n self.mem_chunk_num = 1\n self.delta_mag_cuts = np.array([2.5, 5])\n\n self.args = [self.auf_folder, self.cat_folder, self.filters, self.auf_points,\n self.r, self.dr, self.rho, self.drho, self.which_cat,\n 
self.include_perturb_auf, self.mem_chunk_num]\n\n self.files_per_auf_sim = 7\n\n def test_raise_value_errors(self):\n with pytest.raises(ValueError, match='tri_set_name must be given if include_perturb_auf ' +\n 'and tri_download_flag are both True'):\n make_perturb_aufs(*self.args, tri_download_flag=True)\n with pytest.raises(ValueError, match='tri_filt_num must be given if include_perturb_auf ' +\n 'and tri_download_flag are both True'):\n make_perturb_aufs(*self.args, tri_download_flag=True, tri_set_name='WISE')\n with pytest.raises(ValueError, match='auf_region_frame must be given if ' +\n 'include_perturb_auf and tri_download_flag are both True'):\n make_perturb_aufs(*self.args, tri_download_flag=True, tri_set_name='WISE',\n tri_filt_num=1)\n\n with pytest.raises(ValueError, match='tri_filt_names must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args)\n with pytest.raises(ValueError, match='delta_mag_cuts must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1)\n with pytest.raises(ValueError, match='psf_fwhms must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1)\n with pytest.raises(ValueError, match='num_trials must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1, psf_fwhms=1)\n with pytest.raises(ValueError, match='j0s must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1, psf_fwhms=1,\n num_trials=1)\n with pytest.raises(ValueError, match='density_mags must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1, psf_fwhms=1,\n num_trials=1, j0s=1)\n with pytest.raises(ValueError, match='dm_max must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1, psf_fwhms=1,\n num_trials=1, j0s=1, density_mags=1)\n with pytest.raises(ValueError, match='d_mag must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1, psf_fwhms=1,\n num_trials=1, j0s=1, density_mags=1, dm_max=1)\n with pytest.raises(ValueError, match='compute_local_density must be given if ' +\n 'include_perturb_auf is True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1, psf_fwhms=1,\n num_trials=1, j0s=1, density_mags=1, dm_max=1, d_mag=1)\n with pytest.raises(ValueError, match='density_radius must be given if ' +\n 'include_perturb_auf and compute_local_density are both True'):\n make_perturb_aufs(*self.args, tri_filt_names=1, delta_mag_cuts=1,\n compute_local_density=True, psf_fwhms=1, num_trials=1, j0s=1,\n density_mags=1, dm_max=1, d_mag=1)\n\n def test_without_compute_local_density(self):\n # Number of sources per PSF circle, on average, solved backwards to ensure\n # that local density ends up exactly in the middle of a count_array bin.\n # This should be approximately 0.076 sources per PSF circle.\n psf_mean = np.exp(8.7) * np.pi * (1.185 * self.psf_fwhms[0] / 3600)**2\n # Local density is the controllable variable to ensure that we get\n # the expected sources per PSF circle, with most variables cancelling\n # mean divided by circle area sets the density needed.\n local_dens = psf_mean / (np.pi * (1.185 * self.psf_fwhms[0] / 3600)**2)\n np.save('{}/local_N.npy'.format(self.auf_folder), np.array([[local_dens]]))\n\n 
np.save('{}/con_cat_astro.npy'.format(self.cat_folder), np.array([[0.3, 0.3, 0.1]]))\n np.save('{}/con_cat_photo.npy'.format(self.cat_folder), np.array([[14.99]]))\n np.save('{}/magref.npy'.format(self.cat_folder), np.array([0]))\n\n num_trials = 100000\n j0s = mff.calc_j0(self.rho[:-1]+self.drho/2, self.r[:-1]+self.dr/2)\n cutoff_mags = np.array([20])\n dm_max = np.array([10])\n d_mag = 0.1\n\n # Fake up a TRILEGAL simulation data file. Need to paste the same source\n # four times to pass a check for more than three sources in a histogram.\n text = ('#area = 4.0 sq deg\\n#Gc logAge [M/H] m_ini logL logTe logg m-M0 Av ' +\n 'm2/m1 mbol J H Ks IRAC_3.6 IRAC_4.5 IRAC_5.8 IRAC_8.0 MIPS_24 ' +\n 'MIPS_70 MIPS_160 W1 W2 W3 W4 Mact\\n 1 6.65 -0.39 0.02415 ' +\n '-2.701 3.397 4.057 14.00 8.354 0.00 25.523 25.839 24.409 23.524 22.583 ' +\n '22.387 22.292 22.015 21.144 19.380 20.878 15.001 22.391 21.637 21.342 0.024\\n ' +\n '1 6.65 -0.39 0.02415 -2.701 3.397 4.057 14.00 8.354 0.00 25.523 25.839 ' +\n '24.409 23.524 22.583 22.387 22.292 22.015 21.144 19.380 20.878 15.002 22.391 ' +\n '21.637 21.342 0.024\\n 1 6.65 -0.39 0.02415 -2.701 3.397 4.057 14.00 ' +\n '8.354 0.00 25.523 25.839 24.409 23.524 22.583 22.387 22.292 22.015 21.144 ' +\n '19.380 20.878 15.003 22.391 21.637 21.342 0.024\\n 1 6.65 -0.39 0.02415 ' +\n '-2.701 3.397 4.057 14.00 8.354 0.00 25.523 25.839 24.409 23.524 22.583 ' +\n '22.387 22.292 22.015 21.144 19.380 20.878 15.004 22.391 21.637 21.342 0.024')\n os.makedirs('{}/{}/{}'.format(\n self.auf_folder, self.auf_points[0][0], self.auf_points[0][1]), exist_ok=True)\n with open('{}/{}/{}/trilegal_auf_simulation.dat'.format(\n self.auf_folder, self.auf_points[0][0], self.auf_points[0][1]), \"w\") as f:\n f.write(text)\n\n prob_0_draw = psf_mean**0 * np.exp(-psf_mean) / np.math.factorial(0)\n prob_1_draw = psf_mean**1 * np.exp(-psf_mean) / np.math.factorial(1)\n prob_2_draw = psf_mean**2 * np.exp(-psf_mean) / np.math.factorial(2)\n\n ax1, ax2 = self.auf_points[0]\n\n keep_frac = np.zeros((len(self.delta_mag_cuts), 1), float)\n keep_flux = np.zeros((1,), float)\n track_fourier = np.zeros(len(self.rho)-1, float)\n\n # Catalogue bins for the source:\n a_photo = np.load('{}/con_cat_photo.npy'.format(self.cat_folder))\n dmag = 0.25\n mag_min = dmag * np.floor(np.amin(a_photo)/dmag)\n mag_max = dmag * np.ceil(np.amax(a_photo)/dmag)\n mag_bins = np.arange(mag_min, mag_max+1e-10, dmag)\n mag_bin = 0.5 * (mag_bins[1:]+mag_bins[:-1])\n # Model magnitude bins:\n tri_mags = np.array([15.001, 15.002, 15.003, 15.004])\n minmag = d_mag * np.floor(np.amin(tri_mags)/d_mag)\n maxmag = d_mag * np.ceil(np.amax(tri_mags)/d_mag)\n mod_bins = np.arange(minmag, maxmag+1e-10, d_mag)\n mod_bin = mod_bins[:-1] + np.diff(mod_bins)/2\n mag_offset = mod_bin - mag_bin\n rel_flux = 10**(-1/2.5 * mag_offset)\n\n N = 25\n for i in range(N):\n make_perturb_aufs(*self.args, psf_fwhms=self.psf_fwhms, num_trials=num_trials, j0s=j0s,\n density_mags=cutoff_mags, dm_max=dm_max, d_mag=d_mag,\n delta_mag_cuts=self.delta_mag_cuts, compute_local_density=False,\n tri_filt_names=self.tri_filt_names)\n\n if i == 0:\n for name, size in zip(\n ['frac', 'flux', 'offset', 'cumulative', 'fourier', 'N', 'mag'],\n [(len(self.delta_mag_cuts), 1), (1,), (len(self.r)-1, 1),\n (len(self.r)-1, 1), (len(self.rho)-1, 1), (1,), (1,)]):\n var = np.load('{}/{}/{}/{}/{}.npy'.format(\n self.auf_folder, ax1, ax2, self.filters[0], name))\n assert np.all(var.shape == size)\n\n fracs = np.load('{}/{}/{}/{}/frac.npy'.format(\n self.auf_folder, ax1, ax2, 
self.filters[0]))\n fluxs = np.load('{}/{}/{}/{}/flux.npy'.format(\n self.auf_folder, ax1, ax2, self.filters[0]))\n fourier = np.load('{}/{}/{}/{}/fourier.npy'.format(\n self.auf_folder, ax1, ax2, self.filters[0]))\n\n keep_frac += fracs / N\n keep_flux += fluxs / N\n track_fourier += fourier[:, 0] / N\n\n # Have more relaxed conditions on assertion than in test_perturb_aufs\n # above, as we can't arbitrarily force the magnitude bin widths to be\n # very small, and hence have a blur on relative fluxes allowed.\n assert_allclose(keep_frac[0, 0], 1-prob_0_draw, rtol=0.1)\n assert_allclose(keep_flux[0], (prob_0_draw*0 + prob_1_draw*rel_flux +\n prob_2_draw*2*rel_flux), rtol=0.1)\n\n R = 1.185 * self.psf_fwhms[0]\n small_R = R * rel_flux / (1 + rel_flux)\n\n fake_fourier = (1-prob_0_draw) / (np.pi * small_R * (self.rho[:-1]+self.drho/2)) * j1(\n 2 * np.pi * small_R * (self.rho[:-1]+self.drho/2))\n fake_fourier += prob_0_draw * j0(2 * np.pi * (self.r[0]+self.dr[0]/2) *\n (self.rho[:-1]+self.drho/2))\n\n assert_allclose(fake_fourier, track_fourier, rtol=0.05)\n\n def test_with_compute_local_density(self):\n # Number of sources per PSF circle, on average, solved backwards to ensure\n # that local density ends up exactly in the middle of a count_array bin.\n # This should be approximately 0.076 sources per PSF circle.\n psf_mean = np.exp(8.7) * np.pi * (1.185 * self.psf_fwhms[0] / 3600)**2\n # This time we want to calculate the local density on the fly, but still\n # get the same value we did in the without compute local density test. We\n # therefore have to choose our \"density radius\" to set the appropriate\n # local density for our single source.\n density_mags = np.array([20])\n density_radius = np.sqrt(1 / np.pi / np.exp(8.7))\n\n new_auf_points = np.vstack((self.auf_points, np.array([[10, 10]])))\n\n # Have to fudge extra sources to keep our 15th mag source in the local\n # density cutout.\n np.save('{}/con_cat_astro.npy'.format(self.cat_folder),\n np.array([[0.3, 0.3, 0.1], [0.1, 0.1, 0.1], [0.9, 0.9, 0.1]]))\n np.save('{}/con_cat_photo.npy'.format(self.cat_folder), np.array([[14.99], [10], [10]]))\n np.save('{}/magref.npy'.format(self.cat_folder), np.array([0, 0, 0]))\n\n # Fake up a TRILEGAL simulation data file. 
Need to paste the same source\n # four times to pass a check for more than three sources in a histogram.\n text = ('#area = 4.0 sq deg\\n#Gc logAge [M/H] m_ini logL logTe logg m-M0 Av ' +\n 'm2/m1 mbol J H Ks IRAC_3.6 IRAC_4.5 IRAC_5.8 IRAC_8.0 MIPS_24 ' +\n 'MIPS_70 MIPS_160 W1 W2 W3 W4 Mact\\n 1 6.65 -0.39 0.02415 ' +\n '-2.701 3.397 4.057 14.00 8.354 0.00 25.523 25.839 24.409 23.524 22.583 ' +\n '22.387 22.292 22.015 21.144 19.380 20.878 15.001 22.391 21.637 21.342 0.024\\n ' +\n '1 6.65 -0.39 0.02415 -2.701 3.397 4.057 14.00 8.354 0.00 25.523 25.839 ' +\n '24.409 23.524 22.583 22.387 22.292 22.015 21.144 19.380 20.878 15.002 22.391 ' +\n '21.637 21.342 0.024\\n 1 6.65 -0.39 0.02415 -2.701 3.397 4.057 14.00 ' +\n '8.354 0.00 25.523 25.839 24.409 23.524 22.583 22.387 22.292 22.015 21.144 ' +\n '19.380 20.878 15.003 22.391 21.637 21.342 0.024\\n 1 6.65 -0.39 0.02415 ' +\n '-2.701 3.397 4.057 14.00 8.354 0.00 25.523 25.839 24.409 23.524 22.583 ' +\n '22.387 22.292 22.015 21.144 19.380 20.878 15.004 22.391 21.637 21.342 0.024')\n for i in range(len(new_auf_points)):\n os.makedirs('{}/{}/{}'.format(\n self.auf_folder, new_auf_points[i][0], new_auf_points[i][1]), exist_ok=True)\n with open('{}/{}/{}/trilegal_auf_simulation.dat'.format(\n self.auf_folder, new_auf_points[i][0], new_auf_points[i][1]), \"w\") as f:\n f.write(text)\n\n prob_0_draw = psf_mean**0 * np.exp(-psf_mean) / np.math.factorial(0)\n prob_1_draw = psf_mean**1 * np.exp(-psf_mean) / np.math.factorial(1)\n prob_2_draw = psf_mean**2 * np.exp(-psf_mean) / np.math.factorial(2)\n\n ax1, ax2 = self.auf_points[0]\n\n d_mag = 0.1\n # Catalogue bins for the source, only keeping the single\n # source currently under consideration, and ignoring the two extra objects\n # used to force the local density to be calculated properly.\n a_photo = np.load('{}/con_cat_photo.npy'.format(self.cat_folder))[0, :]\n dmag = 0.25\n mag_min = dmag * np.floor(np.amin(a_photo)/dmag)\n mag_max = dmag * np.ceil(np.amax(a_photo)/dmag)\n mag_bins = np.arange(mag_min, mag_max+1e-10, dmag)\n mag_bin = 0.5 * (mag_bins[1:]+mag_bins[:-1])\n # Model magnitude bins:\n tri_mags = np.array([15.001, 15.002, 15.003, 15.004])\n minmag = d_mag * np.floor(np.amin(tri_mags)/d_mag)\n maxmag = d_mag * np.ceil(np.amax(tri_mags)/d_mag)\n mod_bins = np.arange(minmag, maxmag+1e-10, d_mag)\n mod_bin = mod_bins[:-1] + np.diff(mod_bins)/2\n mag_offset = mod_bin - mag_bin\n rel_flux = 10**(-1/2.5 * mag_offset)\n\n ol, nl = 'include_perturb_auf = no', 'include_perturb_auf = yes\\n'\n f = open(os.path.join(os.path.dirname(__file__),\n 'data/crossmatch_params.txt')).readlines()\n idx = np.where([ol in line for line in f])[0][0]\n _replace_line(os.path.join(os.path.dirname(__file__), 'data/crossmatch_params.txt'),\n idx, nl, out_file=os.path.join(os.path.dirname(__file__),\n 'data/crossmatch_params_.txt'))\n\n ol, nl = 'filt_names = G_BP G G_RP', 'filt_names = G\\n'\n f = open(os.path.join(os.path.dirname(__file__),\n 'data/cat_a_params.txt')).readlines()\n idx = np.where([ol in line for line in f])[0][0]\n _replace_line(os.path.join(os.path.dirname(__file__), 'data/cat_a_params.txt'),\n idx, nl, out_file=os.path.join(os.path.dirname(__file__),\n 'data/cat_a_params_.txt'))\n for ol, nl in zip(['psf_fwhms = 0.12 0.12 0.12', 'cat_folder_path = gaia_folder',\n 'auf_folder_path = gaia_auf_folder', 'tri_filt_names = G_BP G G_RP',\n 'dens_mags = 20 20 20'],\n ['psf_fwhms = 0.12\\n', 'cat_folder_path = cat_folder\\n',\n 'auf_folder_path = auf_folder\\n', 'tri_filt_names = W1\\n',\n 'dens_mags = 
20\\n']):\n f = open(os.path.join(os.path.dirname(__file__),\n 'data/cat_a_params.txt')).readlines()\n idx = np.where([ol in line for line in f])[0][0]\n _replace_line(os.path.join(os.path.dirname(__file__), 'data/cat_a_params_.txt'),\n idx, nl)\n\n ol, nl = 'filt_names = W1 W2 W3 W4', 'filt_names = W1\\n'\n f = open(os.path.join(os.path.dirname(__file__),\n 'data/cat_b_params.txt')).readlines()\n idx = np.where([ol in line for line in f])[0][0]\n _replace_line(os.path.join(os.path.dirname(__file__), 'data/cat_b_params.txt'),\n idx, nl, out_file=os.path.join(os.path.dirname(__file__),\n 'data/cat_b_params_.txt'))\n for ol, nl in zip(['psf_fwhms = 6.08 6.84 7.36 11.99', 'cat_folder_path = wise_folder',\n 'auf_folder_path = wise_auf_folder', 'tri_filt_names = W1 W2 W3 W4',\n 'dens_mags = 20 20 20 20'],\n ['psf_fwhms = 6.08\\n', 'cat_folder_path = cat_folder\\n',\n 'auf_folder_path = auf_folder\\n', 'tri_filt_names = W1\\n',\n 'dens_mags = 20\\n']):\n f = open(os.path.join(os.path.dirname(__file__),\n 'data/cat_b_params.txt')).readlines()\n idx = np.where([ol in line for line in f])[0][0]\n _replace_line(os.path.join(os.path.dirname(__file__), 'data/cat_b_params_.txt'),\n idx, nl)\n\n cm = CrossMatch(os.path.join(os.path.dirname(__file__),\n 'data/crossmatch_params_.txt'),\n os.path.join(os.path.dirname(__file__), 'data/cat_a_params_.txt'),\n os.path.join(os.path.dirname(__file__), 'data/cat_b_params_.txt'))\n\n cm.a_auf_region_points = new_auf_points\n cm.b_auf_region_points = new_auf_points\n cm.cross_match_extent = self.ax_lims\n cm.a_dens_mags = density_mags\n cm.b_dens_mags = density_mags\n cm.a_dens_dist = density_radius\n cm.b_dens_dist = density_radius\n cm.compute_local_density = True\n cm.run_auf = True\n cm.run_group = True\n cm.run_cf = True\n cm.run_source = True\n cm.num_trials = self.num_trials\n\n cm.create_perturb_auf(self.files_per_auf_sim)\n\n fracs = np.load('{}/{}/{}/{}/frac.npy'.format(\n self.auf_folder, ax1, ax2, self.filters[0]))\n fluxs = np.load('{}/{}/{}/{}/flux.npy'.format(\n self.auf_folder, ax1, ax2, self.filters[0]))\n fourier = np.load('{}/{}/{}/{}/fourier.npy'.format(\n self.auf_folder, ax1, ax2, self.filters[0]))\n\n assert_allclose(fracs[0, 0], 1-prob_0_draw, rtol=0.1)\n assert_allclose(fluxs[0], (prob_0_draw*0 + prob_1_draw*rel_flux +\n prob_2_draw*2*rel_flux), rtol=0.1)\n\n R = 1.185 * self.psf_fwhms[0]\n small_R = R * rel_flux / (1 + rel_flux)\n\n fake_fourier = (1-prob_0_draw) / (np.pi * small_R * (cm.rho[:-1]+cm.drho/2)) * j1(\n 2 * np.pi * small_R * (cm.rho[:-1]+cm.drho/2))\n fake_fourier += prob_0_draw * j0(2 * np.pi * (cm.r[0]+cm.dr[0]/2) *\n (cm.rho[:-1]+cm.drho/2))\n\n assert_allclose(fake_fourier, fourier[:, 0], rtol=0.05)\n\n\[email protected]_data\ndef test_trilegal_download():\n tri_folder = '.'\n download_trilegal_simulation(tri_folder, 'gaiaDR2', 180, 20, 1, 'galactic', total_objs=10000)\n tri_name = 'trilegal_auf_simulation'\n f = open('{}/{}.dat'.format(tri_folder, tri_name), \"r\")\n line = f.readline()\n f.close()\n bits = line.split(' ')\n tri_area = float(bits[2])\n tri = np.genfromtxt('{}/{}.dat'.format(tri_folder, tri_name), delimiter=None,\n names=True, comments='#', skip_header=1)\n assert np.all(tri[:]['G'] <= 32)\n assert tri_area <= 10\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.diff", "numpy.histogram", "scipy.special.j1", "scipy.special.j0", "numpy.any", "numpy.ones_like", "numpy.copy", "numpy.amax", "numpy.log10", "numpy.where", "numpy.linspace", "numpy.mean", "numpy.sqrt", "numpy.zeros", "numpy.arange", "numpy.all", "numpy.zeros_like", "numpy.random.default_rng", "numpy.exp", "numpy.amin", "numpy.testing.assert_allclose", "numpy.array", "numpy.math.factorial" ] ]
cehanagan/pylith
[ "ac2c1587f87e45c948638b19560813d4d5b6a9e3" ]
[ "tests/fullscale/viscoelasticity/nofaults-3d/axialstrainrate_genmaxwell_soln.py" ]
[ "# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. Knepley, University at Buffalo\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2021 University of California, Davis\n#\n# See LICENSE.md for license information.\n#\n# ----------------------------------------------------------------------\n#\n# @file tests/fullscale/viscoelasticity/nofaults-3d/axialstrainrate_genmaxwell_soln.py\n#\n# @brief Analytical solution to axial strain rate relaxation problem for a\n# generalized Maxwell viscoelastic material.\n#\n# 3-D axial strain rate solution for linear generalized Maxwell viscoelastic material.\n#\n# Uz = U0 + V0*t\n# ----------\n# | |\n# Ux=0 | | Ux=0\n# | |\n# | |\n# ----------\n# Uz=0\n#\n# Dirichlet boundary conditions\n# Ux(-4000,y,z) = 0\n# Ux(+4000,y,z) = 0\n# Uy(x,-4000,z) = 0\n# Uy(x,+4000,z) = 0\n# Uz(x,y,-8000) = 0\n# Uz(x,y,+0) = U0 + V0*t\n#\n\nimport numpy\nimport math\nfrom pythia.pyre.units.time import year\n\n# Physical properties.\np_density = 2500.0\np_vs = 3464.1016\np_vp = 6000.0\np_viscosity_1 = 9.46728e17\np_viscosity_2 = 4.73364e17\np_viscosity_3 = 1.893456e18\np_shear_ratio_1 = 0.25\np_shear_ratio_2 = 0.25\np_shear_ratio_3 = 0.25\n\n# Stress and strain components, and number of Maxwell elements.\nnumMaxElements = 3\nnumComponents = 6\n\n# Initial displacement (1 m) and velocity (2 m/year).\nU0 = 1.0\nV0 = 2.0/year.value\n\n# Derived properties.\np_mu = p_density*p_vs*p_vs\np_lambda = p_density*p_vp*p_vp - 2.0*p_mu\np_youngs = p_mu*(3.0*p_lambda + 2.0*p_mu)/(p_lambda + p_mu)\np_poissons = 0.5*p_lambda/(p_lambda + p_mu)\np_shear_ratio_0 = 1.0 - p_shear_ratio_1 - p_shear_ratio_2 - p_shear_ratio_3\np_tau_1 = p_viscosity_1/(p_mu*p_shear_ratio_1)\np_tau_2 = p_viscosity_2/(p_mu*p_shear_ratio_2)\np_tau_3 = p_viscosity_3/(p_mu*p_shear_ratio_3)\np_shear_ratio = numpy.array([p_shear_ratio_1, p_shear_ratio_2, p_shear_ratio_3], dtype=numpy.float64)\n\n# Time information.\ndt = 0.025*year.value\nstartTime = dt\nendTime = 0.5*year.value\nnumSteps = 20\nt = numpy.linspace(startTime, endTime, num=numSteps, dtype=numpy.float64)\nU = U0 + V0*t\n\n# Uniform strain rate field (3D).\ne0 = U0/8000.0\nedot0 = V0/8000.0\nexx = numpy.zeros(numSteps, dtype=numpy.float64)\neyy = numpy.zeros(numSteps, dtype=numpy.float64)\nezz = e0 + edot0*t\nexy = numpy.zeros(numSteps, dtype=numpy.float64)\neyz = numpy.zeros(numSteps, dtype=numpy.float64)\nexz = numpy.zeros(numSteps, dtype=numpy.float64)\n\n# Deviatoric strains at t=0.\neMean0 = e0/3.0\neDev0 = numpy.zeros(numComponents, dtype=numpy.float64)\neDev0[0] = -eMean0\neDev0[1] = -eMean0\neDev0[2] = e0 - eMean0\neDev0[3] = 0.0\neDev0[4] = 0.0\neDev0[5] = 0.0\n\n# Deviatoric strain rates.\neMeanDot0 = edot0/3.0\neDevDot0 = numpy.zeros(numComponents, dtype=numpy.float64)\neDevDot0[0] = -eMeanDot0\neDevDot0[1] = -eMeanDot0\neDevDot0[2] = edot0 - eMeanDot0\neDevDot0[3] = 0.0\neDevDot0[4] = 0.0\neDevDot0[5] = 0.0\n\n# Deviatoric strains.\neMean = (exx + eyy + ezz)/3.0\neDev = numpy.zeros((numSteps, numComponents), dtype=numpy.float64)\neDev[:, 0] = exx - eMean\neDev[:, 1] = eyy - eMean\neDev[:, 2] = ezz - eMean\neDev[:, 3] = exy\neDev[:, 3] = eyz\neDev[:, 3] = exz\n\n# Loop over time steps.\neVis = numpy.zeros((numSteps, numComponents, numMaxElements), dtype=numpy.float64)\nsDev = numpy.zeros((numSteps, numComponents), dtype=numpy.float64)\n\nfor 
timeStep in range(numSteps):\n timeFac1 = math.exp(-t[timeStep]/p_tau_1)\n timeFac2 = math.exp(-t[timeStep]/p_tau_2)\n timeFac3 = math.exp(-t[timeStep]/p_tau_3)\n # Viscous strains.\n eVis[timeStep,:, 0] = eDev0*timeFac1 + eDevDot0*p_tau_1*(1.0 - timeFac1)\n eVis[timeStep,:, 1] = eDev0*timeFac2 + eDevDot0*p_tau_2*(1.0 - timeFac2)\n eVis[timeStep,:, 2] = eDev0*timeFac3 + eDevDot0*p_tau_3*(1.0 - timeFac3)\n\n # Deviatoric stresses.\n sDev[timeStep,:] = p_shear_ratio_0*eDev[timeStep,:]\n for elementNum in range(numMaxElements):\n sDev[timeStep,:] += p_shear_ratio[elementNum]*eVis[timeStep,:, elementNum]\n\nsDev *= 2.0*p_mu\n\n# Total stresses.\nsMean = eMean*(3.0*p_lambda + 2.0*p_mu)\nsxx = sDev[:, 0] + sMean\nsyy = sDev[:, 1] + sMean\nszz = sDev[:, 2] + sMean\nsxy = sDev[:, 3]\nsyz = sDev[:, 4]\nsxz = sDev[:, 5]\n\n# ----------------------------------------------------------------------\nclass AnalyticalSoln(object):\n \"\"\"Analytical solution to axial extension problem.\n \"\"\"\n SPACE_DIM = 3\n TENSOR_SIZE = 6\n\n def __init__(self):\n self.fields = {\n \"displacement\": self.displacement,\n \"density\": self.density,\n \"shear_modulus\": self.shear_modulus,\n \"bulk_modulus\": self.bulk_modulus,\n \"shear_modulus_ratio\": self.shear_modulus_ratio,\n \"maxwell_time\": self.maxwell_time,\n \"cauchy_strain\": self.strain,\n \"cauchy_stress\": self.stress,\n \"viscous_strain\": self.viscous_strain,\n \"initial_amplitude\": self.initial_displacement,\n \"rate_start_time\": self.bc_rate_time,\n \"rate_amplitude\": self.bc_velocity,\n }\n return\n\n def getField(self, name, mesh_entity, pts):\n field = self.fields[name](pts)\n return field\n\n def displacement(self, locs):\n \"\"\"Compute displacement field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n disp = numpy.zeros((numSteps, npts, self.SPACE_DIM), dtype=numpy.float64)\n disp[:,:, 2] = numpy.dot(ezz.reshape(numSteps, 1), (locs[:, 2] + 8000.0).reshape(1, npts))\n above = numpy.where(locs[:, 2] > -0.1)\n below = numpy.where(locs[:, 2] < -7999.9)\n # This is a bit kludgy.\n dispAbove = disp[:, above, 2].transpose()\n dispBelow = disp[:, below, 2].transpose()\n dispAbove[:,:,:] = U\n dispBelow[:,:,:] = 0.0\n disp[:, above, 2] = dispAbove.transpose()\n disp[:, below, 2] = dispBelow.transpose()\n return disp\n\n def initial_displacement(self, locs):\n \"\"\"Compute initial displacement field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n disp = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)\n disp[0,:, 2] = e0*(locs[:, 2] + 8000.0).reshape(1, npts)\n above = numpy.where(locs[:, 2] > -0.1)\n below = numpy.where(locs[:, 2] < -7999.9)\n disp[:, above, 2] = U0\n disp[:, below, 2] = 0.0\n return disp\n\n def bc_velocity(self, locs):\n \"\"\"Compute velocity field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n velocity = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)\n velocity[0,:, 2] = edot0*(locs[:, 2] + 8000.0).reshape(1, npts)\n above = numpy.where(locs[:, 2] > -0.1)\n below = numpy.where(locs[:, 2] < -7999.9)\n velocity[:, above, 2] = V0\n velocity[:, below, 2] = 0.0\n return velocity\n\n def bc_rate_time(self, locs):\n \"\"\"Compute rate start time at locations.\n \"\"\"\n (npts, dim) = locs.shape\n rate_start_time = numpy.zeros((1, npts, 1), dtype=numpy.float64)\n return rate_start_time\n\n def density(self, locs):\n \"\"\"Compute density field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n density = p_density * numpy.ones((1, npts, 1), dtype=numpy.float64)\n return density\n\n def 
shear_modulus(self, locs):\n \"\"\"Compute shear modulus field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n shear_modulus = p_mu * numpy.ones((1, npts, 1), dtype=numpy.float64)\n return shear_modulus\n\n def bulk_modulus(self, locs):\n \"\"\"Compute bulk modulus field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n bulk_modulus = (p_lambda + 2.0 / 3.0 * p_mu) * numpy.ones((1, npts, 1), dtype=numpy.float64)\n return bulk_modulus\n\n def maxwell_time(self, locs):\n \"\"\"Compute Maxwell time field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n maxwell_time = numpy.zeros((1, npts, 3), dtype=numpy.float64)\n maxwell_time[0,:, 0] = p_tau_1\n maxwell_time[0,:, 1] = p_tau_2\n maxwell_time[0,:, 2] = p_tau_3\n return maxwell_time\n\n def shear_modulus_ratio(self, locs):\n \"\"\"Compute shear modulus ratio field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n shear_modulus_ratio = numpy.zeros((1, npts, 3), dtype=numpy.float64)\n shear_modulus_ratio[0,:, 0] = p_shear_ratio_1\n shear_modulus_ratio[0,:, 1] = p_shear_ratio_2\n shear_modulus_ratio[0,:, 2] = p_shear_ratio_3\n return shear_modulus_ratio\n\n def strain(self, locs):\n \"\"\"Compute strain field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n strain = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)\n strain[:,:, 0] = exx.reshape(numSteps, 1)\n strain[:,:, 1] = eyy.reshape(numSteps, 1)\n strain[:,:, 2] = ezz.reshape(numSteps, 1)\n strain[:,:, 3] = exy.reshape(numSteps, 1)\n strain[:,:, 4] = eyz.reshape(numSteps, 1)\n strain[:,:, 5] = exz.reshape(numSteps, 1)\n return strain\n\n def stress(self, locs):\n \"\"\"Compute stress field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n stress = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)\n stress[:,:, 0] = sxx.reshape(numSteps, 1)\n stress[:,:, 1] = syy.reshape(numSteps, 1)\n stress[:,:, 2] = szz.reshape(numSteps, 1)\n stress[:,:, 3] = sxy.reshape(numSteps, 1)\n stress[:,:, 4] = syz.reshape(numSteps, 1)\n stress[:,:, 5] = sxz.reshape(numSteps, 1)\n return stress\n\n def viscous_strain(self, locs):\n \"\"\"Compute viscous strain field at locations.\n \"\"\"\n (npts, dim) = locs.shape\n viscous_strain = numpy.zeros((numSteps, npts, 3*self.TENSOR_SIZE), dtype=numpy.float64)\n viscous_strain[:,:, 0] = eVis[:, 0, 0].reshape(numSteps, 1)\n viscous_strain[:,:, 1] = eVis[:, 1, 0].reshape(numSteps, 1)\n viscous_strain[:,:, 2] = eVis[:, 2, 0].reshape(numSteps, 1)\n viscous_strain[:,:, 3] = eVis[:, 3, 0].reshape(numSteps, 1)\n viscous_strain[:,:, 4] = eVis[:, 4, 0].reshape(numSteps, 1)\n viscous_strain[:,:, 5] = eVis[:, 5, 0].reshape(numSteps, 1)\n viscous_strain[:,:, 6] = eVis[:, 0, 1].reshape(numSteps, 1)\n viscous_strain[:,:, 7] = eVis[:, 1, 1].reshape(numSteps, 1)\n viscous_strain[:,:, 8] = eVis[:, 2, 1].reshape(numSteps, 1)\n viscous_strain[:,:, 9] = eVis[:, 3, 1].reshape(numSteps, 1)\n viscous_strain[:,:, 10] = eVis[:, 4, 1].reshape(numSteps, 1)\n viscous_strain[:,:, 11] = eVis[:, 5, 1].reshape(numSteps, 1)\n viscous_strain[:,:, 12] = eVis[:, 0, 2].reshape(numSteps, 1)\n viscous_strain[:,:, 13] = eVis[:, 1, 2].reshape(numSteps, 1)\n viscous_strain[:,:, 14] = eVis[:, 2, 2].reshape(numSteps, 1)\n viscous_strain[:,:, 15] = eVis[:, 3, 2].reshape(numSteps, 1)\n viscous_strain[:,:, 16] = eVis[:, 4, 2].reshape(numSteps, 1)\n viscous_strain[:,:, 17] = eVis[:, 5, 2].reshape(numSteps, 1)\n return viscous_strain\n\n\n# End of file\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.array", "numpy.where", "numpy.linspace" ] ]
sclipman/TransmissionCluster2
[ "3dbe4c23bc0ae0489e2686d915615bbb32631f4d" ]
[ "EPI-ClusT.py" ]
[ "#!/usr/bin/env python3\n\n###############################################################################\n# Program: EPI-ClusT.py\n# Type: Python Script\n# Version: 1.0\n# Author: Steven J. Clipman\n# Description: Empiral Phylogeny Informed Cluster Tool for identifying\n# distance thresholds and defining clusters in phylogenetic trees\n# License: MIT\n###############################################################################\n\nfrom queue import Queue\nfrom treeswift import read_tree_newick\nimport PySimpleGUI as sg\nimport os\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nimport math\nimport statistics\nimport numpy\n\n\nNUM_THRESH = 1000 # number of thresholds to calculate genetic distance over\n\n\n# cut out the current node's subtree (by setting all nodes' DELETED to True) and return list of leaves\ndef cut(node):\n cluster = list()\n descendants = Queue()\n descendants.put(node)\n while not descendants.empty():\n descendant = descendants.get()\n if descendant.DELETED:\n continue\n descendant.DELETED = True\n descendant.left_dist = 0\n descendant.right_dist = 0\n descendant.edge_length = 0\n if descendant.is_leaf():\n cluster.append(str(descendant))\n else:\n for c in descendant.children:\n descendants.put(c)\n return cluster\n\n\n# initialize properties of input tree and return set containing taxa of leaves\ndef prep(tree, support):\n tree.resolve_polytomies()\n tree.suppress_unifurcations()\n leaves = set()\n for node in tree.traverse_postorder():\n if node.edge_length is None:\n node.edge_length = 0\n node.DELETED = False\n if node.is_leaf():\n leaves.add(str(node))\n else:\n try:\n node.confidence = float(str(node))\n except:\n node.confidence = 100. # give edges without support values support 100\n if node.confidence < support: # don't allow low-support edges\n node.edge_length = float('inf')\n return leaves\n\n\n# split leaves into minimum number of clusters such that the maximum leaf pairwise distance is below some threshold\ndef min_clusters_threshold_max(tree,threshold,support):\n leaves = prep(tree,support)\n clusters = list()\n for node in tree.traverse_postorder():\n # if I've already been handled, ignore me\n if node.DELETED:\n continue\n\n # find my undeleted max distances to leaf\n if node.is_leaf():\n node.left_dist = 0; node.right_dist = 0\n else:\n children = list(node.children)\n if children[0].DELETED and children[1].DELETED:\n cut(node); continue\n if children[0].DELETED:\n node.left_dist = 0\n else:\n node.left_dist = max(children[0].left_dist,children[0].right_dist) + children[0].edge_length\n if children[1].DELETED:\n node.right_dist = 0\n else:\n node.right_dist = max(children[1].left_dist,children[1].right_dist) + children[1].edge_length\n\n # if my kids are screwing things up, cut out the longer one\n if node.left_dist + node.right_dist > threshold:\n if node.left_dist > node.right_dist:\n cluster = cut(children[0])\n node.left_dist = 0\n else:\n cluster = cut(children[1])\n node.right_dist = 0\n\n # add cluster\n if len(cluster) != 0:\n clusters.append(cluster)\n for leaf in cluster:\n leaves.remove(leaf)\n\n # add all remaining leaves to a single cluster\n if len(leaves) != 0:\n clusters.append(list(leaves))\n return clusters\n\n# min_clusters_threshold_max, but all clusters must define a clade\ndef min_clusters_threshold_max_clade(tree, threshold, support):\n leaves = prep(tree, support)\n clusters = list()\n for node in tree.traverse_postorder():\n # if I've already been handled, ignore me\n if node.DELETED:\n 
continue\n\n # find my undeleted max distances to leaf\n if node.is_leaf():\n node.left_dist = 0\n node.right_dist = 0\n else:\n children = list(node.children)\n if children[0].DELETED and children[1].DELETED:\n cut(node)\n continue\n if children[0].DELETED:\n node.left_dist = 0\n else:\n node.left_dist = max(children[0].left_dist, children[0].right_dist) + children[0].edge_length\n if children[1].DELETED:\n node.right_dist = 0\n else:\n node.right_dist = max(children[1].left_dist, children[1].right_dist) + children[1].edge_length\n\n # if my kids are screwing things up, cut both\n if node.left_dist + node.right_dist > threshold:\n cluster_l = cut(children[0])\n node.left_dist = 0\n cluster_r = cut(children[1])\n node.right_dist = 0\n\n # add cluster\n for cluster in (cluster_l, cluster_r):\n if len(cluster) != 0:\n clusters.append(cluster)\n for leaf in cluster:\n leaves.remove(leaf)\n\n # add all remaining leaves to a single cluster\n if len(leaves) != 0:\n clusters.append(list(leaves))\n return clusters\n\n\n# pick the threshold between 0 and \"distance threshold\" that maximizes number of (non-singleton) clusters\ndef auto_cluster(method, tree, threshold, support, display_fig):\n supportTemp = float('-inf')\n if display_fig is True:\n distfile = open(\"EPI-ClusT_PlotData_NumClusters_by_DistanceThreshold.txt\", 'w')\n distfile.write(\"Distance\\tNumClusters\\n\")\n from copy import deepcopy\n thresholds = [i*threshold/NUM_THRESH for i in range(NUM_THRESH+1)]\n best = None\n best_num = -1\n best_t = -1\n distv = []\n xs = []\n ys = []\n for i, t in enumerate(thresholds):\n sg.OneLineProgressMeter('EPI-ClusT', i+1, len(thresholds)-1, 'key', 'Computing best genetic distance threshold...', orientation='h')\n clusters = method(deepcopy(tree), t, supportTemp)\n num_non_singleton = len([c for c in clusters if len(c) > 1])\n if display_fig is True:\n distfile.write(\"%s\\t%s\\n\" % (t, num_non_singleton))\n xs.append(float(t))\n ys.append(int(num_non_singleton))\n if num_non_singleton > best_num:\n best = clusters\n best_num = num_non_singleton\n raw_t = t\n best_t = float(round(t, 3))\n best = method(deepcopy(tree), best_t, support)\n outfile.write(\"Genetic Distance Uperbound: %s\\n\" % threshold)\n outfile.write(\"Best Distance Threshold: %s\\n\" % best_t)\n\n if display_fig is True:\n distfile.close()\n plt.figure(2)\n plt.bar(xs, ys, width=0.001)\n plt.ylabel('Number of Clusters')\n plt.xlabel('Genetic Distance Threshold')\n\n return best\n\n\n# plot distance histogram\ndef gen_hist(tree, display_fig):\n # if display_fig is True:\n # histfile = open(\"EPI-ClusT_PlotData_Pairwise_Distance_Histogram.txt\", 'w')\n pw_dists = []\n distance_matrix = tree.distance_matrix(leaf_labels=True)\n distance_matrix_keys = list(distance_matrix.keys())\n for i in range(len(distance_matrix_keys)):\n u = distance_matrix_keys[i]\n sg.OneLineProgressMeter('EPI-ClusT', i+1, len(distance_matrix_keys)-1, 'key', 'Analyzing pairwise distances...', orientation='h')\n for v in distance_matrix[u].keys():\n pw_dists.append(distance_matrix[u][v])\n # if display_fig is True:\n # histfile.write(\"%s\\t%s\\t%s\\n\" % (u, v, distance_matrix[u][v]))\n\n bin_size = int(math.ceil(math.sqrt(len(pw_dists)) / 10.0)) * 10\n plt.figure(1)\n plt.hist(pw_dists, bins=bin_size)\n plt.ylabel('Count')\n plt.xlabel('Sample Pairwise Genetic Distance')\n histarray = plt.hist(pw_dists, bins=bin_size)[0]\n binsarray = plt.hist(pw_dists, bins=bin_size)[1]\n # if display_fig is True:\n # histfile.close()\n return histarray, binsarray\n\n\n# 
generate edge list to visualize clusters in gephi\ndef generate_edge_list(tree, cluster_members):\n outname = \"EPI-ClusT_Network_Diagram_Edge_List.txt\"\n outfile = open(outname, 'w')\n outfile.write(\"Source\\tTarget\\n\")\n distance_matrix = tree.distance_matrix(leaf_labels=True)\n for cluster_num in cluster_members.keys():\n clustered_samples = cluster_members[cluster_num]\n if len(clustered_samples) == 2:\n outfile.write(\"%s\\t%s\\n\" % (clustered_samples[0], clustered_samples[1]))\n else:\n for i in range(len(clustered_samples)):\n id1 = clustered_samples[i]\n dist = 1000\n edgeTo = ''\n for j in range(i+1, len(clustered_samples)):\n id2 = clustered_samples[j]\n if distance_matrix[id1][id2] < dist:\n dist = distance_matrix[id1][id2]\n edgeTo = id2\n if edgeTo != '':\n outfile.write('%s\\t%s\\n' % (edgeTo, id1))\n outfile.close()\n\n\nif __name__ == \"__main__\":\n # Render GUI window\n passingfile = False\n passingdist = False\n passingsupp = False\n window = ''\n while passingfile is False or passingdist is False or passingsupp is False:\n if window != '':\n window.Close()\n layout = [\n [sg.Text(\"EPI-ClusT\", font=('Helvetica', 24, 'bold'))],\n [sg.Text(\"Empirical Phylogeny Informed Cluster Tool\", font=('Helvetica', 16))],\n [sg.Text(\"Written By: Steven J. Clipman, Johns Hopkins University\\n\", font=('Helvetica', 12))],\n [sg.Text('Newick Tree File*:', font=('Helvetica', 13)), sg.InputText(font=('Helvetica 13'), key='infilename'), sg.FileBrowse(font=('Helvetica 13'))],\n [sg.Text('Output Filename*:', font=('Helvetica', 13)), sg.InputText(font=('Helvetica 13'), default_text='EPI-ClusT_Results.txt', text_color='gray', key='outfilename')],\n [sg.Text('Genetic Distance Threshold (optional):', font=('Helvetica 13')), sg.InputText(font=('Helvetica 13'), key='dist'), sg.Checkbox('Compute Best Distance Threshold', font=('Helvetica 13'), default=False, key='df')],\n [sg.Text('Support Threshold (optional):', font=('Helvetica 13')), sg.InputText(font=('Helvetica 13'), key='support')],\n [sg.Checkbox('Plot Clusters Histogram', font=('Helvetica 13'), default=False, key='plothist'), sg.Checkbox('Export Network Edge List', font=('Helvetica 13'), default=False, key='edge'), sg.Checkbox('Rooted Tree: Use Clade Support', font=('Helvetica 13'), default=False, key='rooted')],\n [sg.OK('Analyze', font=('Helvetica', 13), size=(10, 2))]]\n\n window = sg.Window('EPI-ClusT', layout)\n event, values = window.Read()\n\n # parse user arguments\n if os.path.exists(values['infilename']) is not True:\n sg.Popup(\"Error: Input tree not found.\", font=('Helvetica', 13, 'bold'))\n passingfile = False\n else:\n passingfile = True\n try:\n float(values['dist'])\n if float(values['dist']) > 1 or float(values['dist']) < 0:\n sg.Popup(\"Error: Genetic distance threshold must be between 0 and 1.\", font=('Helvetica', 13, 'bold'))\n passingdist = False\n else:\n passingdist = True\n except ValueError:\n if values['df'] is not True:\n sg.Popup(\"Error: Genetic distance threshold must be between 0 and 1 or 'Compute Best Distance Threshold' must be selected.\", font=('Helvetica', 13, 'bold'))\n passingdist = False\n else:\n passingdist = True\n\n if values['support'] != '':\n try:\n float(values['support'])\n if float(values['support']) > 1 or float(values['support']) < 0:\n sg.Popup(\"Error: Support threshold must be between 0 and 1.\", font=('Helvetica', 13, 'bold'))\n passingsupp = False\n else:\n passingsupp = True\n except ValueError:\n sg.Popup(\"Error: Support threshold must be between 0 and 1.\", 
font=('Helvetica', 13, 'bold'))\n                passingsupp = False\n        else:\n            passingsupp = True\n\n\n    infile = open(values['infilename'], 'r')\n    outfile = open(values['outfilename'], 'w')\n    if values['support'] == '':\n        values['support'] = '-inf'\n    trees = list()\n    for line in infile:\n        if isinstance(line, bytes):\n            l = line.decode().strip()\n        else:\n            l = line.strip()\n        trees.append(read_tree_newick(l))\n\n    # run algorithm\n    outfile.write(\"** EPI-ClusT Results **\\n\")\n    outfile.write(\"Input File: %s\\n\" % values['infilename'])\n    outfile.write(\"Support Threshold: %s\\n\" % values['support'])\n    for t, tree in enumerate(trees):\n        if values['df'] is True:\n            gen_hist(tree, True)\n            plt.show(block=False)\n        # plot pairwise distances\n        visible = False\n        if values['plothist'] is True:\n            visible = True\n        if values['df'] is False:\n            outfile.write(\"Genetic Distance Threshold: %s\\n\" % values['dist'])\n            if values['rooted'] is True:\n                clusters = min_clusters_threshold_max_clade(tree, float(values['dist']), float(values['support']))\n            else:\n                clusters = min_clusters_threshold_max(tree, float(values['dist']), float(values['support']))\n        else:\n            d = float(sg.PopupGetText(\"Enter distance upperbound:\\nThe best genetic distance up through this threshold will be computed.\\nIf you are unsure, click 'Ok' to use the default upperbound of 0.10.\",title='Enter Distance Upperbound',default_text=\"0.10\", font=('Helvetica', 13)))\n            if values['rooted'] is True:\n                clusters = auto_cluster(min_clusters_threshold_max_clade, tree, float(d), float(values['support']), visible)\n            else:\n                clusters = auto_cluster(min_clusters_threshold_max, tree, float(d), float(values['support']), visible)\n        cluster_num = 1\n        clust_members = {}\n        for cluster in clusters:\n            if len(cluster) > 1:\n                for l in cluster:\n                    if cluster_num in clust_members:\n                        samplenames = clust_members[cluster_num]\n                        samplenames.append(l)\n                        clust_members[cluster_num] = samplenames\n                    else:\n                        samplenames = [l]\n                        clust_members[cluster_num] = samplenames\n                cluster_num += 1\n        totalclusters = clust_members\n        cluster_num -= 1\n        outfile.write('Found %s clusters\\n\\n' % cluster_num)\n        header = \"ClusterNum\\tNumberOfSamples\\tSampleNames\\n\"\n        outfile.write(header)\n        total = 0\n        for k in clust_members.keys():\n            total += len(clust_members[k])\n            outfile.write(\"%s\\t%s\\t[%s]\\n\" % (k, len(clust_members[k]), (','.join(clust_members[k]))))\n        outfile.write(\"\\n-------------------------------\\nTotal Samples Clustered: %s\" % total)\n        outfile.close()\n        if values['edge'] is True:\n            generate_edge_list(tree, clust_members)\n        if visible is True:\n            plt.show(block=False)\n    sg.PopupOK('Process Complete!',\n               'Results have been written to the output file:\\n%s' % values['outfilename'], font=('Helvetica', 13, 'bold'))\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.hist", "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.bar" ] ]
pengbeicn/turicreate
[ "43930dc95d1d74da21214b2ea8c717200daeaca6" ]
[ "src/unity/python/turicreate/toolkits/activity_classifier/_activity_classifier.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\"\"\"\nClass definition and utilities for the activity classification toolkit.\n\"\"\"\nfrom __future__ import absolute_import as _\nfrom __future__ import print_function as _\nfrom __future__ import division as _\n\nimport numpy as _np\nimport time as _time\nimport six as _six\n\nfrom turicreate import SArray as _SArray, SFrame as _SFrame\nfrom turicreate import aggregate as _agg\n\nimport turicreate.toolkits._internal_utils as _tkutl\nfrom turicreate.toolkits import _coreml_utils\nimport turicreate.toolkits._feature_engineering._internal_utils as _fe_tkutl\nfrom turicreate.toolkits._main import ToolkitError as _ToolkitError\nfrom turicreate.toolkits import evaluation as _evaluation\nfrom .. import _mxnet_utils\n\nfrom turicreate.toolkits._model import CustomModel as _CustomModel\nfrom turicreate.toolkits._model import PythonProxy as _PythonProxy\n\nfrom .util import random_split_by_session as _random_split_by_session\n\n\ndef create(dataset, session_id, target, features=None, prediction_window=100,\n validation_set='auto', max_iterations=10, batch_size=32, verbose=True):\n \"\"\"\n Create an :class:`ActivityClassifier` model.\n\n Parameters\n ----------\n dataset : SFrame\n Input data which consists of `sessions` of data where each session is\n a sequence of data. The data must be in `stacked` format, grouped by\n session. Within each session, the data is assumed to be sorted\n temporally. Columns in `features` will be used to train a model that\n will make a prediction using labels in the `target` column.\n\n session_id : string\n Name of the column that contains a unique ID for each session.\n\n target : string\n Name of the column containing the target variable. The values in this\n column must be of string or integer type. Use `model.classes` to\n retrieve the order in which the classes are mapped.\n\n features : list[string], optional\n Name of the columns containing the input features that will be used\n for classification. If set to `None`, all columns except `session_id`\n and `target` will be used.\n\n prediction_window : int, optional\n Number of time units between predictions. For example, if your input\n data is sampled at 100Hz, and the `prediction_window` is set to 100,\n then this model will make a prediction every 1 second.\n\n validation_set : SFrame, optional\n A dataset for monitoring the model's generalization performance to\n prevent the model from overfitting to the training data.\n\n For each row of the progress table, accuracy is measured over the\n provided training dataset and the `validation_set`. The format of this\n SFrame must be the same as the training set.\n\n When set to 'auto', a validation set is automatically sampled from the\n training data (if the training data has > 100 sessions). If\n validation_set is set to None, then all the data will be used for\n training.\n\n max_iterations : int , optional\n Maximum number of iterations/epochs made over the data during the\n training phase.\n\n batch_size : int, optional\n Number of sequence chunks used per training step. 
Must be greater than\n the number of GPUs in use.\n\n verbose : bool, optional\n If True, print progress updates and model details.\n\n Returns\n -------\n out : ActivityClassifier\n A trained :class:`ActivityClassifier` model.\n\n Examples\n --------\n .. sourcecode:: python\n\n >>> import turicreate as tc\n\n # Training on dummy data\n >>> data = tc.SFrame({\n ... 'accelerometer_x': [0.1, 0.2, 0.3, 0.4, 0.5] * 10,\n ... 'accelerometer_y': [0.5, 0.4, 0.3, 0.2, 0.1] * 10,\n ... 'accelerometer_z': [0.01, 0.01, 0.02, 0.02, 0.01] * 10,\n ... 'session_id': [0, 0, 0] * 10 + [1, 1] * 10,\n ... 'activity': ['walk', 'run', 'run'] * 10 + ['swim', 'swim'] * 10\n ... })\n\n # Create an activity classifier\n >>> model = tc.activity_classifier.create(data,\n ... session_id='session_id', target='activity',\n ... features=['accelerometer_x', 'accelerometer_y', 'accelerometer_z'])\n\n # Make predictions (as probability vector, or class)\n >>> predictions = model.predict(data)\n >>> predictions = model.predict(data, output_type='probability_vector')\n\n # Get both predictions and classes together\n >>> predictions = model.classify(data)\n\n # Get topk predictions (instead of only top-1) if your labels have more\n # 2 classes\n >>> predictions = model.predict_topk(data, k = 3)\n\n # Evaluate the model\n >>> results = model.evaluate(data)\n\n See Also\n --------\n ActivityClassifier, util.random_split_by_session\n \"\"\"\n _tkutl._raise_error_if_not_sframe(dataset, \"dataset\")\n from ._mx_model_architecture import _net_params\n from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter\n from ._sframe_sequence_iterator import prep_data as _prep_data\n from ._mx_model_architecture import _define_model_mxnet, _fit_model_mxnet\n from ._mps_model_architecture import _define_model_mps, _fit_model_mps\n from .._mps_utils import (use_mps as _use_mps,\n mps_device_name as _mps_device_name,\n ac_weights_mps_to_mxnet as _ac_weights_mps_to_mxnet)\n\n\n if not isinstance(target, str):\n raise _ToolkitError('target must be of type str')\n if not isinstance(session_id, str):\n raise _ToolkitError('session_id must be of type str')\n _tkutl._raise_error_if_sframe_empty(dataset, 'dataset')\n _tkutl._numeric_param_check_range('prediction_window', prediction_window, 1, 400)\n _tkutl._numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)\n\n if features is None:\n features = _fe_tkutl.get_column_names(dataset,\n interpret_as_excluded=True,\n column_names=[session_id, target])\n if not hasattr(features, '__iter__'):\n raise TypeError(\"Input 'features' must be a list.\")\n if not all([isinstance(x, str) for x in features]):\n raise TypeError(\"Invalid feature %s: Feature names must be of type str.\" % x)\n if len(features) == 0:\n raise TypeError(\"Input 'features' must contain at least one column name.\")\n\n start_time = _time.time()\n dataset = _tkutl._toolkits_select_columns(dataset, features + [session_id, target])\n _tkutl._raise_error_if_sarray_not_expected_dtype(dataset[target], target, [str, int])\n _tkutl._raise_error_if_sarray_not_expected_dtype(dataset[session_id], session_id, [str, int])\n\n # Encode the target column to numerical values\n use_target = target is not None\n dataset, target_map = _encode_target(dataset, target)\n\n predictions_in_chunk = 20\n chunked_data, num_sessions = _prep_data(dataset, features, session_id, prediction_window,\n predictions_in_chunk, target=target, verbose=verbose)\n\n if isinstance(validation_set, str) and validation_set == 'auto':\n 
if num_sessions < 100:\n validation_set = None\n else:\n dataset, validation_set = _random_split_by_session(dataset, session_id)\n\n # Decide whether to use MPS GPU, MXnet GPU or CPU\n num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)\n use_mps = _use_mps() and num_mxnet_gpus == 0\n\n if verbose:\n if use_mps:\n print('Using GPU to create model ({})'.format(_mps_device_name()))\n elif num_mxnet_gpus == 1:\n print('Using GPU to create model (CUDA)')\n elif num_mxnet_gpus > 1:\n print('Using {} GPUs to create model (CUDA)'.format(num_mxnet_gpus))\n else:\n print('Using CPU to create model')\n\n # Create data iterators\n user_provided_batch_size = batch_size\n batch_size = max(batch_size, num_mxnet_gpus, 1)\n use_mx_data_batch = not use_mps\n data_iter = _SFrameSequenceIter(chunked_data, len(features),\n prediction_window, predictions_in_chunk,\n batch_size, use_target=use_target, mx_output=use_mx_data_batch)\n\n if validation_set is not None:\n _tkutl._raise_error_if_not_sframe(validation_set, 'validation_set')\n _tkutl._raise_error_if_sframe_empty(validation_set, 'validation_set')\n validation_set = _tkutl._toolkits_select_columns(\n validation_set, features + [session_id, target])\n validation_set = validation_set.filter_by(target_map.keys(), target)\n validation_set, mapping = _encode_target(validation_set, target, target_map)\n chunked_validation_set, _ = _prep_data(validation_set, features, session_id, prediction_window,\n predictions_in_chunk, target=target, verbose=False)\n\n valid_iter = _SFrameSequenceIter(chunked_validation_set, len(features),\n prediction_window, predictions_in_chunk,\n batch_size, use_target=use_target, mx_output=use_mx_data_batch)\n else:\n valid_iter = None\n\n # Define model architecture\n context = _mxnet_utils.get_mxnet_context(max_devices=num_sessions)\n\n # Always create MXnet models, as the pred_model is later saved to the state\n # If MPS is used - the loss_model will be overwritten\n loss_model, pred_model = _define_model_mxnet(len(target_map), prediction_window,\n predictions_in_chunk, context)\n\n if use_mps:\n loss_model = _define_model_mps(batch_size, len(features), len(target_map),\n prediction_window, predictions_in_chunk, is_prediction_model=False)\n\n log = _fit_model_mps(loss_model, data_iter, valid_iter, max_iterations, verbose)\n\n else:\n # Train the model using Mxnet\n log = _fit_model_mxnet(loss_model, data_iter, valid_iter,\n max_iterations, num_mxnet_gpus, verbose)\n\n # Set up prediction model\n pred_model.bind(data_shapes=data_iter.provide_data, label_shapes=None,\n for_training=False)\n\n if use_mps:\n mps_params = loss_model.export()\n arg_params, aux_params = _ac_weights_mps_to_mxnet(mps_params, _net_params['lstm_h'])\n else:\n arg_params, aux_params = loss_model.get_params()\n\n pred_model.init_params(arg_params=arg_params, aux_params=aux_params)\n\n # Save the model\n state = {\n '_pred_model': pred_model,\n 'verbose': verbose,\n 'training_time': _time.time() - start_time,\n 'target': target,\n 'classes': sorted(target_map.keys()),\n 'features': features,\n 'session_id': session_id,\n 'prediction_window': prediction_window,\n 'max_iterations': max_iterations,\n 'num_examples': len(dataset),\n 'num_sessions': num_sessions,\n 'num_classes': len(target_map),\n 'num_features': len(features),\n 'training_accuracy': log['train_acc'],\n 'training_log_loss': log['train_loss'],\n '_target_id_map': target_map,\n '_id_target_map': {v: k for k, v in target_map.items()},\n '_predictions_in_chunk': 
predictions_in_chunk,\n '_recalibrated_batch_size': data_iter.batch_size,\n 'batch_size' : user_provided_batch_size\n }\n\n if validation_set is not None:\n state['valid_accuracy'] = log['valid_acc']\n state['valid_log_loss'] = log['valid_loss']\n\n model = ActivityClassifier(state)\n return model\n\n\ndef _encode_target(data, target, mapping=None):\n \"\"\" Encode targets to integers in [0, num_classes - 1] \"\"\"\n if mapping is None:\n mapping = {t: i for i, t in enumerate(sorted(data[target].unique()))}\n\n data[target] = data[target].apply(lambda t: mapping[t])\n return data, mapping\n\nclass ActivityClassifier(_CustomModel):\n \"\"\"\n A trained model that is ready to use for classification or export to\n CoreML.\n\n This model should not be constructed directly.\n \"\"\"\n\n _PYTHON_ACTIVITY_CLASSIFIER_VERSION = 2\n\n def __init__(self, state):\n self.__proxy__ = _PythonProxy(state)\n\n def _get_native_state(self):\n state = self.__proxy__.get_state()\n state['_pred_model'] = _mxnet_utils.get_mxnet_state(state['_pred_model'])\n return state\n\n @classmethod\n def _load_version(cls, state, version):\n from ._mx_model_architecture import _define_model_mxnet\n\n _tkutl._model_version_check(version, cls._PYTHON_ACTIVITY_CLASSIFIER_VERSION)\n\n data_seq_len = state['prediction_window'] * state['_predictions_in_chunk']\n\n context = _mxnet_utils.get_mxnet_context(max_devices=state['num_sessions'])\n _, _pred_model = _define_model_mxnet(len(state['_target_id_map']), state['prediction_window'],\n state['_predictions_in_chunk'], context)\n\n batch_size = state['batch_size']\n preds_in_chunk = state['_predictions_in_chunk']\n win = state['prediction_window'] * preds_in_chunk\n num_features = len(state['features'])\n data_shapes = [('data', (batch_size, win, num_features))]\n\n _pred_model.bind(data_shapes=data_shapes, label_shapes=None,\n for_training=False)\n arg_params = _mxnet_utils.params_from_dict(state['_pred_model']['arg_params'])\n aux_params = _mxnet_utils.params_from_dict(state['_pred_model']['aux_params'])\n _pred_model.init_params(arg_params=arg_params, aux_params=aux_params)\n state['_pred_model'] = _pred_model\n\n return ActivityClassifier(state)\n\n @classmethod\n def _native_name(cls):\n return \"activity_classifier\"\n\n def _get_version(self):\n return self._PYTHON_ACTIVITY_CLASSIFIER_VERSION\n\n def export_coreml(self, filename):\n \"\"\"\n Export the model in Core ML format.\n\n Parameters\n ----------\n filename: str\n A valid filename where the model can be saved.\n\n Examples\n --------\n >>> model.export_coreml(\"MyModel.mlmodel\")\n \"\"\"\n import coremltools as _cmt\n import mxnet as _mx\n from ._mx_model_architecture import _net_params\n\n prob_name = self.target + 'Probability'\n label_name = self.target\n\n input_features = [\n ('features', _cmt.models.datatypes.Array(*(1, self.prediction_window, self.num_features)))\n ]\n output_features = [\n (prob_name, _cmt.models.datatypes.Array(*(self.num_classes,)))\n ]\n\n model_params = self._pred_model.get_params()\n weights = {k: v.asnumpy() for k, v in model_params[0].items()}\n weights = _mx.rnn.LSTMCell(num_hidden=_net_params['lstm_h']).unpack_weights(weights)\n moving_weights = {k: v.asnumpy() for k, v in model_params[1].items()}\n\n builder = _cmt.models.neural_network.NeuralNetworkBuilder(\n input_features,\n output_features,\n mode='classifier'\n )\n\n # Conv\n # (1,1,W,C) -> (1,C,1,W)\n builder.add_permute(name='permute_layer', dim=(0, 3, 1, 2),\n input_name='features', output_name='conv_in')\n W = 
_np.expand_dims(weights['conv_weight'], axis=0).transpose((2, 3, 1, 0))\n builder.add_convolution(name='conv_layer',\n kernel_channels=self.num_features,\n output_channels=_net_params['conv_h'],\n height=1, width=self.prediction_window,\n stride_height=1, stride_width=self.prediction_window,\n border_mode='valid', groups=1,\n W=W, b=weights['conv_bias'], has_bias=True,\n input_name='conv_in', output_name='relu0_in')\n builder.add_activation(name='relu_layer0', non_linearity='RELU',\n input_name='relu0_in', output_name='lstm_in')\n\n # LSTM\n builder.add_optionals([('lstm_h_in', _net_params['lstm_h']),\n ('lstm_c_in', _net_params['lstm_h'])],\n [('lstm_h_out', _net_params['lstm_h']),\n ('lstm_c_out', _net_params['lstm_h'])])\n\n W_x = [weights['lstm_i2h_i_weight'], weights['lstm_i2h_f_weight'],\n weights['lstm_i2h_o_weight'], weights['lstm_i2h_c_weight']]\n W_h = [weights['lstm_h2h_i_weight'], weights['lstm_h2h_f_weight'],\n weights['lstm_h2h_o_weight'], weights['lstm_h2h_c_weight']]\n bias = [weights['lstm_h2h_i_bias'], weights['lstm_h2h_f_bias'],\n weights['lstm_h2h_o_bias'], weights['lstm_h2h_c_bias']]\n\n builder.add_unilstm(name='lstm_layer',\n W_h=W_h, W_x=W_x, b=bias,\n input_size=_net_params['conv_h'],\n hidden_size=_net_params['lstm_h'],\n input_names=['lstm_in', 'lstm_h_in', 'lstm_c_in'],\n output_names=['dense0_in', 'lstm_h_out', 'lstm_c_out'],\n inner_activation='SIGMOID')\n\n # Dense\n builder.add_inner_product(name='dense_layer',\n W=weights['dense0_weight'], b=weights['dense0_bias'],\n input_channels=_net_params['lstm_h'],\n output_channels=_net_params['dense_h'],\n has_bias=True,\n input_name='dense0_in',\n output_name='bn_in')\n\n builder.add_batchnorm(name='bn_layer',\n channels=_net_params['dense_h'],\n gamma=weights['bn_gamma'], beta=weights['bn_beta'],\n mean=moving_weights['bn_moving_mean'],\n variance=moving_weights['bn_moving_var'],\n input_name='bn_in', output_name='relu1_in',\n epsilon=0.001)\n builder.add_activation(name='relu_layer1', non_linearity='RELU',\n input_name='relu1_in', output_name='dense1_in')\n\n # Softmax\n builder.add_inner_product(name='dense_layer1',\n W=weights['dense1_weight'], b=weights['dense1_bias'],\n has_bias=True,\n input_channels=_net_params['dense_h'],\n output_channels=self.num_classes,\n input_name='dense1_in', output_name='softmax_in')\n\n builder.add_softmax(name=prob_name,\n input_name='softmax_in',\n output_name=prob_name)\n\n\n labels = list(map(str, sorted(self._target_id_map.keys())))\n builder.set_class_labels(labels)\n mlmodel = _cmt.models.MLModel(builder.spec)\n model_type = 'activity classifier'\n mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)\n # Add useful information to the mlmodel\n features_str = ', '.join(self.features)\n mlmodel.input_description['features'] = u'Window \\xd7 [%s]' % features_str\n mlmodel.input_description['lstm_h_in'] = 'LSTM hidden state input'\n mlmodel.input_description['lstm_c_in'] = 'LSTM cell state input'\n mlmodel.output_description[prob_name] = 'Activity prediction probabilities'\n mlmodel.output_description['classLabel'] = 'Class label of top prediction'\n mlmodel.output_description['lstm_h_out'] = 'LSTM hidden state output'\n mlmodel.output_description['lstm_c_out'] = 'LSTM cell state output'\n _coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {\n 'prediction_window': str(self.prediction_window),\n 'session_id': self.session_id,\n 'target': self.target,\n 'features': ','.join(self.features),\n 'max_iterations': 
str(self.max_iterations),\n }, version=ActivityClassifier._PYTHON_ACTIVITY_CLASSIFIER_VERSION)\n spec = mlmodel.get_spec()\n _cmt.models.utils.rename_feature(spec, 'classLabel', label_name)\n _cmt.models.utils.rename_feature(spec, 'lstm_h_in', 'hiddenIn')\n _cmt.models.utils.rename_feature(spec, 'lstm_c_in', 'cellIn')\n _cmt.models.utils.rename_feature(spec, 'lstm_h_out', 'hiddenOut')\n _cmt.models.utils.rename_feature(spec, 'lstm_c_out', 'cellOut')\n _cmt.utils.save_spec(spec, filename)\n\n def predict(self, dataset, output_type='class', output_frequency='per_row'):\n \"\"\"\n Return predictions for ``dataset``, using the trained activity classifier.\n Predictions can be generated as class labels, or as a probability\n vector with probabilities for each class.\n\n The activity classifier generates a single prediction for each\n ``prediction_window`` rows in ``dataset``, per ``session_id``. Thus the\n number of predictions is smaller than the length of ``dataset``. By\n default each prediction is replicated by ``prediction_window`` to return\n a prediction for each row of ``dataset``. Use ``output_frequency`` to\n get the unreplicated predictions.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the features used for model training, but does not require\n a target column. Additional columns are ignored.\n\n output_type : {'class', 'probability_vector'}, optional\n Form of each prediction which is one of:\n\n - 'probability_vector': Prediction probability associated with each\n class as a vector. The probability of the first class (sorted\n alphanumerically by name of the class in the training set) is in\n position 0 of the vector, the second in position 1 and so on.\n - 'class': Class prediction. This returns the class with maximum\n probability.\n\n output_frequency : {'per_row', 'per_window'}, optional\n The frequency of the predictions which is one of:\n\n - 'per_window': Return a single prediction for each\n ``prediction_window`` rows in ``dataset`` per ``session_id``.\n - 'per_row': Convenience option to make sure the number of\n predictions match the number of rows in the dataset. Each\n prediction from the model is repeated ``prediction_window``\n times during that window.\n\n Returns\n -------\n out : SArray | SFrame\n If ``output_frequency`` is 'per_row' return an SArray with predictions\n for each row in ``dataset``.\n If ``output_frequency`` is 'per_window' return an SFrame with\n predictions for ``prediction_window`` rows in ``dataset``.\n\n See Also\n ----------\n create, evaluate, classify\n\n Examples\n --------\n\n .. sourcecode:: python\n\n # One prediction per row\n >>> probability_predictions = model.predict(\n ... data, output_type='probability_vector', output_frequency='per_row')[:4]\n >>> probability_predictions\n\n dtype: array\n Rows: 4\n [array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),\n array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),\n array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086]),\n array('d', [0.01857384294271469, 0.0348394550383091, 0.026018327102065086])]\n\n # One prediction per window\n >>> class_predictions = model.predict(\n ... 
data, output_type='class', output_frequency='per_window')\n >>> class_predictions\n\n +---------------+------------+-----+\n | prediction_id | session_id |class|\n +---------------+------------+-----+\n | 0 | 3 | 5 |\n | 1 | 3 | 5 |\n | 2 | 3 | 5 |\n | 3 | 3 | 5 |\n | 4 | 3 | 5 |\n | 5 | 3 | 5 |\n | 6 | 3 | 5 |\n | 7 | 3 | 4 |\n | 8 | 3 | 4 |\n | 9 | 3 | 4 |\n | ... | ... | ... |\n +---------------+------------+-----+\n \"\"\"\n _tkutl._raise_error_if_not_sframe(dataset, 'dataset')\n _tkutl._check_categorical_option_type(\n 'output_frequency', output_frequency, ['per_window', 'per_row'])\n _tkutl._check_categorical_option_type(\n 'output_type', output_type, ['probability_vector', 'class'])\n from ._sframe_sequence_iterator import SFrameSequenceIter as _SFrameSequenceIter\n from ._sframe_sequence_iterator import prep_data as _prep_data\n\n from ._sframe_sequence_iterator import _ceil_dev\n from ._mx_model_architecture import _net_params\n from ._mps_model_architecture import _define_model_mps, _predict_mps\n from .._mps_utils import (use_mps as _use_mps,\n ac_weights_mxnet_to_mps as _ac_weights_mxnet_to_mps,)\n\n prediction_window = self.prediction_window\n chunked_dataset, num_sessions = _prep_data(dataset, self.features, self.session_id, prediction_window,\n self._predictions_in_chunk, verbose=False)\n\n # Decide whether to use MPS GPU, MXnet GPU or CPU\n num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=num_sessions)\n use_mps = _use_mps() and num_mxnet_gpus == 0\n\n data_iter = _SFrameSequenceIter(chunked_dataset, len(self.features),\n prediction_window, self._predictions_in_chunk,\n self._recalibrated_batch_size, use_pad=True, mx_output=not use_mps)\n\n\n\n if use_mps:\n arg_params, aux_params = self._pred_model.get_params()\n mps_params = _ac_weights_mxnet_to_mps(arg_params, aux_params, _net_params['lstm_h'])\n mps_pred_model = _define_model_mps(self.batch_size, len(self.features), len(self._target_id_map),\n prediction_window, self._predictions_in_chunk, is_prediction_model=True)\n\n mps_pred_model.load(mps_params)\n\n preds = _predict_mps(mps_pred_model, data_iter)\n else:\n preds = self._pred_model.predict(data_iter).asnumpy()\n\n chunked_data = data_iter.dataset\n\n if output_frequency == 'per_row':\n # Replicate each prediction times prediction_window\n preds = preds.repeat(prediction_window, axis=1)\n\n # Remove predictions for padded rows\n unpadded_len = chunked_data['chunk_len'].to_numpy()\n preds = [p[:unpadded_len[i]] for i, p in enumerate(preds)]\n\n # Reshape from (num_of_chunks, chunk_size, num_of_classes)\n # to (ceil(length / prediction_window), num_of_classes)\n # chunk_size is DIFFERENT between chunks - since padding was removed.\n out = _np.concatenate(preds)\n out = out.reshape((-1, len(self._target_id_map)))\n out = _SArray(out)\n\n if output_type == 'class':\n id_target_map = self._id_target_map\n out = out.apply(lambda c: id_target_map[_np.argmax(c)])\n\n elif output_frequency == 'per_window':\n # Calculate the number of expected predictions and\n # remove predictions for padded data\n unpadded_len = chunked_data['chunk_len'].apply(\n lambda l: _ceil_dev(l, prediction_window)).to_numpy()\n preds = [list(p[:unpadded_len[i]]) for i, p in enumerate(preds)]\n\n out = _SFrame({\n self.session_id: chunked_data['session_id'],\n 'preds': _SArray(preds, dtype=list)\n }).stack('preds', new_column_name='probability_vector')\n\n # Calculate the prediction index per session\n out = out.add_row_number(column_name='prediction_id')\n start_sess_idx = out.groupby(\n 
self.session_id, {'start_idx': _agg.MIN('prediction_id')})\n start_sess_idx = start_sess_idx.unstack(\n [self.session_id, 'start_idx'], new_column_name='idx')['idx'][0]\n\n if output_type == 'class':\n id_target_map = self._id_target_map\n out['probability_vector'] = out['probability_vector'].apply(\n lambda c: id_target_map[_np.argmax(c)])\n out = out.rename({'probability_vector': 'class'})\n\n return out\n\n def evaluate(self, dataset, metric='auto'):\n \"\"\"\n Evaluate the model by making predictions of target values and comparing\n these to actual values.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the session_id, target and features used for model training.\n Additional columns are ignored.\n\n metric : str, optional\n Name of the evaluation metric. Possible values are:\n\n - 'auto' : Returns all available metrics.\n - 'accuracy' : Classification accuracy (micro average).\n - 'auc' : Area under the ROC curve (macro average)\n - 'precision' : Precision score (macro average)\n - 'recall' : Recall score (macro average)\n - 'f1_score' : F1 score (macro average)\n - 'log_loss' : Log loss\n - 'confusion_matrix' : An SFrame with counts of possible\n prediction/true label combinations.\n - 'roc_curve' : An SFrame containing information needed for an\n ROC curve\n\n Returns\n -------\n out : dict\n Dictionary of evaluation results where the key is the name of the\n evaluation metric (e.g. `accuracy`) and the value is the evaluation\n score.\n\n See Also\n ----------\n create, predict\n\n Examples\n ----------\n .. sourcecode:: python\n\n >>> results = model.evaluate(data)\n >>> print results['accuracy']\n \"\"\"\n\n avail_metrics = ['accuracy', 'auc', 'precision', 'recall',\n 'f1_score', 'log_loss', 'confusion_matrix', 'roc_curve']\n _tkutl._check_categorical_option_type(\n 'metric', metric, avail_metrics + ['auto'])\n\n if metric == 'auto':\n metrics = avail_metrics\n else:\n metrics = [metric]\n\n probs = self.predict(dataset, output_type='probability_vector')\n classes = self.predict(dataset, output_type='class')\n\n ret = {}\n if 'accuracy' in metrics:\n ret['accuracy'] = _evaluation.accuracy(dataset[self.target], classes)\n if 'auc' in metrics:\n ret['auc'] = _evaluation.auc(dataset[self.target], probs)\n if 'precision' in metrics:\n ret['precision'] = _evaluation.precision(dataset[self.target], classes)\n if 'recall' in metrics:\n ret['recall'] = _evaluation.recall(dataset[self.target], classes)\n if 'f1_score' in metrics:\n ret['f1_score'] = _evaluation.f1_score(dataset[self.target], classes)\n if 'log_loss' in metrics:\n ret['log_loss'] = _evaluation.log_loss(dataset[self.target], probs)\n if 'confusion_matrix' in metrics:\n ret['confusion_matrix'] = _evaluation.confusion_matrix(dataset[self.target], classes)\n if 'roc_curve' in metrics:\n ret['roc_curve'] = _evaluation.roc_curve(dataset[self.target], probs)\n\n return ret\n\n def classify(self, dataset, output_frequency='per_row'):\n \"\"\"\n Return a classification, for each ``prediction_window`` examples in the\n ``dataset``, using the trained activity classification model. The output\n SFrame contains predictions as both class labels as well as probabilities \n that the predicted value is the associated label.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the features and session id used for model training, but\n does not require a target column. 
Additional columns are ignored.\n\n output_frequency : {'per_row', 'per_window'}, optional\n The frequency of the predictions which is one of:\n\n - 'per_row': Each prediction is returned ``prediction_window`` times.\n - 'per_window': Return a single prediction for each \n ``prediction_window`` rows in ``dataset`` per ``session_id``.\n\n Returns\n -------\n out : SFrame\n An SFrame with model predictions i.e class labels and probabilities.\n\n See Also\n ----------\n create, evaluate, predict\n\n Examples\n ----------\n >>> classes = model.classify(data)\n \"\"\"\n _tkutl._check_categorical_option_type(\n 'output_frequency', output_frequency, ['per_window', 'per_row'])\n id_target_map = self._id_target_map\n preds = self.predict(\n dataset, output_type='probability_vector', output_frequency=output_frequency)\n\n if output_frequency == 'per_row':\n return _SFrame({\n 'class': preds.apply(lambda p: id_target_map[_np.argmax(p)]),\n 'probability': preds.apply(_np.max)\n })\n elif output_frequency == 'per_window':\n preds['class'] = preds['probability_vector'].apply(\n lambda p: id_target_map[_np.argmax(p)])\n preds['probability'] = preds['probability_vector'].apply(_np.max)\n preds = preds.remove_column('probability_vector')\n return preds\n\n def predict_topk(self, dataset, output_type='probability', k=3, output_frequency='per_row'):\n \"\"\"\n Return top-k predictions for the ``dataset``, using the trained model.\n Predictions are returned as an SFrame with three columns: `prediction_id`, \n `class`, and `probability`, or `rank`, depending on the ``output_type``\n parameter.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the features and session id used for model training, but\n does not require a target column. Additional columns are ignored.\n\n output_type : {'probability', 'rank'}, optional\n Choose the return type of the prediction:\n\n - `probability`: Probability associated with each label in the prediction.\n - `rank` : Rank associated with each label in the prediction.\n\n k : int, optional\n Number of classes to return for each input example.\n\n output_frequency : {'per_row', 'per_window'}, optional\n The frequency of the predictions which is one of:\n\n - 'per_row': Each prediction is returned ``prediction_window`` times.\n - 'per_window': Return a single prediction for each \n ``prediction_window`` rows in ``dataset`` per ``session_id``.\n\n Returns\n -------\n out : SFrame\n An SFrame with model predictions.\n\n See Also\n --------\n predict, classify, evaluate\n\n Examples\n --------\n >>> pred = m.predict_topk(validation_data, k=3)\n >>> pred\n +---------------+-------+-------------------+\n | row_id | class | probability |\n +---------------+-------+-------------------+\n | 0 | 4 | 0.995623886585 |\n | 0 | 9 | 0.0038311756216 |\n | 0 | 7 | 0.000301006948575 |\n | 1 | 1 | 0.928708016872 |\n | 1 | 3 | 0.0440889261663 |\n | 1 | 2 | 0.0176190119237 |\n | 2 | 3 | 0.996967732906 |\n | 2 | 2 | 0.00151345680933 |\n | 2 | 7 | 0.000637513934635 |\n | 3 | 1 | 0.998070061207 |\n | ... | ... | ... 
|\n +---------------+-------+-------------------+\n \"\"\"\n _tkutl._check_categorical_option_type('output_type', output_type, ['probability', 'rank'])\n id_target_map = self._id_target_map\n preds = self.predict(\n dataset, output_type='probability_vector', output_frequency=output_frequency)\n\n if output_frequency == 'per_row':\n probs = preds\n elif output_frequency == 'per_window':\n probs = preds['probability_vector']\n\n if output_type == 'rank':\n probs = probs.apply(lambda p: [\n {'class': id_target_map[i],\n 'rank': i}\n for i in reversed(_np.argsort(p)[-k:])]\n )\n elif output_type == 'probability':\n probs = probs.apply(lambda p: [\n {'class': id_target_map[i],\n 'probability': p[i]}\n for i in reversed(_np.argsort(p)[-k:])]\n )\n\n if output_frequency == 'per_row':\n output = _SFrame({'probs': probs})\n output = output.add_row_number(column_name='row_id')\n elif output_frequency == 'per_window':\n output = _SFrame({\n 'probs': probs,\n self.session_id: preds[self.session_id],\n 'prediction_id': preds['prediction_id']\n })\n\n output = output.stack('probs', new_column_name='probs')\n output = output.unpack('probs', column_name_prefix='')\n return output\n\n def __str__(self):\n \"\"\"\n Return a string description of the model to the ``print`` method.\n\n Returns\n -------\n out : string\n A description of the ActivityClassifier.\n \"\"\"\n return self.__repr__()\n\n def __repr__(self):\n \"\"\"\n Print a string description of the model when the model name is entered\n in the terminal.\n \"\"\"\n width = 40\n sections, section_titles = self._get_summary_struct()\n out = _tkutl._toolkit_repr_print(self, sections, section_titles,\n width=width)\n return out\n\n def _get_summary_struct(self):\n \"\"\"\n Returns a structured description of the model, including (where\n relevant) the schema of the training data, description of the training\n data, training statistics, and model hyperparameters.\n\n Returns\n -------\n sections : list (of list of tuples)\n A list of summary sections.\n Each section is a list.\n Each item in a section list is a tuple of the form:\n ('<label>','<field>')\n section_titles: list\n A list of section titles.\n The order matches that of the 'sections' object.\n \"\"\"\n model_fields = [\n ('Number of examples', 'num_examples'),\n ('Number of sessions', 'num_sessions'),\n ('Number of classes', 'num_classes'),\n ('Number of feature columns', 'num_features'),\n ('Prediction window', 'prediction_window'),\n ]\n training_fields = [\n ('Log-likelihood', 'training_log_loss'),\n ('Training time (sec)', 'training_time'),\n ]\n\n section_titles = ['Schema', 'Training summary']\n return([model_fields, training_fields], section_titles)\n" ]
[ [ "numpy.concatenate", "numpy.expand_dims", "numpy.argsort", "numpy.argmax" ] ]
illumi-Zoldyck/Courses-
[ "88a8fc9c1abe22e3dd2989e6cb97a8f229a521b9" ]
[ "Imperial College London - Mathematics for Machine Learning Specialization/Imperial College London - Mathematics for Machine Learning Multivariate Calculus/lagrange-multipliers.py" ]
[ "# salimt\r\n# Import libraries\r\n# Import libraries\r\nimport numpy as np\r\nfrom scipy import optimize\r\n\r\n# First we define the functions, YOU SHOULD IMPLEMENT THESE\r\ndef f (x, y) :\r\n return - np.exp(x - y**2 + x*y)\r\n\r\ndef g (x, y) :\r\n return np.cosh(y) + x - 2\r\n\r\ndef dfdx (x, y) :\r\n return (1 + y) * f (x, y)\r\n\r\ndef dfdy (x, y) :\r\n return (- 2*y + x) * f (x, y)\r\n\r\ndef dgdx (x, y) :\r\n return 1\r\n\r\ndef dgdy (x, y) :\r\n return np.sinh(y)\r\n\r\n# Use the definition of DL from previously.\r\ndef DL (xyλ) :\r\n [x, y, λ] = xyλ\r\n return np.array([\r\n dfdx(x, y) - λ * dgdx(x, y),\r\n dfdy(x, y) - λ * dgdy(x, y),\r\n - g(x, y)\r\n ])\r\n\r\n# To score on this question, the code above should set\r\n# the variables x, y, λ, to the values which solve the\r\n# Langrange multiplier problem.\r\n\r\n# I.e. use the optimize.root method, as you did previously.\r\n\r\nx, y, λ = (1,0,1)\r\nx, y, λ = optimize.root(DL, [x, y, λ]).x\r\nprint(\"x = %g\" % x)\r\nprint(\"y = %g\" % y)\r\nprint(\"λ = %g\" % λ)\r\nprint(\"f(x, y) = %g\" % f(x, y))\r\n" ]
[ [ "scipy.optimize.root", "numpy.cosh", "numpy.sinh", "numpy.exp" ] ]
gholdman1/Graphene-Modeling
[ "61fdf5941f4c43866e846902a21e58e5e1b2a47e" ]
[ "graphenemodeling/experiments/photonics/optical_models.py" ]
[ "import os\nimport numpy as np\nfrom scipy import integrate\n\ndef LorentzTerm(w,w0,gamma,s):\n\t'''\n\tA single term in a Lorentz Oscillator sum.\n\n\tParameters\n\t----------\n\n\tw:\t\tarray-like, frequency at which to evaluate the response (rad/s)\n\n\tw0:\t\tscalar, resonance frequency (rad/s)\n\n\tgamma:\tscalar, width of the resonance (rad/s)\n\n\ts:\t\tscalar, strength of the response (unitless)\n\t'''\n\n\tres = s * w0**2 / (w0**2 - w**2 - 1j*gamma*w)\n\n\treturn res\n\ndef Lorentzian(omega,epsinf,modes):\n\t'''\n\tThe full Lorentzian model.\n\n\tParameters\n\t----------\n\n\tepsinf:\t\tscalar, the infinite frequency permittivity\n\n\tmodes:\t\tarray-like, n x 3 array of responses\n\t\t\t\t\t\t\tFormat:[[w01,gamma1,s1],\n\t\t\t\t\t\t\t\t\t[w02,gamma2,s2],\n\t\t\t\t\t\t\t\t\t...]\n\t'''\n\n\tLorentzSum = np.zeros_like(omega)\n\n\tfor i in range(np.shape(modes)[0]):\n\n\t\tLorentzSum = LorentzSum + LorentzTerm(omega,modes[i][0],\n\t\t\t\t\t\t\t\t\t\t\t\t\tmodes[i][1],\n\t\t\t\t\t\t\t\t\t\t\t\t\tmodes[i][2])\n\n\treturn epsinf + LorentzSum\n\ndef DrudeSommerfeld(omega,omegap,gamma):\n\t'''\n\tThe Drude-Sommerfeld model of a metal dielectric.\n\tApplies when only free-electron contributions need\n\tto be considered.\n\n\tReturns\n\t----------\n\n\teps:\t\tDrude permittivity\n\t'''\n\n\tfrac = omegap**2 / (omega**2 + 1j*gamma*omega)\n\n\teps = 1 - frac\n\n\treturn eps\n\ndef Fano(omega,omega0,gamma,q):\n\t'''\n\tA Fano lineshape\n\n\tParameters\n\t------------\n\n\tomega:\t\tarray-like, frequency (rad/s)\n\n\tomega0:\t\tscalar, resonant frequency (rad/s)\n\n\tgamma:\t\tscalar, loss rate (rad/s)\n\t\n\tq:\t\t\tscalar, Fano asymmetry parameters\n\n\tReferences\n\t---------\n\n\t[1] http://demonstrations.wolfram.com/FanoResonance/.\n\n\t'''\n\n\twReduced = 2 * (omega-omega0)/gamma\n\n\tnumerator = (wReduced+q)**2\n\n\tdenominator = wReduced**2+1\n\n\tfano = numerator / denominator\n\n\treturn fano\n\ndef Brendel(omega,omega0,sd0,omegap,omegat):\n\t'''\n\tA broadened version of the Lorentz model.\n\n\tEquation 3 of Ref 1\n\n\tParameters\n\t----------\n\n\tReturns\n\t----------\n\n\n\tReferences\n\t----------\n\n\t[1] Kischkat, Jan, Sven Peters, Bernd Gruska, Mykhaylo Semtsiv,\n\t\tMikaela Chashnikova, Matthias Klinkmüller, Oliana Fedosenko,\n\t\tet al. 2012.\n\t\t“Mid-Infrared Optical Properties of Thin Films of Aluminum Oxide,\n\t\tTitanium Dioxide, Silicon Dioxide, Aluminum Nitride,\n\t\tand Silicon Nitride.” Applied Optics 51 (28): 6789–98.\n\t\thttps://doi.org/10.1364/AO.51.006789.\n\n\t'''\n\n\tGaussian = lambda x, x0, sd: np.exp((x-x0)**2/(2*sd**2)) / (sd*np.sqrt(2*fc.pi))\n\n\tintegrand= lambda x,omega: Gaussian(x,omega0,sd0) * omegap**2 / (x**2 - omega**2-1j*omegat*omega)\n\n\tintegral = integrate.quad(integrand,-np.inf,np.inf,args=(omega))\n\n\treturn integral\n" ]
[ [ "numpy.zeros_like", "scipy.integrate.quad", "numpy.exp", "numpy.shape", "numpy.sqrt" ] ]
2017qiuju/tensorflow
[ "41948f588ba2852ebae712358117ffa86e32a24b" ]
[ "tensorflow/python/eager/context.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Experimental API for TensorFlow's \"Eager\" mode of execution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport copy\nimport random\nimport threading\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import is_in_graph_mode\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\nGRAPH_MODE = 0\nEAGER_MODE = 1\n\n# Default execution mode.\n_default_mode = GRAPH_MODE\n\n# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,\n# new_device_spec).\n# Note that we do not protect this with a lock and instead rely on python's GIL\n# and the idempotent nature of writes to provide thread safety.\n_device_parsing_cache = {}\n\n_MAXINT32 = 2**31 - 1\n\nDEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT\nDEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN\nDEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT\nDEVICE_PLACEMENT_SILENT_FOR_INT32 = (\n pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)\nSYNC = 0\nASYNC = 1\n\n\nclass _TensorCache(object):\n \"\"\"Simple cache which evicts items based on length in a FIFO manner.\"\"\"\n\n def __init__(self, max_items=256):\n self._data = collections.OrderedDict()\n self._max_items = max_items if max_items else 256\n\n def put(self, key, value):\n self._data[key] = value\n\n if len(self._data) > self._max_items:\n self._data.popitem(last=False)\n\n def get(self, key):\n return self._data.get(key, None)\n\n def flush(self):\n self._data = {}\n\n\n# TODO(agarwal): better name ?\nclass _EagerContext(threading.local):\n \"\"\"Thread local eager context.\"\"\"\n\n def __init__(self):\n super(_EagerContext, self).__init__()\n self.device_spec = pydev.DeviceSpec.from_string(\"\")\n self.device_name = self.device_spec.to_string()\n self.mode = _default_mode\n self.is_eager = _default_mode == EAGER_MODE\n self.scope_name = \"\"\n self.recording_summaries = False\n self.summary_writer_resource = None\n self.scalar_cache = {}\n self.ones_rank_cache = _TensorCache()\n self.zeros_cache = _TensorCache()\n self.execution_mode = None\n\n\nContextSwitch = collections.namedtuple(\n \"ContextSwitch\", [\"is_building_function\", \"enter_context_fn\"])\n\n\n# `_ContextSwitchStack` is a `threading.local` to match the semantics of\n# ``DefaultGraphStack`, which is also a `threading.local`.\nclass _ContextSwitchStack(threading.local):\n \"\"\"A thread-local stack of context switches.\"\"\"\n\n def 
__init__(self, eager):\n super(_ContextSwitchStack, self).__init__()\n self.stack = []\n if eager:\n # Initialize the stack with a pointer to enter the eager context; this\n # ensures that the fact that eager execution was enabled is propagated\n # across threads, since (1) `enable_eager_execution` modifies a\n # process-level flag (`_default_mode`) and (2) `__init__` is called each\n # time a threading.local object is used in a separate thread.\n self.push(is_building_function=False, enter_context_fn=eager_mode)\n\n def push(self, is_building_function, enter_context_fn):\n \"\"\"Push metadata about a context switch onto the stack.\n\n A context switch can take one of two forms: installing a graph as the\n default graph, or entering the eager context. For each context switch,\n we record whether or not the entered context is building a function.\n\n Args:\n is_building_function: (bool.) Whether the context is building a function.\n enter_context_fn: (function.) A callable that executes the context switch.\n For example, `graph.as_default` or `eager_mode`.\n \"\"\"\n\n self.stack.append(\n ContextSwitch(is_building_function, enter_context_fn))\n\n def pop(self):\n \"\"\"Pop the stack.\"\"\"\n\n self.stack.pop()\n\n\n# TODO(agarwal): rename to EagerContext / EagerRuntime ?\n# TODO(agarwal): consider keeping the corresponding Graph here.\nclass Context(object):\n \"\"\"Environment in which eager operations execute.\"\"\"\n\n # TODO(agarwal): create and link in some documentation for `execution_mode`.\n # pylint: disable=redefined-outer-name\n def __init__(self,\n config=None,\n device_policy=None,\n execution_mode=None,\n server_def=None):\n \"\"\"Creates a new Context.\n\n Args:\n config: (Optional.) A `ConfigProto` protocol buffer with configuration\n options for the Context. Note that a lot of these options may be\n currently unimplemented or irrelevant when eager execution is enabled.\n device_policy: (Optional.) What policy to use when trying to run an\n operation on a device with inputs which are not on that device.\n When set to None, an appropriate value will be picked automatically.\n The value picked may change between TensorFlow releases.\n\n Defaults to tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32.\n Valid values:\n - tfe.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is\n not correct.\n - tfe.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the\n right device but raises a warning.\n - tfe.DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might\n hide performance problems.\n - tfe.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,\n raising errors on the other ones.\n execution_mode: (Optional.) Policy controlling how operations dispatched\n are actually executed. When set to None, an appropriate value will be\n picked automatically. The value picked may change between TensorFlow\n releases.\n Valid values:\n - tf.contrib.eager.SYNC: executes each operation synchronously.\n - tf.contrib.eager.ASYNC: executes each operation asynchronously. These\n operations may return \"non-ready\" handles.\n server_def: (Optional.) A tensorflow::ServerDef proto.\n Enables execution on remote devices. GrpcServers need to be started by\n creating an identical server_def to this, and setting the appropriate\n task_indexes, so that the servers can communicate. 
It will then be\n possible to execute operations on remote devices.\n\n Raises:\n ValueError: If execution_mode is not valid.\n \"\"\"\n self._eager_context = _EagerContext()\n self._context_switches = _ContextSwitchStack(self.executing_eagerly())\n self._context_handle = None\n self._context_devices = None\n self._post_execution_callbacks = []\n self._config = config\n self._seed = None\n self._initialize_lock = threading.Lock()\n self._device_policy = device_policy\n if execution_mode not in (None, SYNC, ASYNC):\n raise ValueError(\n \"execution_mode should be None/SYNC/ASYNC. Got %s\" % execution_mode)\n if execution_mode is None:\n execution_mode = SYNC\n self._execution_mode = execution_mode\n self._server_def = server_def\n\n # pylint: enable=redefined-outer-name\n\n def _set_global_seed(self, seed):\n \"\"\"Set a global eager mode seed for random ops.\"\"\"\n self._seed = seed\n self._rng = random.Random(self._seed)\n # Also clear the kernel cache, to reset any existing seeds\n if self._context_handle is not None:\n pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)\n\n def _internal_operation_seed(self):\n \"\"\"Returns a fake operation seed.\n\n In eager mode, user shouldn't set or depend on operation seed.\n Here, we generate a random seed based on global seed to make\n operation's randomness different and depend on the global seed.\n\n Returns:\n A fake operation seed based on global seed.\n \"\"\"\n return self._rng.randint(0, _MAXINT32)\n\n def _initialize_devices(self):\n \"\"\"Helper to initialize devices.\"\"\"\n # Store list of devices\n self._context_devices = []\n device_list = pywrap_tensorflow.TFE_ContextListDevices(\n self._context_handle)\n try:\n self._num_gpus = 0\n for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):\n dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i)\n self._context_devices.append(pydev.canonical_name(dev_name))\n dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i)\n if dev_type == \"GPU\":\n self._num_gpus += 1\n\n finally:\n pywrap_tensorflow.TF_DeleteDeviceList(device_list)\n\n def _initialize_handle_and_devices(self):\n \"\"\"Initialize handle and devices.\"\"\"\n with self._initialize_lock:\n if self._context_handle is not None:\n return\n assert self._context_devices is None\n opts = pywrap_tensorflow.TFE_NewContextOptions()\n try:\n if self._config is not None:\n config_str = self._config.SerializeToString()\n pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str)\n if self._device_policy is not None:\n pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(\n opts, self._device_policy)\n if self._execution_mode == ASYNC:\n pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True)\n self._context_handle = pywrap_tensorflow.TFE_NewContext(opts)\n finally:\n pywrap_tensorflow.TFE_DeleteContextOptions(opts)\n if self._server_def is not None:\n server_def_str = self._server_def.SerializeToString()\n pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle, 600,\n server_def_str)\n\n self._initialize_devices()\n\n def _clear_caches(self):\n self.scalar_cache().clear()\n self.ones_rank_cache().flush()\n self.zeros_cache().flush()\n\n def set_server_def(self, server_def, keep_alive_secs=600):\n \"\"\"Allow setting a server_def on the context.\n\n When a server def is replaced, it effectively clears a bunch of caches\n within the context. 
If you attempt to use a tensor object that was pointing\n to a tensor on the remote device, it will raise an error.\n\n Args:\n server_def: A tensorflow::ServerDef proto.\n Enables execution on remote devices.\n keep_alive_secs: Num. seconds after which the remote end will hang up.\n As long as the client is still alive, the server state for the context\n will be kept alive. If the client is killed (or there is some failure),\n the server will clean up its context keep_alive_secs after the final RPC\n it receives.\n\n Raises:\n ValueError: if server_def is None.\n \"\"\"\n if not server_def:\n raise ValueError(\"server_def is None.\")\n if not self._context_handle:\n self._server_def = server_def\n else:\n server_def_str = server_def.SerializeToString()\n pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle,\n keep_alive_secs, server_def_str)\n\n # Clear all the caches in case there are remote tensors in them.\n self._clear_caches()\n\n self._initialize_devices()\n\n @property\n def _handle(self):\n ctx = self._context_handle\n if ctx is None:\n self._initialize_handle_and_devices()\n return self._context_handle\n else:\n return ctx\n\n @property\n def _devices(self):\n devices = self._context_devices\n if devices is None:\n self._initialize_handle_and_devices()\n return self._context_devices\n else:\n return devices\n\n def __str__(self):\n if self._context_handle is None:\n return \"Eager TensorFlow Context. Devices currently uninitialized.\"\n else:\n devices = self._devices\n lines = [\"Eager TensorFlow Context with %d devices\" % (len(devices))]\n for i, d in enumerate(devices):\n lines.append(\" Device %d: %s\" % (i, d))\n return \"\\n\".join(lines)\n\n @tf_contextlib.contextmanager\n def _mode(self, mode):\n \"\"\"A context manager to allow setting the mode to EAGER/GRAPH.\"\"\"\n ctx = self._eager_context\n old_mode = ctx.mode\n old_is_eager = ctx.is_eager\n ctx.mode = mode\n ctx.is_eager = mode == EAGER_MODE\n if mode == EAGER_MODE:\n # Entering graph mode does not provide us with sufficient information to\n # record a context switch; graph-based context switches are only logged\n # when a graph is registered as the default graph.\n self.context_switches.push(False, eager_mode)\n try:\n yield\n finally:\n ctx.is_eager = old_is_eager\n ctx.mode = old_mode\n if mode == EAGER_MODE:\n self.context_switches.pop()\n\n def executing_eagerly(self):\n \"\"\"Returns True if current thread has eager executing enabled.\"\"\"\n return self._eager_context.is_eager\n\n def scalar_cache(self):\n \"\"\"Per-device cache for scalars.\"\"\"\n return self._eager_context.scalar_cache\n\n def ones_rank_cache(self):\n \"\"\"Per-device cache for scalars.\"\"\"\n return self._eager_context.ones_rank_cache\n\n def zeros_cache(self):\n \"\"\"Per-device cache for scalars.\"\"\"\n return self._eager_context.zeros_cache\n\n @property\n def scope_name(self):\n \"\"\"Returns scope name for the current thread.\"\"\"\n return self._eager_context.scope_name\n\n @scope_name.setter\n def scope_name(self, s):\n \"\"\"Sets scope name for the current thread.\"\"\"\n self._eager_context.scope_name = s\n\n @property\n def summary_writer_resource(self):\n \"\"\"Returns summary writer resource.\"\"\"\n return self._eager_context.summary_writer_resource\n\n @summary_writer_resource.setter\n def summary_writer_resource(self, resource):\n \"\"\"Sets summary writer resource.\"\"\"\n self._eager_context.summary_writer_resource = resource\n\n @property\n def device_name(self):\n \"\"\"Returns the device name for the 
current thread.\"\"\"\n return self._eager_context.device_name\n\n @property\n def device_spec(self):\n \"\"\"Returns the device spec for the current thread.\"\"\"\n return self._eager_context.device_spec\n\n @tf_contextlib.contextmanager\n def device(self, name):\n \"\"\"Context-manager to force placement of operations and Tensors on a device.\n\n Args:\n name: Name of the device or None to get default placement.\n\n Yields:\n Nothing.\n\n Raises:\n ValueError: If name is not a string or is an invalid device name.\n \"\"\"\n eager_context = self._eager_context\n old_device_name = eager_context.device_name\n old_device_spec = eager_context.device_spec\n cache_key = (old_device_name, name)\n try:\n new_device_name, new_device_spec = _device_parsing_cache[cache_key]\n except TypeError:\n # Error while trying to compute the cache key.\n raise ValueError(\"Expecting a string device name. Got %s(%s)\" %\n (type(name), name))\n except KeyError:\n # Handle a cache miss.\n if name is not None:\n if not isinstance(name, str):\n raise ValueError(\"Expecting a string device name. Got %s(%s)\" %\n (type(name), name))\n device_spec = pydev.DeviceSpec.from_string(name)\n if old_device_name:\n new_device_spec = copy.copy(old_device_spec)\n else:\n new_device_spec = pydev.DeviceSpec.from_string(\n \"/job:localhost/replica:0/task:0/device:CPU:0\")\n new_device_spec.merge_from(device_spec)\n else:\n new_device_spec = pydev.DeviceSpec.from_string(\"\")\n new_device_name = new_device_spec.to_string()\n _device_parsing_cache[cache_key] = (new_device_name, new_device_spec)\n\n try:\n eager_context.device_name = new_device_name\n eager_context.device_spec = new_device_spec\n yield\n finally:\n eager_context.device_name = old_device_name\n eager_context.device_spec = old_device_spec\n\n def devices(self):\n \"\"\"List of the names of devices available to execute operations.\"\"\"\n return self._devices\n\n def get_execution_mode(self):\n mode = self._eager_context.execution_mode\n if mode is None:\n mode = self._execution_mode\n return mode\n\n def set_execution_mode(self, mode):\n \"\"\"Sets execution mode for current thread.\"\"\"\n if mode not in (None, SYNC, ASYNC):\n raise ValueError(\n \"Execution mode should be None/SYNC/ASYNC. 
Got %s\" % mode)\n if mode is None:\n mode = SYNC\n self._eager_context.execution_mode = mode\n pywrap_tensorflow.TFE_ContextSetAsyncForThread(self._handle, mode == ASYNC)\n\n @tf_contextlib.contextmanager\n def execution_mode(self, mode):\n \"\"\"Context manager for setting execution mode for current thread.\"\"\"\n old_mode = self.get_execution_mode()\n try:\n self.set_execution_mode(mode)\n yield\n finally:\n self.set_execution_mode(old_mode)\n\n def async_wait(self):\n \"\"\"Waits for ops dispatched in ASYNC mode to finish.\"\"\"\n pywrap_tensorflow.TFE_ContextAsyncWait(self._handle)\n\n def async_clear_error(self):\n \"\"\"Clears errors raised during ASYNC execution.\"\"\"\n pywrap_tensorflow.TFE_ContextAsyncClearError(self._handle)\n\n def num_gpus(self):\n \"\"\"The number of GPUs available to execute operations.\"\"\"\n self._initialize_handle_and_devices()\n return self._num_gpus\n\n def add_function(self, fn):\n \"\"\"Add a function definition to the context.\n\n Once added, the function (identified by its name) can be executed like any\n other operation.\n\n Args:\n fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).\n \"\"\"\n pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn)\n\n def add_function_def(self, fdef):\n \"\"\"Add a function definition to the context.\n\n Once added, the function (identified by its name) can be executed like any\n other operation.\n\n Args:\n fdef: A FunctionDef protocol buffer message.\n \"\"\"\n fdef_string = fdef.SerializeToString()\n pywrap_tensorflow.TFE_ContextAddFunctionDef(\n self._handle, fdef_string, len(fdef_string))\n\n def add_post_execution_callback(self, callback):\n \"\"\"Add a post-execution callback to the context.\n\n A post-execution callback is invoked immediately after an eager operation or\n function has finished execution, providing access to the op's type, name\n input and output tensors. Multiple execution callbacks can be added, in\n which case the callbacks will be invoked in the order in which they are\n added.\n\n Args:\n callback: a callable of the signature\n `f(op_type, op_name, attrs, inputs, outputs)`.\n `op_type` is the type of the operation that was just executed (e.g.,\n `MatMul`).\n `op_name` is the name of the operation that has was just executed. 
This\n name is set by the client who created the operation and can be `None` if\n it is unset.\n `attrs` contains the attributes of the operation as a `tuple` of\n alternating attribute names and attribute values.\n `inputs` is the `list` of input `Tensor`(s) to the op.\n `outputs` is the `list` of output `Tensor`(s) from the op.\n Return value(s) from the callback are ignored.\n \"\"\"\n # TODO(cais): (b/64674139) Allow access to function-internal operations.\n self._post_execution_callbacks.append(callback)\n\n def clear_post_execution_callbacks(self):\n \"\"\"Clear all post-execution callbacks added to the context.\"\"\"\n del self._post_execution_callbacks[:]\n\n @property\n def post_execution_callbacks(self):\n \"\"\"Get the list of post-execution callbacks added to the context.\"\"\"\n return self._post_execution_callbacks\n\n def enable_run_metadata(self):\n \"\"\"Enables tracing of op execution via RunMetadata.\n\n To retrieve the accumulated metadata call context.export_run_metadata()\n and to stop tracing call context.disable_run_metadata().\n \"\"\"\n pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle)\n\n @tf_contextlib.contextmanager\n def device_policy(self, policy):\n handle = self._handle\n old = pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(handle)\n pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(\n handle, policy)\n try:\n yield\n finally:\n pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(\n handle, old)\n\n def disable_run_metadata(self):\n \"\"\"Disables tracing of op execution via RunMetadata.\"\"\"\n if not self._context_handle:\n return\n pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)\n\n def export_run_metadata(self):\n \"\"\"Returns a RunMetadata proto with accumulated information.\n\n The returned protocol buffer contains information since the most recent call\n to either enable_run_metadata or export_run_metadata.\n\n Returns:\n A RunMetadata protocol buffer. 
Or None if not enabled.\n \"\"\"\n if not self._context_handle:\n return None\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tensorflow.TFE_ContextExportRunMetadata(\n self._context_handle, buffer_)\n proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)\n run_metadata = config_pb2.RunMetadata()\n run_metadata.ParseFromString(compat.as_bytes(proto_data))\n return run_metadata\n\n @property\n def context_switches(self):\n \"\"\"Returns a stack of context switches.\"\"\"\n return self._context_switches\n\n def start_step(self):\n pywrap_tensorflow.TFE_ContextStartStep(self._handle)\n\n def end_step(self):\n pywrap_tensorflow.TFE_ContextEndStep(self._handle)\n\n_context = None\n_context_lock = threading.Lock()\n\n\ndef _initialize_context():\n global _context\n with _context_lock:\n if _context is None:\n _context = Context()\n\n\ndef context():\n \"\"\"Returns a singleton context object.\"\"\"\n if _context is None:\n _initialize_context()\n return _context\n\n\ndef context_safe():\n return _context\n\n\n# TODO(agarwal): remove this.\ndef get_default_context():\n \"\"\"Same as context.\"\"\"\n if _context is None:\n _initialize_context()\n return _context\n\n\ndef set_global_seed(seed):\n \"\"\"Sets the eager mode seed.\"\"\"\n context()._set_global_seed(seed) # pylint: disable=protected-access\n\n\ndef global_seed():\n \"\"\"Returns the eager mode seed.\"\"\"\n return context()._seed # pylint: disable=protected-access\n\n\ndef internal_operation_seed():\n \"\"\"Returns the operation seed generated based on global seed.\"\"\"\n return context()._internal_operation_seed() # pylint: disable=protected-access\n\n\n@tf_export(\"executing_eagerly\")\ndef executing_eagerly():\n \"\"\"Returns True if the current thread has eager execution enabled.\n\n Eager execution is typically enabled via `tf.enable_eager_execution`,\n but may also be enabled within the context of a Python function via\n tf.contrib.eager.py_func.\n \"\"\"\n return context().executing_eagerly()\n\n\ndef in_eager_mode():\n \"\"\"Use executing_eagerly() instead. 
This function will be removed.\"\"\"\n return executing_eagerly()\n\n\ndef graph_mode():\n \"\"\"Context-manager to disable eager execution for the current thread.\"\"\"\n return context()._mode(GRAPH_MODE) # pylint: disable=protected-access\n\n\ndef eager_mode():\n \"\"\"Context-manager to enable eager execution for the current thread.\"\"\"\n return context()._mode(EAGER_MODE) # pylint: disable=protected-access\n\n\n# TODO(agarwal): get rid of this and use ops.name_scope instead.\[email protected]\ndef namescope(name):\n \"\"\"ContextManager for creating hierarchical name scopes.\"\"\"\n ctx = context()\n old_name = ctx.scope_name\n ctx.scope_name = \"%s/%s\" % (old_name, name) if old_name else name\n try:\n yield\n finally:\n ctx.scope_name = old_name\n\n\ndef scope_name():\n \"\"\"Name of the current scope.\"\"\"\n return context().scope_name\n\n\ndef device(name):\n \"\"\"Context-manager to force placement of operations and Tensors on a device.\n\n Example:\n ```python\n with tfe.device('gpu:0'):\n with tfe.device('cpu:0'):\n shape = tf.constant([], dtype=tf.int32)\n x = tf.truncated_normal(shape, tf.float32)\n ```\n will ensure that the `shape` Tensor is on CPU but the `truncated_normal`\n operation runs on GPU 0.\n\n Args:\n name: Name of the device (see context().devices()), or None to\n perform automatic placement.\n\n Returns:\n Context manager for setting the device.\n \"\"\"\n return context().device(name)\n\n\ndef list_devices():\n \"\"\"List the names of the available devices.\n\n Returns:\n Names of the available devices, as a `list`.\n \"\"\"\n return context().devices()\n\n\ndef set_execution_mode(mode):\n \"\"\"Sets execution mode for the current thread.\"\"\"\n context().set_execution_mode(mode)\n\n\ndef execution_mode(mode):\n \"\"\"Context manager for setting execution mode for current thread.\"\"\"\n return context().execution_mode(mode)\n\n\ndef async_wait():\n \"\"\"Waits for ops dispatched in ASYNC mode to finish.\"\"\"\n return context().async_wait()\n\n\ndef async_clear_error():\n \"\"\"Clears errors raised during ASYNC execution mode.\"\"\"\n return context().async_clear_error()\n\n\ndef num_gpus():\n \"\"\"Get the number of available GPU devices.\n\n Returns:\n The number of available GPU devices.\n \"\"\"\n return context().num_gpus()\n\n\ndef enable_run_metadata():\n \"\"\"Enables tracing of op execution via RunMetadata.\n\n To retrieve the accumulated metadata call context.export_run_metadata()\n and to stop tracing call context.disable_run_metadata().\n \"\"\"\n context().enable_run_metadata()\n\n\ndef disable_run_metadata():\n \"\"\"Disables tracing of op execution via RunMetadata.\"\"\"\n context().disable_run_metadata()\n\n\ndef export_run_metadata():\n \"\"\"Returns a RunMetadata proto with accumulated information.\n\n The returned protocol buffer contains information since the most recent call\n to either enable_run_metadata or export_run_metadata.\n\n Returns:\n A RunMetadata protocol buffer.\n \"\"\"\n return context().export_run_metadata()\n\n\ndef set_server_def(server_def):\n context().set_server_def(server_def)\n\n\n# Not every user creates a Context via context.context()\n# (for example, enable_eager_execution in python/framework/ops.py),\n# but they do all import this file. Note that IS_IN_GRAPH_MODE and\n# in_graph_mode are both parameterless functions.\ndef _tmp_in_graph_mode():\n return not executing_eagerly()\n\n\nis_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode\n" ]
[ [ "tensorflow.python.pywrap_tensorflow.TF_DeviceListCount", "tensorflow.python.pywrap_tensorflow.TFE_ContextOptionsSetConfig", "tensorflow.python.pywrap_tensorflow.TFE_ContextSetAsyncForThread", "tensorflow.python.pywrap_tensorflow.TFE_ContextOptionsSetAsync", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.framework.c_api_util.tf_buffer", "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.framework.device.DeviceSpec.from_string", "tensorflow.python.pywrap_tensorflow.TFE_NewContextOptions", "tensorflow.python.pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy", "tensorflow.python.pywrap_tensorflow.TF_GetBuffer", "tensorflow.python.pywrap_tensorflow.TFE_NewContext", "tensorflow.python.pywrap_tensorflow.TF_DeviceListType", "tensorflow.python.pywrap_tensorflow.TFE_ContextEnableRunMetadata", "tensorflow.python.pywrap_tensorflow.TF_DeviceListName", "tensorflow.python.pywrap_tensorflow.TFE_ContextAsyncWait", "tensorflow.python.pywrap_tensorflow.TFE_DeleteContextOptions", "tensorflow.python.framework.device.canonical_name", "tensorflow.python.pywrap_tensorflow.TFE_ContextSetServerDef", "tensorflow.python.pywrap_tensorflow.TFE_ContextListDevices", "tensorflow.python.pywrap_tensorflow.TFE_ContextDisableRunMetadata", "tensorflow.python.pywrap_tensorflow.TFE_ContextAsyncClearError", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.pywrap_tensorflow.TFE_ContextClearCaches", "tensorflow.python.pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy", "tensorflow.python.pywrap_tensorflow.TF_DeleteDeviceList", "tensorflow.python.pywrap_tensorflow.TFE_ContextExportRunMetadata", "tensorflow.python.pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy", "tensorflow.python.pywrap_tensorflow.TFE_ContextAddFunction", "tensorflow.python.pywrap_tensorflow.TFE_ContextEndStep", "tensorflow.python.pywrap_tensorflow.TFE_ContextStartStep" ] ]
wangg12/mx-DeepIM
[ "b99e33193ef5b0927d79ca1d7e7d40ca3373c98c" ]
[ "deepim/config/config.py" ]
[ "# --------------------------------------------------------\n# Deep Iterative Matching Network\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Written by Yi Li, Gu Wang\n# --------------------------------------------------------\nfrom __future__ import print_function, division\nimport yaml\nimport numpy as np\nfrom easydict import EasyDict as edict\n\nconfig = edict()\nconfig.ModelNet = False\nconfig.modelnet_root = \"./data/ModelNet/\"\nconfig.MXNET_VERSION = \"\"\n\nconfig.output_path = \"\"\nconfig.symbol = \"\"\nconfig.SCALES = [(480, 640)] # first is scale (the shorter side); second is max size\n\n# default training\nconfig.default = edict()\nconfig.default.frequent = 1000\nconfig.default.kvstore = \"device\"\n\n# network related params\nconfig.network = edict()\nconfig.network.FIXED_PARAMS = []\nconfig.network.PIXEL_MEANS = np.array([0, 0, 0])\nconfig.network.pretrained = \"../model/pretrained_model/flownet\"\nconfig.network.pretrained_epoch = 0\nconfig.network.init_from_flownet = False\nconfig.network.skip_initialize = False\nconfig.network.INPUT_DEPTH = False\nconfig.network.INPUT_MASK = False\nconfig.network.PRED_MASK = False\nconfig.network.PRED_FLOW = False\nconfig.network.STANDARD_FLOW_REP = False\nconfig.network.TRAIN_ITER = False\nconfig.network.TRAIN_ITER_SIZE = 1\nconfig.network.REGRESSOR_NUM = 1 # 1 or num_classes\nconfig.network.ROT_TYPE = \"QUAT\" # 'QUAT', 'EULER'\nconfig.network.ROT_COORD = \"CAMERA\"\nconfig.network.TRANS_LOSS_TYPE = \"L2\" # 'L1', 'smooth_L1'\n\n# dataset related params\nconfig.dataset = edict()\nconfig.dataset.dataset = \"LINEMOD_REFINE\"\nconfig.dataset.dataset_path = \"./data/LINEMOD_6D/LINEMOD_converted/LINEMOD_refine\"\nconfig.dataset.image_set = \"train_ape\"\nconfig.dataset.root_path = \"./data\"\nconfig.dataset.test_image_set = \"val_ape\"\nconfig.dataset.model_dir = \"\"\nconfig.dataset.model_file = (\n \"./data/ModelNet/render_v1/models.txt\"\n) # optional, if too many classes\nconfig.dataset.pose_file = (\n \"./data/ModelNet/render_v1/poses.txt\"\n) # optional, if too many classes\n\nconfig.dataset.DEPTH_FACTOR = 1000\nconfig.dataset.NORMALIZE_FLOW = 1.0\nconfig.dataset.NORMALIZE_3D_POINT = 0.1\nconfig.dataset.INTRINSIC_MATRIX = np.array(\n [[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]]\n)\nconfig.dataset.ZNEAR = 0.25\nconfig.dataset.ZFAR = 6.0\n\nconfig.dataset.class_name_file = \"\"\nconfig.dataset.class_name = []\nconfig.dataset.trans_means = np.array([0.0, 0.0, 0.0])\nconfig.dataset.trans_stds = np.array([1.0, 1.0, 1.0])\n\nconfig.TRAIN = edict()\nconfig.TRAIN.optimizer = \"sgd\"\nconfig.TRAIN.warmup = False\nconfig.TRAIN.warmup_lr = 0\nconfig.TRAIN.warmup_step = 0\nconfig.TRAIN.begin_epoch = 0\nconfig.TRAIN.end_epoch = 0\nconfig.TRAIN.lr = 0.0001\nconfig.TRAIN.lr_step = \"4, 6\"\nconfig.TRAIN.momentum = 0.975\nconfig.TRAIN.wd = 0.0005\nconfig.TRAIN.model_prefix = \"deepim\"\nconfig.TRAIN.RESUME = False\nconfig.TRAIN.SHUFFLE = True\nconfig.TRAIN.BATCH_PAIRS = 1\nconfig.TRAIN.FLOW_WEIGHT_TYPE = \"all\" # 'all', 'viz', 'valid'\n# config.TRAIN.VISUALIZE = False\nconfig.TRAIN.TENSORBOARD_LOG = False\nconfig.TRAIN.INIT_MASK = \"box_gt\" # mask_gt, box_gt\nconfig.TRAIN.UPDATE_MASK = \"box_gt\"\nconfig.TRAIN.MASK_DILATE = False\nconfig.TRAIN.REPLACE_OBSERVED_BG_RATIO = 0.0 # replace train images' bg with VOC\n\nconfig.TEST = edict()\nconfig.TEST.BATCH_PAIRS = 1\nconfig.TEST.test_epoch = 0\nconfig.TEST.VISUALIZE = False\nconfig.TEST.test_iter = 1\nconfig.TEST.INIT_MASK = 
\"box_rendered\"\nconfig.TEST.UPDATE_MASK = \"box_rendered\"\nconfig.TEST.FAST_TEST = False\nconfig.TEST.PRECOMPUTED_ICP = False # evaluate with ICP refinement\nconfig.TEST.BEFORE_ICP = False # evaluate without ICP refinement\n\n# for iterative train\n# se3 distance loss\nconfig.train_iter = edict()\nconfig.train_iter.SE3_DIST_LOSS = False\nconfig.train_iter.LW_ROT = 0.0\nconfig.train_iter.LW_TRANS = 0.0\nconfig.train_iter.TRANS_LOSS_TYPE = \"L2\" # 'L1', 'smooth_L1'\nconfig.train_iter.TRANS_SMOOTH_L1_SCALAR = 3.0\n# se3 point matching loss\nconfig.train_iter.SE3_PM_LOSS = False\nconfig.train_iter.LW_PM = 0.0\nconfig.train_iter.SE3_PM_LOSS_TYPE = \"L1\"\nconfig.train_iter.SE3_PM_SL1_SCALAR = 1.0\nconfig.train_iter.NUM_3D_SAMPLE = -1\n# flow loss\nconfig.train_iter.LW_FLOW = 0.0\n# segmentation loss\nconfig.train_iter.LW_MASK = 0.0\n\n\ndef update_config(config_file):\n exp_config = None\n with open(config_file) as f:\n exp_config = edict(yaml.load(f))\n for k, v in exp_config.items():\n if k in config:\n if isinstance(v, dict):\n if k == \"TRAIN\":\n if \"BBOX_WEIGHTS\" in v:\n v[\"BBOX_WEIGHTS\"] = np.array(v[\"BBOX_WEIGHTS\"])\n elif k == \"network\":\n if \"PIXEL_MEANS\" in v:\n v[\"PIXEL_MEANS\"] = np.array(v[\"PIXEL_MEANS\"])\n elif k == \"dataset\":\n # make elegant later\n if \"INTRINSIC_MATRIX\" in v:\n v[\"INTRINSIC_MATRIX\"] = (\n np.array(v[\"INTRINSIC_MATRIX\"])\n .reshape([3, 3])\n .astype(np.float32)\n )\n if \"trans_means\" in v:\n v[\"trans_means\"] = (\n np.array(v[\"trans_means\"]).flatten().astype(np.float32)\n )\n if \"trans_stds\" in v:\n v[\"trans_stds\"] = (\n np.array(v[\"trans_stds\"]).flatten().astype(np.float32)\n )\n if \"class_name_file\" in v:\n if v[\"class_name_file\"] != \"\":\n with open(v[\"class_name_file\"]) as f:\n v[\"class_name\"] = [\n line.strip() for line in f.readlines()\n ]\n for vk, vv in v.items():\n config[k][vk] = vv\n else:\n if k == \"SCALES\":\n config[k][0] = tuple(v)\n else:\n config[k] = v\n else:\n raise ValueError(\"key: {} does not exist in config.py\".format(k))\n" ]
[ [ "numpy.array" ] ]
timpal0l/transformers
[ "d86d57faa3b6511c6e4d9139535d77b695b9af8a" ]
[ "src/transformers/models/layoutlm/modeling_layoutlm.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch LayoutLM model. \"\"\"\n\n\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_outputs import (\n BaseModelOutputWithCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n MaskedLMOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_layoutlm import LayoutLMConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"LayoutLMConfig\"\n_TOKENIZER_FOR_DOC = \"LayoutLMTokenizer\"\n\nLAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"layoutlm-base-uncased\",\n \"layoutlm-large-uncased\",\n]\n\n\nLayoutLMLayerNorm = torch.nn.LayerNorm\n\n\nclass LayoutLMEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super(LayoutLMEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)\n self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)\n self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)\n self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n self.LayerNorm = LayoutLMLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(\n self,\n input_ids=None,\n bbox=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n words_embeddings = inputs_embeds\n position_embeddings = self.position_embeddings(position_ids)\n try:\n left_position_embeddings = 
self.x_position_embeddings(bbox[:, :, 0])\n upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])\n right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])\n lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])\n except IndexError as e:\n raise IndexError(\"The :obj:`bbox` coordinate values should be within 0-1000 range.\") from e\n\n h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])\n w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = (\n words_embeddings\n + position_embeddings\n + left_position_embeddings\n + upper_position_embeddings\n + right_position_embeddings\n + lower_position_embeddings\n + h_position_embeddings\n + w_position_embeddings\n + token_type_embeddings\n )\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->LayoutLM\nclass LayoutLMSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n if encoder_hidden_states is not None:\n mixed_key_layer = self.key(encoder_hidden_states)\n mixed_value_layer = self.value(encoder_hidden_states)\n attention_mask = encoder_attention_mask\n else:\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask (precomputed for all layers in LayoutLMModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually 
dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->LayoutLM\nclass LayoutLMSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->LayoutLM\nclass LayoutLMAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = LayoutLMSelfAttention(config)\n self.output = LayoutLMSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass LayoutLMIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM\nclass LayoutLMOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n 
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->LayoutLM\nclass LayoutLMLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = LayoutLMAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = LayoutLMAttention(config)\n self.intermediate = LayoutLMIntermediate(config)\n self.output = LayoutLMOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->LayoutLM\nclass LayoutLMEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([LayoutLMLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False):\n\n def 
create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]\n if v is not None\n )\n return BaseModelOutputWithCrossAttentions(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass LayoutLMPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM\nclass LayoutLMPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->LayoutLM\nclass LayoutLMLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = LayoutLMPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM\nclass LayoutLMOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = LayoutLMLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = 
self.predictions(sequence_output)\n return prediction_scores\n\n\nclass LayoutLMPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = LayoutLMConfig\n base_model_prefix = \"layoutlm\"\n authorized_missing_keys = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, LayoutLMLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nLAYOUTLM_START_DOCSTRING = r\"\"\"\n The LayoutLM model was proposed in `LayoutLM: Pre-training of Text and Layout for Document Image Understanding\n <https://arxiv.org/abs/1912.13318>`__ by....\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config (:class:`~transformers.LayoutLMConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nLAYOUTLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.LayoutLMTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n bbox (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Bounding Boxes of each input sequence tokens. Selected in the range ``[0, config.max_2d_position_embeddings\n - 1]``.\n\n `What are bboxes? <../glossary.html#position-ids>`_\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for\n tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in ``[0, 1]``: :obj:`1`\n indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned\n tensors for more detail.\n return_dict (:obj:`bool`, `optional`):\n If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a\n plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top.\",\n LAYOUTLM_START_DOCSTRING,\n)\nclass LayoutLMModel(LayoutLMPreTrainedModel):\n\n config_class = LayoutLMConfig\n pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST\n base_model_prefix = \"layoutlm\"\n\n def __init__(self, config):\n super(LayoutLMModel, self).__init__(config)\n self.config = config\n\n self.embeddings = LayoutLMEmbeddings(config)\n self.encoder = LayoutLMEncoder(config)\n self.pooler = LayoutLMPooler(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"layoutlm-base-uncased\",\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n bbox=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n \"\"\"\n input_ids (torch.LongTensor of shape (batch_size, sequence_length)):\n Indices of input sequence tokens in the vocabulary.\n attention_mask (torch.FloatTensor of shape (batch_size, sequence_length), optional):\n Mask to avoid performing attention on padding token indices. Mask values selected in [0, 1]: 1 for tokens\n that are NOT MASKED, 0 for MASKED tokens.\n token_type_ids (torch.LongTensor of shape (batch_size, sequence_length), optional):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in [0, 1]:\n 0 corresponds to a sentence A token, 1 corresponds to a sentence B token\n position_ids (torch.LongTensor of shape (batch_size, sequence_length), optional):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range [0,\n config.max_position_embeddings - 1].\n head_mask (torch.FloatTensor of shape (num_heads,) or (num_layers, num_heads), optional):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in [0, 1]: 1 indicates\n the head is not masked, 0 indicates the head is masked.\n inputs_embeds (torch.FloatTensor of shape (batch_size, sequence_length, hidden_size), optional):\n Optionally, instead of passing input_ids you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert input_ids indices into associated vectors than the\n model’s internal embedding lookup matrix.\n output_attentions (bool, optional):\n If set to True, the attentions tensors of all attention layers are returned.\n output_hidden_states (bool, optional):\n If set to True, the hidden states of all layers are returned.\n return_dict (bool, optional):\n If set to True, the model will return a ModelOutput instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n if bbox is None:\n bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype)\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n bbox=bbox,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n 
cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\"\"\"LayoutLM Model with a `language modeling` head on top. \"\"\", LAYOUTLM_START_DOCSTRING)\nclass LayoutLMForMaskedLM(LayoutLMPreTrainedModel):\n config_class = LayoutLMConfig\n pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST\n base_model_prefix = \"layoutlm\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.layoutlm = LayoutLMModel(config)\n self.cls = LayoutLMOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.layoutlm.embeddings.word_embeddings\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"layoutlm-base-uncased\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n bbox=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.layoutlm(\n input_ids,\n bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(\n prediction_scores.view(-1, self.config.vocab_size),\n labels.view(-1),\n )\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n LAYOUTLM_START_DOCSTRING,\n)\nclass LayoutLMForTokenClassification(LayoutLMPreTrainedModel):\n config_class = LayoutLMConfig\n pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST\n base_model_prefix = \"layoutlm\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.layoutlm = LayoutLMModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.layoutlm.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"layoutlm-base-uncased\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n bbox=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.layoutlm(\n input_ids=input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n" ]
[ [ "torch.ones", "torch.nn.Linear", "torch.nn.Softmax", "torch.nn.Embedding", "torch.nn.Tanh", "torch.nn.CrossEntropyLoss", "torch.nn.LayerNorm", "torch.arange", "torch.zeros", "torch.nn.Dropout", "torch.matmul" ] ]
S-o-T/vlb
[ "78495570e002d0ed6badd3df62f86e416839b0af" ]
[ "python/bench/MatchingScoreBench.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# ===========================================================\n# File Name: MatchingScoreBench.py\n# Author: Xu Zhang, Columbia University\n# Creation Date: 01-25-2019\n# Last Modified: Mon Apr 15 15:19:28 2019\n#\n# Description: Matching score benchmark\n#\n# Copyright (C) 2018 Xu Zhang\n# All rights reserved.\n#\n# This file is made available under\n# the terms of the BSD license (see the COPYING file).\n# ===========================================================\n\n\"\"\"\nThis module describe benchmark for matching score. \n\"\"\"\n\nimport numpy as np\nimport bench.BenchmarkTemplate\nfrom bench.BenchmarkTemplate import Benchmark\nimport bench.ellipse_overlap_H\nimport scipy.io as sio\nimport pyximport\npyximport.install(setup_args={\"include_dirs\": np.get_include()})\nimport bench.vlb_greedy_matching\n\nclass MatchingScoreBench(Benchmark):\n \"\"\"\n Matching score benchmark\n Return repeatability score, number of correspondence, matching score and number of matches\n \"\"\"\n def __init__(self, tmp_feature_dir='./data/features/',\n result_dir='./python_scores/', matchGeometry=True):\n super(MatchingScoreBench, self).__init__(name='Matching Score',\n tmp_feature_dir=tmp_feature_dir, result_dir=result_dir)\n self.matchGeometry = matchGeometry\n self.bench_name = 'decmatch'\n self.test_name = 'matching_score'\n\n def evaluate_unit(self, feature_1, feature_2, task):\n \"\"\"\n Single evaluation unit. Given two features, return the repeatability.\n \n :param feature_1: Feature and descriptor to run.\n :type feature_1: list of array [feature, descriptor] \n :param feature_2: Feature and descriptor to run. \n :type feature_2: list of array [feature, descriptor]\n :param task: What to run\n :type task: dict\n \n See Also\n --------\n\n evaluate_warpper: How to run the unit.\n dset.dataset.Link: definition of task.\n\n \"\"\"\n\n ms = 0.0\n num_matches = 0\n rep = 0.0\n num_cor = 0\n feature_1, descriptor_1 = feature_1\n feature_2, descriptor_2 = feature_2\n # print(feature_1.shape)\n # print(descriptor_1.shape)\n\n if feature_1 is None or feature_2 is None or feature_1.shape[0] == 0 or feature_2.shape[0] == 0\\\n or descriptor_1 is None or descriptor_2 is None\\\n or descriptor_1.shape[0] == 0 or descriptor_2.shape[0] == 0:\n ms = 0.0\n num_matches = 0\n rep = 0.0\n num_cor = 0\n else:\n option = {}\n option['maxOverlapError'] = 0.5\n geo_info = task\n tcorr, corr_score, info = bench.ellipse_overlap_H.ellipse_overlap_H(\n geo_info, feature_1, feature_2, option)\n\n if corr_score.size == 0:\n ms = 0.0\n num_matches = 0\n rep = 0.0\n num_cor = 0\n else:\n # have to use stable sort method, otherwise, result will not be\n # correct\n perm_index = np.argsort(1 - corr_score, kind='mergesort')\n tcorr_s = tcorr[perm_index, :]\n fa_valid = info['fa_valid']\n fb_valid = info['fb_valid']\n\n fa_num = np.sum(fa_valid)\n fb_num = np.sum(fb_valid)\n geoMatches, _ = bench.vlb_greedy_matching.vlb_greedy_matching(\n fa_num, fb_num, tcorr_s)\n overlapped_num = sum(geoMatches[:, 0] > -1)\n geoMatches = geoMatches[:, 0]\n num_cor = overlapped_num\n\n if self.norm_factor == 'minab':\n rep = overlapped_num / float(min(fa_num, fb_num))\n elif self.norm_factor == 'a':\n rep = overlapped_num / float(fa_num)\n elif self.norm_factor == 'b':\n rep = overlapped_num / float(fb_num)\n\n feature_1 = feature_1[fa_valid, :]\n descriptor_1 = descriptor_1[fa_valid, :]\n feature_2 = feature_2[fb_valid, :]\n descriptor_2 = descriptor_2[fb_valid, :]\n 
descriptor_1.astype(np.float)\n descriptor_2.astype(np.float)\n\n descMatches = np.zeros(\n (descriptor_1.shape[0],), dtype=np.int) - 1\n\n descMatchEdges = bench.ellipse_overlap_H.match_greedy(\n descriptor_2, descriptor_1)\n for edge in descMatchEdges:\n descMatches[edge[1]] = edge[0]\n \n #both descriptor and feature have to be nearest neighbor\n if self.matchGeometry:\n matches = descMatches\n for idx, (match, geoMatch) in enumerate(\n zip(matches, geoMatches)):\n if match != geoMatch:\n matches[idx] = -1\n else:\n # only require nearest descriptors has \"reasonable\" overlap (defined by maxOverlapError) over features.\n tcorr_set = set()\n for i in range(tcorr.shape[0]):\n tcorr_set.add((tcorr[i,0], tcorr[i,1]))\n descMatchesEdgeList = descMatchEdges.tolist()\n intersection = []\n for descMatch in descMatchesEdgeList:\n tmpMatch = (descMatch[1], descMatch[0])\n if tmpMatch in tcorr_set:\n intersection.append(tmpMatch)\n\n matches = np.zeros((descriptor_1.shape[0],)) - 1\n for edge in intersection:\n matches[edge[0]] = edge[1]\n\n num_matches = sum(matches[:] > -0.5)\n # print(matches)\n # print(num_matches)\n if self.norm_factor == 'minab':\n ms = num_matches / float(min(fa_num, fb_num))\n elif self.norm_factor == 'a':\n ms = num_matches / float(fa_num)\n elif self.norm_factor == 'b':\n ms = num_matches / float(fb_num)\n #print((rep, num_cor, ms, num_matches))\n return rep, num_cor, ms, num_matches\n\n def evaluate(self, dataset, detector, use_cache=True,\n save_result=True, norm_factor='minab'):\n \"\"\"\n Main function to call the evaluation wrapper. It could be different for different evaluation\n \n :param dataset: Dataset to extract the feature\n :type dataset: SequenceDataset\n :param detector: Detector used to extract the feature\n :type detector: DetectorAndDescriptor\n :param use_cache: Load cached feature and result or not\n :type use_cache: boolean\n :param save_result: Save result or not\n :type save_result: boolean\n :param norm_factor: How to normalize the repeatability. Option: minab, a, b\n :type norm_factor: str\n :returns: result \n :rtype: dict\n\n See Also\n --------\n \n bench.Benchmark\n bench.Benchmark.evaluate_warpper:\n \"\"\"\n\n self.norm_factor = norm_factor\n result = self.evaluate_warpper(dataset, detector, ['repeatability', 'num_cor', 'matching_score', 'num_matches'],\n extract_descriptor=True, use_cache=use_cache, save_result=save_result)\n result['norm_factor'] = norm_factor\n result['bench_name'] = self.bench_name\n return result\n\n def detect_feature_custom(self, dataset, detector,\n use_cache=False, save_feature=True):\n \"\"\"\n Customized feature extraction method. For special task. \n \n :param dataset: Dataset to extract the feature\n :type dataset: SequenceDataset\n :param detector: Detector used to extract the feature\n :type detector: DetectorAndDescriptor\n :param use_cache: Load cached feature and result or not\n :type use_cache: boolean\n :param save_feature: Save computated feature or not\n :type save_feature: boolean\n :returns: feature \n :rtype: dict\n\n \"\"\"\n\n pass\n\n def extract_descriptor_custom(\n self, dataset, detector, use_cache=False, save_feature=True):\n \"\"\"\n Customized description extraction method. For special task. 
\n        \n        :param dataset: Dataset to extract the descriptor\n        :type dataset: SequenceDataset\n        :param detector: Detector used to extract the descriptor\n        :type detector: DetectorAndDescriptor\n        :param use_cache: Load cached feature and result or not\n        :type use_cache: boolean\n        :param save_feature: Save computed feature or not\n        :type save_feature: boolean\n        :returns: feature \n        :rtype: dict\n\n        \"\"\"\n\n        pass\n" ]
[ [ "numpy.sum", "numpy.argsort", "numpy.get_include", "numpy.zeros" ] ]
thegodone/pytorch_geometric
[ "4e9e494e3862f59afebd5678f802700bc4f6ff45" ]
[ "examples/dimenet_utils.py" ]
[ "# Shameless steal from: https://github.com/klicperajo/dimenet\n\nimport numpy as np\nfrom scipy.optimize import brentq\nfrom scipy import special as sp\n\ntry:\n import sympy as sym\nexcept ImportError:\n sym = None\n\n\ndef Jn(r, n):\n return np.sqrt(np.pi / (2 * r)) * sp.jv(n + 0.5, r)\n\n\ndef Jn_zeros(n, k):\n zerosj = np.zeros((n, k), dtype='float32')\n zerosj[0] = np.arange(1, k + 1) * np.pi\n points = np.arange(1, k + n) * np.pi\n racines = np.zeros(k + n - 1, dtype='float32')\n for i in range(1, n):\n for j in range(k + n - 1 - i):\n foo = brentq(Jn, points[j], points[j + 1], (i, ))\n racines[j] = foo\n points = racines\n zerosj[i][:k] = racines[:k]\n\n return zerosj\n\n\ndef spherical_bessel_formulas(n):\n x = sym.symbols('x')\n\n f = [sym.sin(x) / x]\n a = sym.sin(x) / x\n for i in range(1, n):\n b = sym.diff(a, x) / x\n f += [sym.simplify(b * (-x)**i)]\n a = sym.simplify(b)\n return f\n\n\ndef bessel_basis(n, k):\n zeros = Jn_zeros(n, k)\n normalizer = []\n for order in range(n):\n normalizer_tmp = []\n for i in range(k):\n normalizer_tmp += [0.5 * Jn(zeros[order, i], order + 1)**2]\n normalizer_tmp = 1 / np.array(normalizer_tmp)**0.5\n normalizer += [normalizer_tmp]\n\n f = spherical_bessel_formulas(n)\n x = sym.symbols('x')\n bess_basis = []\n for order in range(n):\n bess_basis_tmp = []\n for i in range(k):\n bess_basis_tmp += [\n sym.simplify(normalizer[order][i] *\n f[order].subs(x, zeros[order, i] * x))\n ]\n bess_basis += [bess_basis_tmp]\n return bess_basis\n\n\ndef sph_harm_prefactor(k, m):\n return ((2 * k + 1) * np.math.factorial(k - abs(m)) /\n (4 * np.pi * np.math.factorial(k + abs(m))))**0.5\n\n\ndef associated_legendre_polynomials(k, zero_m_only=True):\n z = sym.symbols('z')\n P_l_m = [[0] * (j + 1) for j in range(k)]\n\n P_l_m[0][0] = 1\n if k > 0:\n P_l_m[1][0] = z\n\n for j in range(2, k):\n P_l_m[j][0] = sym.simplify(((2 * j - 1) * z * P_l_m[j - 1][0] -\n (j - 1) * P_l_m[j - 2][0]) / j)\n if not zero_m_only:\n for i in range(1, k):\n P_l_m[i][i] = sym.simplify((1 - 2 * i) * P_l_m[i - 1][i - 1])\n if i + 1 < k:\n P_l_m[i + 1][i] = sym.simplify(\n (2 * i + 1) * z * P_l_m[i][i])\n for j in range(i + 2, k):\n P_l_m[j][i] = sym.simplify(\n ((2 * j - 1) * z * P_l_m[j - 1][i] -\n (i + j - 1) * P_l_m[j - 2][i]) / (j - i))\n\n return P_l_m\n\n\ndef real_sph_harm(k, zero_m_only=True, spherical_coordinates=True):\n if not zero_m_only:\n S_m = [0]\n C_m = [1]\n for i in range(1, k):\n x = sym.symbols('x')\n y = sym.symbols('y')\n S_m += [x * S_m[i - 1] + y * C_m[i - 1]]\n C_m += [x * C_m[i - 1] - y * S_m[i - 1]]\n\n P_l_m = associated_legendre_polynomials(k, zero_m_only)\n if spherical_coordinates:\n theta = sym.symbols('theta')\n z = sym.symbols('z')\n for i in range(len(P_l_m)):\n for j in range(len(P_l_m[i])):\n if type(P_l_m[i][j]) != int:\n P_l_m[i][j] = P_l_m[i][j].subs(z, sym.cos(theta))\n if not zero_m_only:\n phi = sym.symbols('phi')\n for i in range(len(S_m)):\n S_m[i] = S_m[i].subs(x,\n sym.sin(theta) * sym.cos(phi)).subs(\n y,\n sym.sin(theta) * sym.sin(phi))\n for i in range(len(C_m)):\n C_m[i] = C_m[i].subs(x,\n sym.sin(theta) * sym.cos(phi)).subs(\n y,\n sym.sin(theta) * sym.sin(phi))\n\n Y_func_l_m = [['0'] * (2 * j + 1) for j in range(k)]\n for i in range(k):\n Y_func_l_m[i][0] = sym.simplify(sph_harm_prefactor(i, 0) * P_l_m[i][0])\n\n if not zero_m_only:\n for i in range(1, k):\n for j in range(1, i + 1):\n Y_func_l_m[i][j] = sym.simplify(\n 2**0.5 * sph_harm_prefactor(i, j) * C_m[j] * P_l_m[i][j])\n for i in range(1, k):\n for j in range(1, i + 1):\n 
Y_func_l_m[i][-j] = sym.simplify(\n 2**0.5 * sph_harm_prefactor(i, -j) * S_m[j] * P_l_m[i][j])\n\n return Y_func_l_m" ]
[ [ "numpy.zeros", "numpy.arange", "scipy.optimize.brentq", "scipy.special.jv", "numpy.sqrt", "numpy.array" ] ]
SulemanKhurram/ThesisExperiments
[ "4fdf7b6558c87a096dcdc374c35085ac946d3a58" ]
[ "main_Bayes_Exp01e.py" ]
[ "from __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport argparse\nimport datetime\nimport math\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import SubsetRandomSampler\n\nfrom utils.autoaugment import CIFAR10Policy\nimport random\nimport torch\nimport torch.utils.data as data\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\n\nimport bayesian_config as cf\nimport pandas as pd\n\nfrom utils.OrigaDataLoader import OrigaDataset\nfrom utils.BBBlayers import GaussianVariationalInference\nfrom utils.BayesianModels.Bayesian3Conv3FC import BBB3Conv3FC\nfrom utils.BayesianModels.BayesianAlexNet import BBBAlexNet\nfrom utils.BayesianModels.BayesianLeNet import BBBLeNet\nfrom utils.BayesianModels.BayesianSqueezeNet import BBBSqueezeNet\nimport utils.utilFunctions as utils\nimport numpy as np\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix\nfrom sklearn.metrics import classification_report\n\nparser = argparse.ArgumentParser(description='PyTorch Bayesian Model Training')\n#parser.add_argument('--lr', default=0.001, type=float, help='learning_rate')\nparser.add_argument('--net_type', default='alexnet', type=str, help='model')\n#parser.add_argument('--depth', default=28, type=int, help='depth of model')\n#parser.add_argument('--widen_factor', default=10, type=int, help='width of model')\n#parser.add_argument('--num_samples', default=10, type=int, help='Number of samples')\n#parser.add_argument('--beta_type', default=\"Blundell\", type=str, help='Beta type')\n#parser.add_argument('--p_logvar_init', default=0, type=int, help='p_logvar_init')\n#parser.add_argument('--q_logvar_init', default=-10, type=int, help='q_logvar_init')\n#parser.add_argument('--weight_decay', default=0.0005, type=float, help='weight_decay')\nparser.add_argument('--dataset', default='origa', type=str, help='dataset = [mnist/cifar10/cifar100/fashionmnist/stl10/origa]')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nparser.add_argument('--testOnly', '-t', action='store_true', help='Test mode with the saved model')\nargs = parser.parse_args()\n\n# Hyper Parameter settings\nuse_cuda = torch.cuda.is_available()\n#torch.cuda.set_device(1)\nbest_acc = 0\nresize=32\nresize_origa=227\n\ntrainLoss = []\ntestLoss = []\ntestPredicts = []\ntestTargets = []\n\n\n# Data Uplaod\nprint('\\n[Phase 1] : Data Preparation')\nutils.writeLogs('\\n[Phase 1] : Data Preparation')\n\ntransform_train = transforms.Compose([\n transforms.Resize((resize, resize)),\n transforms.RandomCrop(32, padding=4),\n #transforms.RandomHorizontalFlip(),\n #CIFAR10Policy(),\n transforms.ToTensor(),\n transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),\n]) # meanstd transformation\n\ntransform_test = transforms.Compose([\n transforms.Resize((resize, resize)),\n transforms.RandomCrop(32, padding=4),\n #transforms.RandomHorizontalFlip(),\n #CIFAR10Policy(),\n transforms.ToTensor(),\n transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),\n])\n\noriga_transform_train = transforms.Compose([\n transforms.Resize((resize_origa, resize_origa)),\n\n transforms.ToTensor(),\n])\n\noriga_transform_test = transforms.Compose([\n 
transforms.Resize((resize_origa, resize_origa)),\n\n transforms.ToTensor(),\n])\n\n\nif (args.dataset == 'cifar10'):\n print(\"| Preparing CIFAR-10 dataset...\")\n utils.writeLogs(\"| Preparing CIFAR-10 dataset...\")\n\n sys.stdout.write(\"| \")\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)\n outputs = 10\n inputs = 3\n\nelif (args.dataset == 'cifar100'):\n print(\"| Preparing CIFAR-100 dataset...\")\n utils.writeLogs(\"| Preparing CIFAR-100 dataset...\")\n\n sys.stdout.write(\"| \")\n trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test)\n outputs = 100\n inputs = 3\n\nelif (args.dataset == 'mnist'):\n print(\"| Preparing MNIST dataset...\")\n utils.writeLogs(\"| Preparing MNIST dataset...\")\n\n sys.stdout.write(\"| \")\n trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.MNIST(root='./data', train=False, download=False, transform=transform_test)\n outputs = 10\n inputs = 1\n\nelif (args.dataset == 'fashionmnist'):\n print(\"| Preparing FASHIONMNIST dataset...\")\n utils.writeLogs(\"| Preparing FASHIONMNIST dataset...\")\n\n sys.stdout.write(\"| \")\n trainset = torchvision.datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.FashionMNIST(root='./data', train=False, download=False, transform=transform_test)\n outputs = 10\n inputs = 1\nelif (args.dataset == 'stl10'):\n print(\"| Preparing STL10 dataset...\")\n utils.writeLogs(\"| Preparing STL10 dataset...\")\n\n sys.stdout.write(\"| \")\n trainset = torchvision.datasets.STL10(root='./data', split='train', download=True, transform=transform_train)\n testset = torchvision.datasets.STL10(root='./data', split='test', download=False, transform=transform_test)\n outputs = 10\n inputs = 3\n\nelif (args.dataset == 'origa'):\n print(\"| Preparing Origa dataset...\")\n utils.writeLogs(\"| Preparing Origa dataset...\")\n\n sys.stdout.write(\"| \")\n\n # dataset = OrigaDataset('./origaAll.txt', transform_train)\n # batch_size = 16\n # test_split = .1\n # shuffle_dataset = True\n # random_seed = 42\n #\n # dataset_size = len(dataset)\n # indices = list(range(dataset_size))\n # split = int(np.floor(test_split * dataset_size))\n # if shuffle_dataset:\n # np.random.seed(random_seed)\n # np.random.shuffle(indices)\n # train_indices, test_indices = indices[split:], indices[:split]\n # trainset = SubsetRandomSampler(train_indices)\n # testset = SubsetRandomSampler(test_indices)\n\n trainset = OrigaDataset('./randomTrainImages.txt',origa_transform_train )\n testset = OrigaDataset('./randomTestImages.txt',origa_transform_test )\n\n outputs = 2\n inputs = 3\n\n# if(args.dataset == 'origa'):\n# trainloader = torch.utils.data.DataLoader(dataset, batch_size=cf.batch_size, shuffle=False, num_workers=4, sampler=trainset)\n# testloader = torch.utils.data.DataLoader(dataset, batch_size=cf.batch_size, shuffle=False, num_workers=4, sampler=testset)\n# else:\n# trainloader = torch.utils.data.DataLoader(trainset, batch_size=cf.batch_size, shuffle=True, num_workers=4)\n# testloader = torch.utils.data.DataLoader(testset, batch_size=cf.batch_size, shuffle=False, 
num_workers=4)\n\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=cf.batch_size, shuffle=True, num_workers=4)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=cf.batch_size, shuffle=False, num_workers=4)\n# Return network & file name\n\ndef getNetwork(args):\n if (args.net_type == 'lenet'):\n net = BBBLeNet(outputs,inputs)\n file_name = 'lenet'\n elif (args.net_type == 'alexnet'):\n net = BBBAlexNet(outputs,inputs)\n file_name = 'alexnet-'\n elif (args.net_type == 'squeezenet'):\n net = BBBSqueezeNet(outputs,inputs)\n file_name = 'squeezenet-'\n elif (args.net_type == '3conv3fc'):\n net = BBB3Conv3FC(outputs,inputs)\n file_name = '3Conv3FC-'\n else:\n print('Error : Network should be either [LeNet / AlexNet /SqueezeNet/ 3Conv3FC')\n sys.exit(0)\n\n return net, file_name\n\n\n# Model\nprint('\\n[Phase 2] : Model setup')\nutils.writeLogs('\\n[Phase 2] : Model setup')\n\nif args.resume:\n # Load checkpoint\n print('| Resuming from checkpoint...')\n utils.writeLogs('| Resuming from checkpoint...')\n\n assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!'\n _, file_name = getNetwork(args)\n checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+str(cf.num_samples)+'.t7')\n net = checkpoint['net']\n best_acc = checkpoint['acc']\n cf.start_epoch = checkpoint['epoch']\nelse:\n print('| Building net type [' + args.net_type + ']...')\n print(\"test0.1\")\n utils.writeLogs('| Building net type [' + args.net_type + ']...')\n print(\"test0.1\")\n net, file_name = getNetwork(args)\n print(\"test0\")\nif use_cuda:\n net.cuda()\nprint(\"test1\")\nvi = GaussianVariationalInference(torch.nn.CrossEntropyLoss())\nprint(\"test2\")\nlogfile = os.path.join('diagnostics_Bayes{}_{}_{}.txt'.format(args.net_type, args.dataset, cf.num_samples))\n\n# Training\ndef train(epoch):\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n\n optimizer = optim.Adam(net.parameters(), lr=cf.lr, weight_decay=cf.weight_decay)\n\n print('\\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.lr))\n utils.writeLogs('\\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.lr))\n\n m = math.ceil(len(testset) / cf.batch_size)\n for batch_idx, (inputs_value, targets) in enumerate(trainloader):\n targets = torch.tensor(targets)\n x = inputs_value.view(-1, inputs, resize_origa, resize_origa)#.repeat(cf.num_samples, 1, 1, 1)\n y = targets#.repeat(cf.num_samples)\n if use_cuda:\n x, y = x.cuda(), y.cuda() # GPU settings\n\n if cf.beta_type is \"Blundell\":\n beta = 2 ** (m - (batch_idx + 1)) / (2 ** m - 1)\n elif cf.beta_type is \"Soenderby\":\n beta = min(epoch / (cf.num_epochs // 4), 1)\n elif cf.beta_type is \"Standard\":\n beta = 1 / m\n else:\n beta = 0\n # Forward Propagation\n x, y = Variable(x), Variable(y)\n outputs, kl = net.probforward(x)\n loss = vi(outputs, y, kl, beta) # Loss\n optimizer.zero_grad()\n loss.backward() # Backward Propagation\n optimizer.step() # Optimizer update\n\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n\n total += targets.size(0)\n correct += predicted.eq(y.data).cpu().sum()\n\n sys.stdout.write('\\r')\n sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\\t\\tLoss: %.4f Acc@1: %.3f%%'\n %(epoch, cf.num_epochs, batch_idx+1,\n (len(trainset)//cf.batch_size)+1, loss.item(), (100*(correct.item()/total))))\n utils.writeLogs(str('| Epoch [%3d/%3d] Iter[%3d/%3d]\\t\\tLoss: %.4f Acc@1: %.3f%%'\n %(epoch, cf.num_epochs, batch_idx+1,\n (len(trainset)//cf.batch_size)+1, loss.item(), (100*(correct.item()/total)))))\n\n 
sys.stdout.flush()\n\n trainLoss.append(loss.item())\n diagnostics_to_write = {'Epoch': epoch, 'Loss': loss.item(), 'Accuracy': (100*(correct.item()/total))}\n utils.writeLogs(str(diagnostics_to_write))\n with open(logfile, 'a') as lf:\n lf.write(str(diagnostics_to_write))\n\n\ndef printMetrics(testTargets,testPredicts, epoch):\n\n print(epoch + \" f1_score:\" + str(f1_score(testTargets, testPredicts, average=\"macro\")))\n utils.writeLogs(epoch + \" f1_score:\" + str(f1_score(testTargets, testPredicts, average=\"macro\")))\n print(epoch + \" overall precision:\" + str(precision_score(testTargets, testPredicts, average=\"macro\")))\n utils.writeLogs(epoch + \" overall precision:\" + str(precision_score(testTargets, testPredicts, average=\"macro\")))\n print(epoch + \" overall recall : \" + str(recall_score(testTargets, testPredicts, average=\"macro\")))\n utils.writeLogs(epoch + \" overall recall : \" + str(recall_score(testTargets, testPredicts, average=\"macro\")))\n fpr, tpr, thresholds = roc_curve(testTargets, testPredicts)\n\n roc_auc = roc_auc_score(testTargets, testPredicts)\n\n print(epoch + \" False positive : \" + str(fpr))\n utils.writeLogs(epoch + \" False positive : \" + str(fpr))\n print (epoch + \" True positive\" + str(tpr))\n utils.writeLogs(epoch + \" True positive : \" + str(tpr))\n print(epoch + \" Thresholds : \" + str(thresholds))\n utils.writeLogs(epoch + \" Thresholds : \" + str(thresholds))\n\n print(epoch + \" ROC_AUC : \" + str(roc_auc))\n utils.writeLogs(epoch + \" ROC_AUC : \" + str(roc_auc))\n confuse = confusion_matrix(testTargets, testPredicts)\n print(epoch + \" Confusion Matrix : \" + str(confuse))\n utils.writeLogs(epoch + \" Confusion Matrix : \" + str(confuse))\n target_names = ['Healthy', 'Glaucoma']\n print(epoch + str(classification_report(testTargets, testPredicts, target_names=target_names, digits=4)))\n utils.writeLogs(epoch + str(classification_report(testTargets, testPredicts, target_names=target_names, digits=4)))\n if epoch == \"Final\":\n #ROC curve\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange', lw=lw,\n label='ROC curve (area = %0.4f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.savefig('./plots/' + 'rocExp01e.png', format='png', dpi=300)\n plt.show()\n plt.close()\n\n #Confusion Matrix\n heatmapData = pd.DataFrame(confuse, index=[i for i in ['TrueHealthy', 'TrueGlaucoma']],\n columns=[j for j in ['PredictedHealthy', 'PredictedGlaucoma']])\n plt.figure(figsize=(4, 4))\n sns.heatmap(heatmapData, annot=True, fmt='d')\n plt.savefig('./plots/' + 'confuseExp01e.png', format='png', dpi=300)\n plt.show()\n plt.close()\n\n #Training loss Curve\n\n plt.plot(list(range(cf.num_epochs)) ,np.array(trainLoss))\n plt.savefig('./plots/' + 'trainLossExp01e.png', format='png', dpi=300)\n plt.show()\n plt.close()\n\ndef test(epoch):\n global best_acc\n testTargets = []\n testPredicts = []\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n m = math.ceil(len(testset) / cf.batch_size)\n for batch_idx, (inputs_value, targets) in enumerate(testloader):\n x = inputs_value.view(-1, inputs, resize_origa, resize_origa)#.repeat(cf.num_samples, 1, 1, 1)\n y = targets#.repeat(cf.num_samples)\n if use_cuda:\n x, y = x.cuda(), y.cuda()\n with torch.no_grad():\n x, y = Variable(x), Variable(y)\n outputs, kl = 
net.probforward(x)\n\n if cf.beta_type is \"Blundell\":\n beta = 2 ** (m - (batch_idx + 1)) / (2 ** m - 1)\n elif cf.beta_type is \"Soenderby\":\n beta = min(epoch / (cf.num_epochs // 4), 1)\n elif cf.beta_type is \"Standard\":\n beta = 1 / m\n else:\n beta = 0\n\n loss = vi(outputs,y,kl,beta)\n\n test_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(y.data).cpu().sum()\n testPredicts.extend(predicted.cpu().data.numpy())\n testTargets.extend(targets.cpu().data.numpy())\n\n\n # Save checkpoint when best model\n #acc =(100*correct/total)/cf.num_samples\n printMetrics(testTargets,testPredicts, \"Epoch \" + str(epoch))\n print(\"Epoch \" + str(epoch)+\" Correct predictions:\" + str(correct.item()))\n utils.writeLogs(\"Epoch \" + str(epoch)+\" Correct predictions:\" + str(correct.item()))\n print(\"Epoch \" + str(epoch) + \" Total:\" + str(total))\n utils.writeLogs(\"Epoch \" + str(epoch) + \" Total:\" + str(total))\n acc = 100 * (correct.item() / total)\n print(\"\\n| Validation Epoch #%d\\t\\t\\tLoss: %.4f Acc@1: %.2f%%\" %(epoch, loss.item(), acc))\n utils.writeLogs(str(\"\\n| Validation Epoch #%d\\t\\t\\tLoss: %.4f Acc@1: %.2f%%\" %(epoch, loss.item(), acc)))\n test_diagnostics_to_write = {'Validation Epoch':epoch, 'Loss':loss.item(), 'Accuracy': acc}\n with open(logfile, 'a') as lf:\n lf.write(str(test_diagnostics_to_write))\n\n if acc > best_acc:\n print('| Saving Best model...\\t\\t\\tTop1 = %.2f%%' %(acc))\n utils.writeLogs(str('| Saving Best model...\\t\\t\\tTop1 = %.2f%%' %(acc)))\n state = {\n 'net':net if use_cuda else net,\n 'acc':acc,\n 'epoch':epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n save_point = './checkpoint/'+args.dataset+os.sep\n if not os.path.isdir(save_point):\n os.mkdir(save_point)\n torch.save(state, save_point+file_name+str(cf.num_samples)+'.t7')\n best_acc = acc\n\n if epoch == cf.num_epochs:\n\n printMetrics(testTargets, testPredicts, \"Final\")\n\n\n\nprint('\\n[Phase 3] : Training model')\nutils.writeLogs('\\n[Phase 3] : Training model')\nprint('| Training Epochs = ' + str(cf.num_epochs))\nutils.writeLogs('| Training Epochs = ' + str(cf.num_epochs))\nprint('| Initial Learning Rate = ' + str(cf.lr))\nutils.writeLogs('| Initial Learning Rate = ' + str(cf.lr))\nprint('| Optimizer = ' + str(cf.optim_type))\nutils.writeLogs('| Optimizer = ' + str(cf.optim_type))\n\nelapsed_time = 0\nfor epoch in range(cf.start_epoch, cf.start_epoch+cf.num_epochs):\n start_time = time.time()\n train(epoch)\n test(epoch)\n epoch_time = time.time() - start_time\n elapsed_time += epoch_time\n print('| Elapsed time : %d:%02d:%02d' %(cf.get_hms(elapsed_time)))\n utils.writeLogs(str('| Elapsed time : %d:%02d:%02d' %(cf.get_hms(elapsed_time))))\nprint('\\n[Phase 4] : Testing model')\nutils.writeLogs('\\n[Phase 4] : Testing model')\nprint('* Test results : Acc@1 = %.2f%%' %(best_acc))\nutils.writeLogs(str('* Test results : Acc@1 = %.2f%%' %(best_acc)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.utils.data.DataLoader", "sklearn.metrics.classification_report", "torch.no_grad", "torch.cuda.is_available", "matplotlib.pyplot.ylabel", "sklearn.metrics.precision_score", "torch.max", "matplotlib.pyplot.plot", "sklearn.metrics.roc_curve", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "torch.autograd.Variable", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score", "torch.tensor", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "matplotlib.pyplot.legend", "pandas.DataFrame", "torch.nn.CrossEntropyLoss", "sklearn.metrics.roc_auc_score", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
kyle1/sportsreference
[ "baa4890382e7c9e5e38a42c1a71303431345378b" ]
[ "tests/integration/boxscore/test_nba_boxscore.py" ]
[ "import mock\nimport os\nimport pandas as pd\nfrom datetime import datetime\nfrom flexmock import flexmock\nfrom sportsreference import utils\nfrom sportsreference.constants import HOME\nfrom sportsreference.nba.constants import BOXSCORE_URL, BOXSCORES_URL\nfrom sportsreference.nba.boxscore import Boxscore, Boxscores\n\n\nMONTH = 10\nYEAR = 2017\n\nBOXSCORE = '201710310LAL'\n\n\ndef read_file(filename):\n filepath = os.path.join(os.path.dirname(__file__), 'nba', filename)\n return open('%s' % filepath, 'r', encoding='utf8').read()\n\n\ndef mock_pyquery(url):\n class MockPQ:\n def __init__(self, html_contents):\n self.status_code = 200\n self.html_contents = html_contents\n self.text = html_contents\n\n if url == BOXSCORES_URL % (2, 4, YEAR):\n return MockPQ(read_file('boxscores-2-4-2017.html'))\n if url == BOXSCORES_URL % (2, 5, YEAR):\n return MockPQ(read_file('boxscores-2-5-2017.html'))\n boxscore = read_file('%s.html' % BOXSCORE)\n return MockPQ(boxscore)\n\n\nclass MockDateTime:\n def __init__(self, year, month):\n self.year = year\n self.month = month\n\n\nclass TestNBABoxscore:\n @mock.patch('requests.get', side_effect=mock_pyquery)\n def setup_method(self, *args, **kwargs):\n self.results = {\n 'date': '10:30 PM, October 31, 2017',\n 'location': 'STAPLES Center, Los Angeles, California',\n 'winner': HOME,\n 'winning_name': 'Los Angeles Lakers',\n 'winning_abbr': 'LAL',\n 'losing_name': 'Detroit Pistons',\n 'losing_abbr': 'DET',\n 'pace': 97.4,\n 'away_wins': 5,\n 'away_losses': 3,\n 'away_minutes_played': 240,\n 'away_field_goals': 41,\n 'away_field_goal_attempts': 94,\n 'away_field_goal_percentage': .436,\n 'away_two_point_field_goals': 31,\n 'away_two_point_field_goal_attempts': 61,\n 'away_two_point_field_goal_percentage': .508,\n 'away_three_point_field_goals': 10,\n 'away_three_point_field_goal_attempts': 33,\n 'away_three_point_field_goal_percentage': .303,\n 'away_free_throws': 1,\n 'away_free_throw_attempts': 3,\n 'away_free_throw_percentage': .333,\n 'away_offensive_rebounds': 10,\n 'away_defensive_rebounds': 34,\n 'away_total_rebounds': 44,\n 'away_assists': 21,\n 'away_steals': 7,\n 'away_blocks': 3,\n 'away_turnovers': 12,\n 'away_personal_fouls': 11,\n 'away_points': 93,\n 'away_true_shooting_percentage': .488,\n 'away_effective_field_goal_percentage': .489,\n 'away_three_point_attempt_rate': .351,\n 'away_free_throw_attempt_rate': .032,\n 'away_offensive_rebound_percentage': 19.2,\n 'away_defensive_rebound_percentage': 75.6,\n 'away_total_rebound_percentage': 45.4,\n 'away_assist_percentage': 51.2,\n 'away_steal_percentage': 7.2,\n 'away_block_percentage': 4.6,\n 'away_turnover_percentage': 11.2,\n 'away_offensive_rating': 95.5,\n 'away_defensive_rating': 116.0,\n 'home_wins': 3,\n 'home_losses': 4,\n 'home_minutes_played': 240,\n 'home_field_goals': 45,\n 'home_field_goal_attempts': 91,\n 'home_field_goal_percentage': .495,\n 'home_two_point_field_goals': 33,\n 'home_two_point_field_goal_attempts': 65,\n 'home_two_point_field_goal_percentage': .508,\n 'home_three_point_field_goals': 12,\n 'home_three_point_field_goal_attempts': 26,\n 'home_three_point_field_goal_percentage': .462,\n 'home_free_throws': 11,\n 'home_free_throw_attempts': 14,\n 'home_free_throw_percentage': .786,\n 'home_offensive_rebounds': 11,\n 'home_defensive_rebounds': 42,\n 'home_total_rebounds': 53,\n 'home_assists': 30,\n 'home_steals': 9,\n 'home_blocks': 5,\n 'home_turnovers': 14,\n 'home_personal_fouls': 14,\n 'home_points': 113,\n 'home_true_shooting_percentage': .582,\n 
'home_effective_field_goal_percentage': .560,\n 'home_three_point_attempt_rate': .286,\n 'home_free_throw_attempt_rate': .154,\n 'home_offensive_rebound_percentage': 24.4,\n 'home_defensive_rebound_percentage': 80.8,\n 'home_total_rebound_percentage': 54.6,\n 'home_assist_percentage': 66.7,\n 'home_steal_percentage': 9.2,\n 'home_block_percentage': 8.2,\n 'home_turnover_percentage': 12.6,\n 'home_offensive_rating': 116.0,\n 'home_defensive_rating': 95.5\n }\n flexmock(utils) \\\n .should_receive('_todays_date') \\\n .and_return(MockDateTime(YEAR, MONTH))\n\n self.boxscore = Boxscore(BOXSCORE)\n\n def test_nba_boxscore_returns_requested_boxscore(self):\n for attribute, value in self.results.items():\n assert getattr(self.boxscore, attribute) == value\n\n def test_invalid_url_yields_empty_class(self):\n flexmock(Boxscore) \\\n .should_receive('_retrieve_html_page') \\\n .and_return(None)\n\n boxscore = Boxscore(BOXSCORE)\n\n for key, value in boxscore.__dict__.items():\n if key == '_uri':\n continue\n assert value is None\n\n def test_nba_boxscore_dataframe_returns_dataframe_of_all_values(self):\n df = pd.DataFrame([self.results], index=[BOXSCORE])\n\n # Pandas doesn't natively allow comparisons of DataFrames.\n # Concatenating the two DataFrames (the one generated during the test\n # and the expected one above) and dropping duplicate rows leaves only\n # the rows that are unique between the two frames. This allows a quick\n # check of the DataFrame to see if it is empty - if so, all rows are\n # duplicates, and they are equal.\n frames = [df, self.boxscore.dataframe]\n df1 = pd.concat(frames).drop_duplicates(keep=False)\n\n assert df1.empty\n\n def test_nba_boxscore_players(self):\n assert len(self.boxscore.home_players) == 13\n assert len(self.boxscore.away_players) == 13\n\n for player in self.boxscore.home_players:\n assert not player.dataframe.empty\n for player in self.boxscore.away_players:\n assert not player.dataframe.empty\n\n\nclass TestNBABoxscores:\n def setup_method(self):\n self.expected = {\n '2-4-2017': [\n {'home_name': 'Atlanta',\n 'home_abbr': 'ATL',\n 'home_score': 113,\n 'boxscore': '201702040ATL',\n 'away_name': 'Orlando',\n 'away_abbr': 'ORL',\n 'away_score': 86,\n 'winning_name': 'Atlanta',\n 'winning_abbr': 'ATL',\n 'losing_name': 'Orlando',\n 'losing_abbr': 'ORL'},\n {'home_name': 'Indiana',\n 'home_abbr': 'IND',\n 'home_score': 105,\n 'boxscore': '201702040IND',\n 'away_name': 'Detroit',\n 'away_abbr': 'DET',\n 'away_score': 84,\n 'winning_name': 'Indiana',\n 'winning_abbr': 'IND',\n 'losing_name': 'Detroit',\n 'losing_abbr': 'DET'},\n {'home_name': 'Miami',\n 'home_abbr': 'MIA',\n 'home_score': 125,\n 'boxscore': '201702040MIA',\n 'away_name': 'Philadelphia',\n 'away_abbr': 'PHI',\n 'away_score': 102,\n 'winning_name': 'Miami',\n 'winning_abbr': 'MIA',\n 'losing_name': 'Philadelphia',\n 'losing_abbr': 'PHI'},\n {'home_name': 'Minnesota',\n 'home_abbr': 'MIN',\n 'home_score': 99,\n 'boxscore': '201702040MIN',\n 'away_name': 'Memphis',\n 'away_abbr': 'MEM',\n 'away_score': 107,\n 'winning_name': 'Memphis',\n 'winning_abbr': 'MEM',\n 'losing_name': 'Minnesota',\n 'losing_abbr': 'MIN'},\n {'home_name': 'New York',\n 'home_abbr': 'NYK',\n 'home_score': 104,\n 'boxscore': '201702040NYK',\n 'away_name': 'Cleveland',\n 'away_abbr': 'CLE',\n 'away_score': 111,\n 'winning_name': 'Cleveland',\n 'winning_abbr': 'CLE',\n 'losing_name': 'New York',\n 'losing_abbr': 'NYK'},\n {'home_name': 'Phoenix',\n 'home_abbr': 'PHO',\n 'home_score': 112,\n 'boxscore': '201702040PHO',\n 
'away_name': 'Milwaukee',\n 'away_abbr': 'MIL',\n 'away_score': 137,\n 'winning_name': 'Milwaukee',\n 'winning_abbr': 'MIL',\n 'losing_name': 'Phoenix',\n 'losing_abbr': 'PHO'},\n {'home_name': 'Sacramento',\n 'home_abbr': 'SAC',\n 'home_score': 109,\n 'boxscore': '201702040SAC',\n 'away_name': 'Golden State',\n 'away_abbr': 'GSW',\n 'away_score': 106,\n 'winning_name': 'Sacramento',\n 'winning_abbr': 'SAC',\n 'losing_name': 'Golden State',\n 'losing_abbr': 'GSW'},\n {'home_name': 'San Antonio',\n 'home_abbr': 'SAS',\n 'home_score': 121,\n 'boxscore': '201702040SAS',\n 'away_name': 'Denver',\n 'away_abbr': 'DEN',\n 'away_score': 97,\n 'winning_name': 'San Antonio',\n 'winning_abbr': 'SAS',\n 'losing_name': 'Denver',\n 'losing_abbr': 'DEN'},\n {'home_name': 'Utah',\n 'home_abbr': 'UTA',\n 'home_score': 105,\n 'boxscore': '201702040UTA',\n 'away_name': 'Charlotte',\n 'away_abbr': 'CHO',\n 'away_score': 98,\n 'winning_name': 'Utah',\n 'winning_abbr': 'UTA',\n 'losing_name': 'Charlotte',\n 'losing_abbr': 'CHO'},\n {'home_name': 'Washington',\n 'home_abbr': 'WAS',\n 'home_score': 105,\n 'boxscore': '201702040WAS',\n 'away_name': 'New Orleans',\n 'away_abbr': 'NOP',\n 'away_score': 91,\n 'winning_name': 'Washington',\n 'winning_abbr': 'WAS',\n 'losing_name': 'New Orleans',\n 'losing_abbr': 'NOP'},\n ]\n }\n\n @mock.patch('requests.get', side_effect=mock_pyquery)\n def test_boxscores_search(self, *args, **kwargs):\n result = Boxscores(datetime(2017, 2, 4)).games\n\n assert result == self.expected\n\n @mock.patch('requests.get', side_effect=mock_pyquery)\n def test_boxscores_search_invalid_end(self, *args, **kwargs):\n result = Boxscores(datetime(2017, 2, 4), datetime(2017, 2, 3)).games\n\n assert result == self.expected\n\n @mock.patch('requests.get', side_effect=mock_pyquery)\n def test_boxscores_search_multiple_days(self, *args, **kwargs):\n expected = {\n '2-4-2017': [\n {'boxscore': '201702040ATL',\n 'away_name': 'Orlando',\n 'away_abbr': 'ORL',\n 'away_score': 86,\n 'home_name': 'Atlanta',\n 'home_abbr': 'ATL',\n 'home_score': 113,\n 'winning_name': 'Atlanta',\n 'winning_abbr': 'ATL',\n 'losing_name': 'Orlando',\n 'losing_abbr': 'ORL'},\n {'boxscore': '201702040IND',\n 'away_name': 'Detroit',\n 'away_abbr': 'DET',\n 'away_score': 84,\n 'home_name': 'Indiana',\n 'home_abbr': 'IND',\n 'home_score': 105,\n 'winning_name': 'Indiana',\n 'winning_abbr': 'IND',\n 'losing_name': 'Detroit',\n 'losing_abbr': 'DET'},\n {'boxscore': '201702040MIA',\n 'away_name': 'Philadelphia',\n 'away_abbr': 'PHI',\n 'away_score': 102,\n 'home_name': 'Miami',\n 'home_abbr': 'MIA',\n 'home_score': 125,\n 'winning_name': 'Miami',\n 'winning_abbr': 'MIA',\n 'losing_name': 'Philadelphia',\n 'losing_abbr': 'PHI'},\n {'boxscore': '201702040MIN',\n 'away_name': 'Memphis',\n 'away_abbr': 'MEM',\n 'away_score': 107,\n 'home_name': 'Minnesota',\n 'home_abbr': 'MIN',\n 'home_score': 99,\n 'winning_name': 'Memphis',\n 'winning_abbr': 'MEM',\n 'losing_name': 'Minnesota',\n 'losing_abbr': 'MIN'},\n {'boxscore': '201702040NYK',\n 'away_name': 'Cleveland',\n 'away_abbr': 'CLE',\n 'away_score': 111,\n 'home_name': 'New York',\n 'home_abbr': 'NYK',\n 'home_score': 104,\n 'winning_name': 'Cleveland',\n 'winning_abbr': 'CLE',\n 'losing_name': 'New York',\n 'losing_abbr': 'NYK'},\n {'boxscore': '201702040PHO',\n 'away_name': 'Milwaukee',\n 'away_abbr': 'MIL',\n 'away_score': 137,\n 'home_name': 'Phoenix',\n 'home_abbr': 'PHO',\n 'home_score': 112,\n 'winning_name': 'Milwaukee',\n 'winning_abbr': 'MIL',\n 'losing_name': 'Phoenix',\n 
'losing_abbr': 'PHO'},\n {'boxscore': '201702040SAC',\n 'away_name': 'Golden State',\n 'away_abbr': 'GSW',\n 'away_score': 106,\n 'home_name': 'Sacramento',\n 'home_abbr': 'SAC',\n 'home_score': 109,\n 'winning_name': 'Sacramento',\n 'winning_abbr': 'SAC',\n 'losing_name': 'Golden State',\n 'losing_abbr': 'GSW'},\n {'boxscore': '201702040SAS',\n 'away_name': 'Denver',\n 'away_abbr': 'DEN',\n 'away_score': 97,\n 'home_name': 'San Antonio',\n 'home_abbr': 'SAS',\n 'home_score': 121,\n 'winning_name': 'San Antonio',\n 'winning_abbr': 'SAS',\n 'losing_name': 'Denver',\n 'losing_abbr': 'DEN'},\n {'boxscore': '201702040UTA',\n 'away_name': 'Charlotte',\n 'away_abbr': 'CHO',\n 'away_score': 98,\n 'home_name': 'Utah',\n 'home_abbr': 'UTA',\n 'home_score': 105,\n 'winning_name': 'Utah',\n 'winning_abbr': 'UTA',\n 'losing_name': 'Charlotte',\n 'losing_abbr': 'CHO'},\n {'boxscore': '201702040WAS',\n 'away_name': 'New Orleans',\n 'away_abbr': 'NOP',\n 'away_score': 91,\n 'home_name': 'Washington',\n 'home_abbr': 'WAS',\n 'home_score': 105,\n 'winning_name': 'Washington',\n 'winning_abbr': 'WAS',\n 'losing_name': 'New Orleans',\n 'losing_abbr': 'NOP'}\n ],\n '2-5-2017': [\n {'boxscore': '201702050BOS',\n 'away_name': 'LA Clippers',\n 'away_abbr': 'LAC',\n 'away_score': 102,\n 'home_name': 'Boston',\n 'home_abbr': 'BOS',\n 'home_score': 107,\n 'winning_name': 'Boston',\n 'winning_abbr': 'BOS',\n 'losing_name': 'LA Clippers',\n 'losing_abbr': 'LAC'},\n {'boxscore': '201702050BRK',\n 'away_name': 'Toronto',\n 'away_abbr': 'TOR',\n 'away_score': 103,\n 'home_name': 'Brooklyn',\n 'home_abbr': 'BRK',\n 'home_score': 95,\n 'winning_name': 'Toronto',\n 'winning_abbr': 'TOR',\n 'losing_name': 'Brooklyn',\n 'losing_abbr': 'BRK'},\n {'boxscore': '201702050OKC',\n 'away_name': 'Portland',\n 'away_abbr': 'POR',\n 'away_score': 99,\n 'home_name': 'Oklahoma City',\n 'home_abbr': 'OKC',\n 'home_score': 105,\n 'winning_name': 'Oklahoma City',\n 'winning_abbr': 'OKC',\n 'losing_name': 'Portland',\n 'losing_abbr': 'POR'}\n ]\n }\n result = Boxscores(datetime(2017, 2, 4), datetime(2017, 2, 5)).games\n\n assert result == expected\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
bgpeyton/QCEngine
[ "54f42a719edf3f1937dcaf8ab2adbb1171304e12" ]
[ "qcengine/programs/gamess/harvester.py" ]
[ "\"\"\"Compute quantum chemistry using Iowa State's GAMESS executable.\"\"\"\n\nimport logging\nimport pprint\nimport re\nfrom decimal import Decimal\nfrom typing import Tuple\n\nimport numpy as np\nimport qcelemental as qcel\nfrom qcelemental.models import Molecule\nfrom qcelemental.molparse import regex\n\nfrom ..util import PreservingDict\n\npp = pprint.PrettyPrinter(width=120, compact=True, indent=1)\nlogger = logging.getLogger(__name__)\n\n\ndef harvest(p4Mol, gamessout: str, **largs) -> Tuple[PreservingDict, Molecule, list]:\n \"\"\"Parses all the pieces of output from gamess: the stdout in\n *gamessout* Scratch files are not yet considered at this moment.\n \"\"\"\n outqcvar, outMol, outGrad = harvest_output(gamessout)\n\n if outMol:\n outqcvar[\"NUCLEAR REPULSION ENERGY\"] = outMol.nuclear_repulsion_energy()\n if p4Mol:\n if abs(outMol.nuclear_repulsion_energy() - p4Mol.nuclear_repulsion_energy()) > 1.0e-3:\n raise ValueError(\n \"\"\"gamess outfile (NRE: %f) inconsistent with Psi4 input (NRE: %f).\"\"\"\n % (outMol.nuclear_repulsion_energy(), p4Mol.nuclear_repulsion_energy())\n )\n\n amol, data = outMol.align(p4Mol, atoms_map=False, mols_align=True, verbose=0)\n mill = data[\"mill\"]\n if outGrad is not None:\n outGrad = mill.align_gradient(np.array(outGrad))\n else:\n raise ValueError(\"\"\"No coordinate information extracted from gamess output.\"\"\")\n\n return outqcvar, outGrad, outMol\n\n\ndef harvest_output(outtext):\n \"\"\"Function to separate portions of a gamess output file *outtext*,\n divided by \"Step\".\n \"\"\"\n pass_qcvar = []\n pass_coord = []\n pass_grad = []\n\n for outpass in re.split(\n # fmt: off\n r\"^\\s+\" + r\"--------\" + r\"NSERCH:\" + r\"([1-9][0-9][0-9][0-9]*)\" + r\"\\s*\" +\n r\"^\\s+\" + r\"--------\",\n # fmt: on\n outtext,\n re.MULTILINE,\n ):\n\n qcvar, gamesscoord, gamessgrad = harvest_outfile_pass(outpass)\n pass_qcvar.append(qcvar)\n pass_coord.append(gamesscoord)\n pass_grad.append(gamessgrad)\n\n retindx = -1 if pass_coord[-1] else -2\n return pass_qcvar[retindx], pass_coord[retindx], pass_grad[retindx]\n\n\ndef harvest_outfile_pass(outtext):\n \"\"\"Function to read gamess output file *outtext* and parse important\n quantum chemical information from it in\n \"\"\"\n qcvar = PreservingDict()\n qcvar_coord = None\n qcvar_grad = None\n\n NUMBER = r\"(?x:\" + regex.NUMBER + \")\"\n\n # If calculation fail to converge\n mobj = re.search(r\"^\\s+\" + r\"(?:GAMESS TERMINATED ABNORMALLY)\" + r\"\\s*$\", outtext, re.MULTILINE)\n if mobj:\n logger.debug(\"GAMESS TERMINATED ABNORMALLY\")\n\n # If calculation converged\n else:\n mobj = re.search(\n r\"^\\s+\" + r\"(?: TOTAL ENERGY)\" + r\"\\s+=\\s*\" + NUMBER + r\"s*$\", outtext, re.MULTILINE\n )\n if mobj:\n logger.debug(\"matched gamess_RHF energy\")\n qcvar[\"HF TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"SCF TOTAL ENERGY\"] = mobj.group(1)\n\n # Process NRE\n mobj = re.search(\n r\"^\\s+\" + r\"(?: NUCLEAR REPULSION ENERGY)\" + r\"\\s+=\\s*\" + NUMBER + r\"\\s*$\", outtext, re.MULTILINE\n )\n if mobj:\n logger.debug(\"matched NRE\")\n qcvar[\"NUCLEAR REPULSION ENERGY\"] = mobj.group(1)\n\n # Process MP2\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'RESULTS OF MOLLER-PLESSET 2ND ORDER CORRECTION ARE\\n'\n r'^\\s+' + r'E\\(0\\)' + r'=\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(1\\)' + r'=\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(2\\)' + r'=\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(MP2\\)' + r'=\\s+' + NUMBER + r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n 
logger.debug(\"matched mp2\")\n print(\"matched mp2 a\")\n qcvar[\"MP2 CORRELATION ENERGY\"] = mobj.group(3)\n qcvar[\"MP2 TOTAL ENERGY\"] = mobj.group(4)\n\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'RESULTS OF 2ND-ORDER ZAPT CORRECTION' + r'\\s*' +\n r'^\\s+' + r'E\\(HF\\) ' + r'=\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(ZAPT\\) ' + r'=\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'-----------------------------------' + r'\\s*' +\n r'^\\s+' + r'E\\(MP2\\) ' + r'=\\s+' + NUMBER + r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched mp2 b\")\n print(\"matched mp2 b\")\n qcvar[\"MP2 CORRELATION ENERGY\"] = mobj.group(2)\n qcvar[\"MP2 TOTAL ENERGY\"] = mobj.group(3)\n\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'RESULTS OF MOLLER-PLESSET 2ND ORDER CORRECTION ARE\\n'\n r'^\\s+' + r'E\\(0\\)=' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(1\\)=' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(2\\)=' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(MP2\\)=' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'SPIN-COMPONENT-SCALED MP2 RESULTS ARE' + r'\\s*' +\n r'^\\s+' + r'E\\(2S\\)=' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(2T\\)=' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'E\\(2ST\\)=' + r'\\s+' + NUMBER + r'\\s*',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched mp2 rhf c\")\n print(\"matched mp2 c\", mobj.groups())\n qcvar[\"HF TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"MP2 SINGLES ENERGY\"] = mobj.group(2)\n qcvar[\"MP2 DOUBLES ENERGY\"] = mobj.group(3)\n qcvar[\"MP2 TOTAL ENERGY\"] = mobj.group(4)\n qcvar[\"MP2 OPPOSITE-SPIN CORRELATION ENERGY\"] = mobj.group(5)\n qcvar[\"MP2 SAME-SPIN CORRELATION ENERGY\"] = mobj.group(6)\n\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'SINGLE EXCITATION CONTRIBUTION' + r'\\s*' +\n r'^\\s+' + r'ALPHA' + r'\\s*' + NUMBER + r'\\s*' +\n r'^\\s+' + r'BETA' + r'\\s*' + NUMBER + r'\\s*' +\n r'^\\s+' + r'DOUBLE EXCITATION CONTRIBUTION' + r'\\s*' +\n r'^\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' +\n r'^\\s+' + r'RESULTS OF MOLLER-PLESSET 2ND ORDER CORRECTION ARE' + r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched mp2 rohf d\")\n print(\"matched mp2 rohf d\", mobj.groups())\n qcvar[\"MP2 SINGLES ENERGY\"] = Decimal(mobj.group(1)) + Decimal(mobj.group(2))\n qcvar[\"MP2 DOUBLES ENERGY\"] = mobj.group(3)\n\n mobj = re.search(r\"^\\s+\" + \"UHF-MP2 CALCULATION\", outtext, re.MULTILINE)\n if mobj:\n logger.debug(\"matched mp2 uhf e\")\n print(\"matched mp2 uhf e\")\n qcvar[\"MP2 SINGLES ENERGY\"] = \"0.0\"\n\n # Process CCSD\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'SUMMARY OF RESULTS' + r'\\s+' + r'\\n' +\n r'^\\s+' + r'REFERENCE ENERGY:' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'MBPT\\(2\\) ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'CCSD ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched rhf ccsd\")\n qcvar[\"HF TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"SCF TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"MP2 CORRELATION ENERGY\"] = mobj.group(3)\n qcvar[\"MP2 DOUBLES ENERGY\"] = mobj.group(3)\n qcvar[\"MP2 TOTAL ENERGY\"] = mobj.group(2)\n qcvar[\"CCSD DOUBLES ENERGY\"] = mobj.group(5)\n qcvar[\"CCSD CORRELATION ENERGY\"] = mobj.group(5)\n qcvar[\"CCSD TOTAL ENERGY\"] = mobj.group(4)\n\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'SUMMARY OF CCSD RESULTS' + r'\\s+' + r'\\n' 
+\n r'^\\s+' + r'REFERENCE ENERGY:' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'CCSD ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR. E=\\s+' + r'\\s+' + NUMBER + r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched rohf ccsd\")\n qcvar[\"SCF TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"CCSD CORRELATION ENERGY\"] = mobj.group(3)\n qcvar[\"CCSD TOTAL ENERGY\"] = mobj.group(2)\n\n # Process CR-CC(2,3)\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'CCSD ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'CR-CC\\(2,3\\),A OR CCSD\\(2\\)_T ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'CR-CC\\(2,3\\) OR CR-CCSD\\(T\\)_L ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched cc-cr(2,3)\")\n qcvar[\"CCSD TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"CCSD CORRELATION ENERGY\"] = mobj.group(2)\n qcvar[\"CR-CC(2,3),A TOTAL ENERGY\"] = mobj.group(3)\n qcvar[\"CR-CC(2,3),A CORRELATION ENERGY\"] = mobj.group(4)\n qcvar[\"CR-CC(2,3) TOTAL ENERGY\"] = mobj.group(5)\n qcvar[\"CR-CC(2,3) CORRELATION ENERGY\"] = mobj.group(6)\n\n # Process CCSD(T)\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'SUMMARY OF RESULTS' + r'\\s+' + r'\\n' +\n r'^\\s+' + r'REFERENCE ENERGY:' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'MBPT\\(2\\) ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'CCSD ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'CCSD\\[T\\] ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*' +\n r'^\\s+' + r'CCSD\\(T\\) ENERGY:' + r'\\s+' + NUMBER + r'\\s*' + r'CORR.E=\\s+' + r'\\s+' + NUMBER + r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched ccsd(t)\")\n qcvar[\"HF TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"SCF TOTAL ENERGY\"] = mobj.group(1)\n qcvar[\"CCSD CORRELATION ENERGY\"] = mobj.group(5)\n qcvar[\"CCSD TOTAL ENERGY\"] = mobj.group(4)\n qcvar[\"(T) CORRECTION ENERGY\"] = Decimal(mobj.group(8)) - Decimal(mobj.group(4))\n # qcvar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(5)) - qcvar['SCF TOTAL ENERGY']\n qcvar[\"CCSD(T) CORRELATION ENERGY\"] = mobj.group(9)\n qcvar[\"CCSD(T) TOTAL ENERGY\"] = mobj.group(8)\n\n # Process FCI\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'ALDET CI PROPERTIES...FOR THE WAVEFUNCTION OF STATE' + r'\\s+' + r'(?P<state>\\d+)' + r'\\s*' +\n r'^\\s+' + r'USING THE EXPECTATION VALUE DENSITY' + r'\\s*' +\n r'(?:.*?)' +\n r'^\\s+' + r'TOTAL ENERGY =' + r'\\s+' + NUMBER + r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE | re.DOTALL,\n )\n if mobj:\n logger.debug(\"matched fci\")\n qcvar[\"FCI TOTAL ENERGY\"] = mobj.group(2)\n qcvar[\"CI TOTAL ENERGY\"] = mobj.group(2)\n\n # Process EOM-CCSD\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'---- SUMMARY OF EOM-CCSD CALCULATIONS ----' +\n r'\\s+' + r'\\n' +\n r'^\\s+' + r'EXCITATION EXCITATION TOTAL' + r'\\s*' +\n r'^\\s+' + r'SYMMETRY ENERGY \\(H\\) ENERGY \\(EV\\) ENERGY \\(H\\) ITERATIONS' + r'\\s*' +\n r'^\\s+' + r'A' + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + r'CONVERGED\\s+' +\n r'^\\s+' + r'A' + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + r'CONVERGED\\s+' +\n r'^\\s+' + r'A' + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + r'CONVERGED\\s+' +\n 
r'^\\s+' + r'A' + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + NUMBER + r'\\s+' + r'CONVERGED\\s+',\n # fmt: on\n outtext,\n re.MULTILINE,\n )\n if mobj:\n logger.debug(\"matched eom-ccsd\")\n qcvar[\"EOM-CCSD ROOT 1 EXCITATION ENERGY\"] = mobj.group(1)\n qcvar[\"EOM-CCSD ROOT 2 EXCITATION ENERGY\"] = mobj.group(4)\n qcvar[\"EOM-CCSD ROOT 3 EXCITATION ENERGY\"] = mobj.group(7)\n qcvar[\"EOM-CCSD ROOT 4 EXCITATION ENERGY\"] = mobj.group(10)\n qcvar[\"EOM-CCSD ROOT 1 TOTAL ENERGY\"] = mobj.group(3)\n qcvar[\"EOM-CCSD ROOT 2 TOTAL ENERGY\"] = mobj.group(6)\n qcvar[\"EOM-CCSD ROOT 3 TOTAL ENERGY\"] = mobj.group(9)\n qcvar[\"EOM-CCSD ROOT 4 TOTAL ENERGY\"] = mobj.group(12)\n\n # Process DFT\n # mobj = re.search(\n # r'^\\s+' + r'DFT EXCHANGE + CORRELATION ENERGY' + r'=\\s+' + NUMBER + r'\\s*$'\n # ,outtext, re.MULTILINE)\n mobj = re.search(\n r\"^\\s+\" + r\"DFT EXCHANGE \\+ CORRELATION ENERGY\" + r\"\\s+=\\s*\" + NUMBER + r\"\\s*$\", outtext, re.MULTILINE\n )\n if mobj:\n logger.debug(\"matched dft xc\")\n qcvar[\"DFT XC ENERGY\"] = mobj.group(1)\n\n # Process Geometry\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'ATOM ATOMIC COORDINATES \\(BOHR\\)' + r'\\s*' +\n r'^\\s+' + r'CHARGE X Y Z'+ r'\\s*' +\n r'((?:\\s+([A-Z][a-z]*)+\\s+\\d+\\.\\d+\\s+[-+]?\\d+\\.\\d+\\s+[-+]?\\d+\\.\\d+\\s+[-+]?\\d+\\.\\d+\\s*\\n)+)'+r'\\s*$',\n # fmt: on\n outtext,\n re.MULTILINE | re.IGNORECASE,\n )\n if mobj:\n logger.debug(\"matched geom\")\n molxyz = \"%d bohr\\n\\n\" % len(mobj.group(1).splitlines())\n for line in mobj.group(1).splitlines():\n lline = line.split()\n molxyz += \"%s %16s %16s %16s\\n\" % (int(float(lline[-4])), lline[-3], lline[-2], lline[-1])\n qcvar_coord = Molecule(\n validate=False,\n **qcel.molparse.to_schema(\n qcel.molparse.from_string(molxyz, dtype=\"xyz+\", fix_com=True, fix_orientation=True)[\"qm\"], dtype=2\n ),\n )\n\n # Process Gradient\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'GRADIENT OF THE ENERGY' + r'\\s*'+\n r'^\\s+' + r'----------------------' + r'\\s*'+\n r'\\s+' + r'\\n'+\n r'^\\s+' + r'UNITS ARE HARTREE/BOHR E\\'X E\\'Y E\\'Z' + r'\\s*' +\n r'((?:\\s+([1-9][0-9]*)+\\s+([A-Z][a-x]*)+\\s+[-+]?\\d+\\.\\d+\\s+[-+]?\\d+\\.\\d+\\s+[-+]?\\d+\\.\\d+\\s*\\n)+)' +\n r'\\s*$',\n # fmt: off\n outtext, re.MULTILINE\n )\n if mobj:\n logger.debug(\"matched gradient - after\")\n atoms = []\n qcvar_grad = []\n for line in mobj.group(1).splitlines():\n lline = line.split()\n if lline == []:\n pass\n else:\n logger.debug(\"printing gradient\")\n atoms.append(lline[1])\n qcvar_grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])])\n qcvar_grad = np.array(qcvar_grad)\n\n # Process SCF blocks\n mobj = re.search(\n # fmt: off\n r'^\\s+' + r'PROPERTIES FOR THE .* DFT FUNCTIONAL .* DENSITY MATRIX' +\n r'(.*)' +\n r'\\.\\.\\.\\.\\.\\. 
END OF PROPERTY EVALUATION \\.\\.\\.\\.\\.\\.',\n # fmt: on\n outtext,\n re.MULTILINE | re.DOTALL,\n )\n if mobj:\n logger.debug(\"matched dft props\")\n prop_block = mobj.group(1)\n mobj_energy = re.search(\n # fmt: off\n r'\\s+ENERGY COMPONENTS\\s*\\n' +\n r'\\s+-----------------\\s*\\n' +\n r'\\s*\\n' +\n r'\\s+WAVEFUNCTION NORMALIZATION =.*\\n' +\n r'\\s*\\n' +\n r'\\s+ONE ELECTRON ENERGY =\\s+' + NUMBER + r'\\n' +\n r'\\s+TWO ELECTRON ENERGY =\\s+' + NUMBER + r'\\n' +\n r'\\s+NUCLEAR REPULSION ENERGY =\\s+' + NUMBER + r'\\n' +\n r'\\s+------------------\\s*\\n' +\n r'\\s+TOTAL ENERGY =\\s+' + NUMBER+r'\\n',\n # fmt: on\n prop_block\n )\n if mobj_energy:\n qcvar[\"ONE-ELECTRON ENERGY\"] = mobj_energy.group(1)\n qcvar[\"TWO-ELECTRON ENERGY\"] = mobj_energy.group(2)\n qcvar[\"SCF TOTAL ENERGY\"] = mobj_energy.group(4)\n qcvar[\"DFT TOTAL ENERGY\"] = mobj_energy.group(4)\n\n mobj_dipole = re.search(\n # fmt: off\n r'\\s+ELECTROSTATIC MOMENTS\\s*\\n'\n r'\\s+---------------------\\s*\\n'\n r'\\s*\\n'\n r'\\s*POINT\\s+1\\s+X\\s+Y\\s+Z\\s+\\(BOHR\\)\\s+CHARGE\\s*\\n'\n r'.*\\n'\n r'\\s*DX\\s+DY\\s+DZ\\s+/D/\\s+\\(DEBYE\\)\\s*\\n'\n r'\\s*' + NUMBER + '\\s+' + NUMBER + '\\s+' + NUMBER + '\\s+' + NUMBER + r'\\s*\\n',\n # fmt: on\n prop_block\n )\n if mobj_dipole:\n d2au = Decimal(qcel.constants.conversion_factor(\"debye\", \"e * bohr\"))\n qcvar[\"SCF DIPOLE\"] = d2au * np.array(\n [Decimal(mobj_dipole.group(1)), Decimal(mobj_dipole.group(2)), Decimal(mobj_dipole.group(3))]\n )\n\n # Process CURRENT Energies\n if \"HF TOTAL ENERGY\" in qcvar:\n qcvar[\"CURRENT REFERENCE ENERGY\"] = qcvar[\"HF TOTAL ENERGY\"]\n qcvar[\"CURRENT ENERGY\"] = qcvar[\"HF TOTAL ENERGY\"]\n\n if \"MP2 TOTAL ENERGY\" in qcvar and \"MP2 CORRELATION ENERGY\" in qcvar:\n qcvar[\"CURRENT CORRELATION ENERGY\"] = qcvar[\"MP2 CORRELATION ENERGY\"]\n qcvar[\"CURRENT ENERGY\"] = qcvar[\"MP2 TOTAL ENERGY\"]\n\n if \"CCSD TOTAL ENERGY\" in qcvar and \"CCSD CORRELATION ENERGY\" in qcvar:\n qcvar[\"CURRENT CORRELATION ENERGY\"] = qcvar[\"CCSD CORRELATION ENERGY\"]\n qcvar[\"CURRENT ENERGY\"] = qcvar[\"CCSD TOTAL ENERGY\"]\n\n if \"CR-CC(2,3) TOTAL ENERGY\" in qcvar and \"CR-CC(2,3) CORRELATION ENERGY\" in qcvar:\n qcvar[\"CURRENT CORRELATION ENERGY\"] = qcvar[\"CR-CC(2,3) CORRELATION ENERGY\"]\n qcvar[\"CURRENT ENERGY\"] = qcvar[\"CR-CC(2,3) TOTAL ENERGY\"]\n\n if \"CCSD(T) TOTAL ENERGY\" in qcvar and \"CCSD(T) CORRELATION ENERGY\" in qcvar:\n qcvar[\"CURRENT CORRELATION ENERGY\"] = qcvar[\"CCSD(T) CORRELATION ENERGY\"]\n qcvar[\"CURRENT ENERGY\"] = qcvar[\"CCSD(T) TOTAL ENERGY\"]\n\n if \"DFT TOTAL ENERGY\" in qcvar:\n qcvar[\"CURRENT REFERENCE ENERGY\"] = qcvar[\"DFT TOTAL ENERGY\"]\n qcvar[\"CURRENT ENERGY\"] = qcvar[\"DFT TOTAL ENERGY\"]\n\n if \"FCI TOTAL ENERGY\" in qcvar: # and 'FCI CORRELATION ENERGY' in qcvar:\n qcvar[\"CURRENT ENERGY\"] = qcvar[\"FCI TOTAL ENERGY\"]\n\n return qcvar, qcvar_coord, qcvar_grad\n" ]
[ [ "numpy.array" ] ]
xinbaiusc/MLR-OOD
[ "b6e0ac19b17a61cf7599bf1ce9bf27c8451d1c10" ]
[ "train.py" ]
[ "# coding=utf-8\n# This code is modified based on generative.py at \n#\n# https://github.com/google-research/google-research/tree/master/genomics_ood\n#\n# Copyright 2021 University of Southern California.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Build an autoregressive generative model for DNA sequences.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport json\nimport os\nimport random\nfrom absl import flags\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nimport tensorflow as tf\nfrom genomics_ood import utils\nfrom tensorflow.contrib import rnn as contrib_rnn\nfrom tensorflow.contrib import training as contrib_training\nfrom numba import jit, cuda\n\n# parameters\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('random_seed', 1234, 'The random seed')\nflags.DEFINE_integer('batch_size', 100, 'The number of images in each batch.')\nflags.DEFINE_integer('num_steps', 900000, 'The number of trainig steps')\nflags.DEFINE_integer('val_freq', 1000, 'How often to eval validation (# steps)')\nflags.DEFINE_float('learning_rate', 0.0005, 'The learning rate')\nflags.DEFINE_boolean(\n 'emb_variable', False,\n 'If the word embedding is variables. If not, use one-hot encoding.')\nflags.DEFINE_integer('emb_size', 4, 'The word embedding dimensions')\nflags.DEFINE_integer('hidden_lstm_size', 2000,\n 'The number of hidden units in LSTM.')\nflags.DEFINE_boolean('norm_lstm', False,\n 'If turn on the layer normalization for LSTM.')\nflags.DEFINE_float('dropout_rate', 0.1, 'The learning rate')\nflags.DEFINE_string(\n 'reg_type', 'l2',\n 'l2 or l1 regularization for parameters in lstm and dense layers.')\nflags.DEFINE_float(\n 'reg_weight', 0.0,\n 'The regularization weight for parameters in lstm and dense layers.')\nflags.DEFINE_integer('seq_len', 250, 'sequence length')\nflags.DEFINE_float('mutation_rate', 0.0, 'Mutation rate for data augmentation.')\nflags.DEFINE_integer(\n 'filter_label', -1,\n ('If only sequences from the class=filter_label are used for training.'\n 'if -1, no filter.'))\nflags.DEFINE_string('in_tr_fasta_data', '/tmp/data/in_tr/ID_0.fa',\n 'fasta file of in-distribution training sequences')\nflags.DEFINE_integer('in_tr_fasta_data_class', 0,\n 'the ID training class of the input fasta file')\nflags.DEFINE_string('in_tr_data_dir', '/tmp/data/in_tr',\n 'data directory of in-distribution training')\nflags.DEFINE_string('out_dir', '/tmp/out_training',\n 'Directory where to write log and models.')\nflags.DEFINE_boolean('save_meta', False, 'Save meta graph file for each ckpt.')\nflags.DEFINE_string('master', '', 'TensorFlow master to use.')\n\nFLAGS = flags.FLAGS\n\ndict = {\"A\":'0', \"C\":'1', \"G\":'2', \"T\":'3'}\ndef get_example_object(data_record):\n # Convert individual data into a list of int64 or float or bytes\n int_list1 = tf.train.Int64List(value = [data_record['int_data']])\n str_list1 = tf.train.BytesList(value = [data_record['str_data'].encode('utf-8')])\n # Create a 
dictionary with above lists individually wrapped in Feature\n feature_key_value_pair = {\n 'x': tf.train.Feature(bytes_list = str_list1),\n 'y': tf.train.Feature(int64_list = int_list1)\n }\n features = tf.train.Features(feature = feature_key_value_pair)\n example = tf.train.Example(features = features)\n return example\n\ndef create_out_dir(params):\n \"\"\"Setup the output directory.\"\"\"\n params.in_tr_data_dir = utils.clean_last_slash_if_any(params.in_tr_data_dir)\n sub_dir = ('generative_l%d_bs%d_lr%.4f'\n '_hr%d_nr%s_reg%s_regw%.6f_fi%d_mt%.2f') % (\n params.seq_len, params.batch_size, params.learning_rate,\n params.hidden_lstm_size, params.norm_lstm, params.reg_type,\n params.reg_weight, params.filter_label, params.mutation_rate)\n log_dir = os.path.join(params.out_dir, sub_dir, 'log')\n params.add_hparam('log_dir_in_tr', os.path.join(log_dir, 'in_tr'))\n params.add_hparam('model_dir', log_dir.replace('log', 'model'))\n\n if not tf.gfile.Exists(params.out_dir):\n tf.gfile.MakeDirs(params.out_dir)\n if not tf.gfile.Exists(params.log_dir_in_tr):\n tf.gfile.MakeDirs(params.log_dir_in_tr)\n if not tf.gfile.Exists(params.model_dir):\n tf.gfile.MakeDirs(params.model_dir)\n\n tf.logging.info('model_dir=%s', params.model_dir)\n\n\ndef filter_for_label(features, target_label):\n return tf.equal(features['y'],\n tf.convert_to_tensor(target_label, dtype=tf.int32))\n\n\ndef load_datasets(params, mode_eval=False):\n \"\"\"load class labels, in_tr_data, in_val_data, ood_val_data.\"\"\"\n if mode_eval: # For evaluation, no need to prepare training data\n in_tr_dataset = None\n else:\n in_tr_file_list = [\n os.path.join(params.in_tr_data_dir, x)\n for x in tf.gfile.ListDirectory(params.in_tr_data_dir)\n if params.in_tr_file_pattern in x\n ]\n\n # load in-distribution training sequence\n in_tr_data_file_list = [x for x in in_tr_file_list if '.tfrecord' in x]\n tf.logging.info('in_tr_data_file_list=%s', in_tr_data_file_list)\n\n def parse_single_tfexample_addmutations_short(unused_key, v):\n return utils.parse_single_tfexample_addmutations(unused_key, v,\n params.mutation_rate,\n params.seq_len)\n\n # for training a background model, we mutate input sequences\n if params.mutation_rate == 0:\n in_tr_dataset = tf.data.TFRecordDataset(in_tr_data_file_list).map(\n lambda v: utils.parse_single_tfexample(v, v))\n else:\n in_tr_dataset = tf.data.TFRecordDataset(in_tr_data_file_list).map(\n lambda v: parse_single_tfexample_addmutations_short(v, v))\n\n if params.filter_label != -1:\n\n def filter_fn(v):\n return filter_for_label(v, params.filter_label)\n\n in_tr_dataset = in_tr_dataset.filter(filter_fn)\n return in_tr_dataset\n\nclass SeqModel(object):\n \"\"\"DNA sequence modeling.\"\"\"\n\n def __init__(self, params):\n \"\"\"Create the model.\"\"\"\n self._params = params\n\n self._make_dataset()\n self._make_placeholders()\n if self._params.emb_variable:\n self._make_variables()\n else:\n self._one_hot_encode_x()\n self._make_rnn_model()\n self._make_losses()\n self._make_summary_stats()\n self._make_train_op()\n\n def _make_dataset(self):\n \"\"\"make data generators.\"\"\"\n self.handle = tf.placeholder(tf.string, shape=[])\n self.iterator = tf.data.Iterator.from_string_handle(self.handle, {\n 'x': tf.int32,\n 'y': tf.int32\n }, {\n 'x': [None, self._params.seq_len],\n 'y': [None]\n })\n features = self.iterator.get_next()\n self.x, self.y0 = features['x'], features['y']\n\n def _make_placeholders(self):\n \"\"\"Make placeholders for dropout rate.\"\"\"\n self.dropout_rate = 
tf.placeholder_with_default(\n self._params.dropout_rate, shape=(), name='dropout_rnn')\n\n def _make_variables(self):\n \"\"\"make variables.\"\"\"\n # emb_size must equal to vocab_size,\n # otherwise exceed vocab will be encoded as zeros\n tf.logging.info('using variable dict for embedding')\n self.emb_dict = tf.Variable(\n tf.one_hot(\n list(range(self._params.vocab_size)), depth=self._params.emb_size))\n self.x_emb = tf.nn.embedding_lookup(\n self.emb_dict, tf.cast(self.x, dtype=tf.int64), name='embx')\n\n def _one_hot_encode_x(self):\n \"\"\"Make embedding layer.\"\"\"\n # input for encoder\n tf.logging.info('use one hot encoding')\n self.x_emb = tf.one_hot(\n tf.cast(self.x, dtype=tf.int64), depth=self._params.vocab_size)\n tf.logging.info('shape of x_emb=%s', self.x_emb.shape)\n\n def _make_rnn_model(self):\n \"\"\"Make rnn model.\"\"\"\n self.y = tf.cast(self.x[:, 1:], dtype=tf.int64)\n self.y_emb = tf.one_hot(self.y, depth=self._params.emb_size)\n tf.logging.info('y.shape=%s', self.y.shape)\n\n lstm_fw_cell_g = contrib_rnn.LayerNormBasicLSTMCell(\n self._params.hidden_lstm_size,\n layer_norm=self._params.norm_lstm,\n dropout_keep_prob=1 - self.dropout_rate)\n lstm_hidden, _ = tf.nn.dynamic_rnn(\n lstm_fw_cell_g, self.x_emb, dtype=tf.float32)\n # stagger two directional vectors so that the backward RNN does not reveal\n # medium.com/@plusepsilon/the-bidirectional-language-model-1f3961d1fb27\n self.logits = tf.layers.dense(\n lstm_hidden[:, :-1, :],\n units=self._params.vocab_size,\n activation=None,\n name='logits')\n tf.logging.info('shape of logits=%s', self.logits.shape)\n\n # cross entropy\n self.loss_i_t = tf.nn.softmax_cross_entropy_with_logits(\n labels=self.y_emb, logits=self.logits)\n self.loss_i = tf.reduce_mean(self.loss_i_t, axis=1)\n\n def _make_losses(self):\n \"\"\"make loss functions.\"\"\"\n self.loss = tf.reduce_mean(self.loss_i)\n # l2 norm\n self.variables = tf.trainable_variables()\n if self._params.reg_type == 'l2':\n self.loss_reg = tf.add_n(\n [tf.nn.l2_loss(v) for v in self.variables if 'bias' not in v.name])\n else:\n self.loss_reg = tf.add_n([\n tf.reduce_sum(tf.abs(v))\n for v in self.variables\n if 'bias' not in v.name\n ])\n # total loss\n self.loss_total = self.loss + self._params.reg_weight * self.loss_reg\n\n def _make_summary_stats(self):\n \"\"\"make summary stats.\"\"\"\n probs = tf.nn.softmax(self.logits)\n pred_words = tf.argmax(probs, axis=2)\n self.acc_i_t = tf.equal(pred_words, tf.cast(self.y, dtype=tf.int64))\n self.acc_i = tf.reduce_mean(tf.cast(self.acc_i_t, dtype=tf.float32), axis=1)\n self.acc = tf.reduce_mean(self.acc_i)\n\n self.summary = tf.summary.merge([\n tf.summary.scalar('loss', self.loss),\n tf.summary.scalar('acc', self.acc),\n tf.summary.scalar('loss_total', self.loss_total),\n tf.summary.scalar('loss_reg', self.loss_reg)\n ])\n\n def _make_train_op(self):\n \"\"\"make train op.\"\"\"\n # training operations\n optimizer = tf.train.AdamOptimizer(self._params.learning_rate)\n grads = optimizer.compute_gradients(\n self.loss_total, var_list=self.variables)\n self.minimize = optimizer.apply_gradients(grads)\n\n def reset(self):\n \"\"\"prepare sess.\"\"\"\n # setup session and\n self.sess = tf.Session(self._params.master)\n self.sess.run(tf.global_variables_initializer())\n self.tr_writer = tf.summary.FileWriter(self._params.log_dir_in_tr,\n self.sess.graph)\n self.saver = tf.train.Saver(max_to_keep=500)\n\n def train(self, in_tr_dataset, prev_steps):\n \"\"\"training steps.\"\"\"\n in_tr_dataset = 
in_tr_dataset.repeat().shuffle(1000).batch(\n self._params.batch_size)\n\n in_tr_iterator = in_tr_dataset.make_one_shot_iterator()\n\n self.in_tr_handle = self.sess.run(in_tr_iterator.string_handle())\n\n num_steps = self._params.num_steps\n for i in range(prev_steps, num_steps, 1):\n _, in_tr_loss, _, in_tr_acc, in_tr_summary = self.sess.run(\n [self.minimize, self.loss, self.loss_i, self.acc, self.summary],\n feed_dict={\n self.handle: self.in_tr_handle,\n self.dropout_rate: self._params.dropout_rate\n })\n \n \n if i % self._params.val_freq == 0:\n tf.logging.info(\n ('i=%d \\t in_tr_loss=%.4f'\n 'in_tr_acc=%.4f'\n ), i, in_tr_loss, in_tr_acc)\n\n _ = self.saver.save(\n self.sess,\n os.path.join(self._params.model_dir, 'model_{}.ckpt'.format(i)),\n write_meta_graph=self._params.save_meta) # if meta file is too big\n\n self.tr_writer.add_summary(in_tr_summary, i)\n self.tr_writer.flush()\n \n\n def finish(self):\n tf.logging.info('training is done')\n self.tr_writer.close()\n #self.val_writer.close()\n self.saver.close()\n\n def restore_from_ckpt(self, ckpt_path):\n \"\"\"restore model from a ckpt.\"\"\"\n # meta_file = ckpt_path + '.meta'\n # saver = tf.train.import_meta_graph(meta_file)\n self.saver.restore(self.sess, ckpt_path)\n\n def pred_from_ckpt(self, test_dataset, num_samples):\n \"\"\"make prediction from a ckpt.\"\"\"\n test_dataset = test_dataset.batch(self._params.batch_size)\n test_iterator = test_dataset.make_one_shot_iterator()\n\n self.test_handle = self.sess.run(test_iterator.string_handle())\n\n loss_test = []\n loss_total_test = []\n acc_test = []\n y_test = []\n x_test = []\n for _ in range(num_samples // self._params.batch_size):\n out = self.sess.run(\n [self.loss_i, self.loss_total, self.acc_i, self.y0, self.y],\n feed_dict={\n self.handle: self.test_handle,\n self.dropout_rate: 0\n })\n loss_test.append(out[0])\n loss_total_test.append(out[1])\n acc_test.append(out[2])\n y_test.append(out[3])\n x_test.append(out[4])\n return loss_test, loss_total_test, acc_test, y_test, x_test\n\ndef main(_):\n\n tf.logging.set_verbosity(tf.logging.INFO)\n random.seed(FLAGS.random_seed)\n\n params = contrib_training.HParams(\n num_steps=FLAGS.num_steps,\n val_freq=FLAGS.val_freq,\n seq_len=FLAGS.seq_len,\n batch_size=FLAGS.batch_size,\n emb_variable=FLAGS.emb_variable,\n emb_size=FLAGS.emb_size,\n vocab_size=4,\n hidden_lstm_size=FLAGS.hidden_lstm_size,\n norm_lstm=FLAGS.norm_lstm,\n dropout_rate=FLAGS.dropout_rate,\n learning_rate=FLAGS.learning_rate,\n reg_type=FLAGS.reg_type,\n reg_weight=FLAGS.reg_weight,\n out_dir=FLAGS.out_dir,\n in_tr_fasta_data=FLAGS.in_tr_fasta_data,\n in_tr_fasta_data_class=FLAGS.in_tr_fasta_data_class,\n in_tr_data_dir=FLAGS.in_tr_data_dir,\n master=FLAGS.master,\n save_meta=FLAGS.save_meta,\n filter_label=FLAGS.filter_label,\n mutation_rate=FLAGS.mutation_rate,\n )\n \n # create the input tfrecord training data according to the input fasta data\n if not params.in_tr_data_dir.endswith('/'):\n params.in_tr_data_dir += '/'\n in_tr_tfrecord_file_name = params.in_tr_data_dir + 'in_tr_class_' + str(params.in_tr_fasta_data_class) + '.tfrecord'\n with tf.python_io.TFRecordWriter(in_tr_tfrecord_file_name) as tfwriter:\n with open(params.in_tr_fasta_data) as f:\n for line in f.readlines():\n if not line.startswith('>'):\n line = line.strip()\n if len(line) != params.seq_len:\n raise ValueError('Each sequence must have the same length as the input length!')\n numeric_line = [dict[x] if x in dict else x for x in line]\n str_numeric_line = ' '.join(numeric_line)\n 
data_record = {\n 'str_data': str_numeric_line,\n 'int_data': params.in_tr_fasta_data_class\n }\n example = get_example_object(data_record)\n tfwriter.write(example.SerializeToString())\n\n # setup output directory\n create_out_dir(params)\n\n # load datasets\n params.add_hparam('in_tr_file_pattern', 'in_tr')\n (in_tr_dataset) = load_datasets(params)\n\n # print parameter settings\n tf.logging.info(params)\n with tf.gfile.GFile(\n os.path.join(params.model_dir, 'params.json'), mode='w') as f:\n f.write(json.dumps(params.to_json(), sort_keys=True))\n\n # construct model\n model = SeqModel(params)\n model.reset()\n\n ## if previous model ckpt exists, restore the model from there\n tf.logging.info('model dir=%s', os.path.join(params.out_dir, '*.ckpt.index'))\n prev_steps, ckpt_file = utils.get_latest_ckpt(params.model_dir)\n if ckpt_file:\n tf.logging.info('previous ckpt exist, prev_steps=%s', prev_steps)\n model.restore_from_ckpt(ckpt_file)\n\n # training\n model.train(in_tr_dataset, prev_steps)\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.data.TFRecordDataset", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.logging.set_verbosity", "tensorflow.train.Int64List", "tensorflow.train.Feature", "tensorflow.data.Iterator.from_string_handle", "tensorflow.contrib.rnn.LayerNormBasicLSTMCell", "tensorflow.nn.dynamic_rnn", "tensorflow.nn.l2_loss", "tensorflow.one_hot", "tensorflow.convert_to_tensor", "tensorflow.abs", "tensorflow.summary.FileWriter", "tensorflow.nn.softmax", "tensorflow.gfile.ListDirectory", "tensorflow.global_variables_initializer", "tensorflow.train.Features", "tensorflow.python_io.TFRecordWriter", "tensorflow.contrib.training.HParams", "tensorflow.gfile.MakeDirs", "tensorflow.app.run", "tensorflow.cast", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.layers.dense", "tensorflow.placeholder_with_default", "tensorflow.placeholder", "tensorflow.logging.info", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.gfile.Exists", "tensorflow.train.Example" ] ]
tvogiannou/ctrigrid
[ "a562bf0b72af87deb0b24a0fd4b3687be98af113" ]
[ "python/tests/test_grid.py" ]
[ "\nimport sys\nimport numpy\nimport time\n\nimport ctrigrid_bindings # this needs to be copied in the local directory\n\n\ns=0.5\ncube_vertices=[\n -s, -s, -s,\n s, -s, -s,\n s, s, -s,\n -s, s, -s,\n -s, -s, s,\n s, -s, s,\n s, s, s,\n -s, s, s,\n ]\nx=0.577350269\ncube_normals=[\n -x, -x, -x,\n x, -x, -x,\n x, x, -x,\n -x, x, -x,\n -x, -x, x,\n x, -x, x,\n x, x, x,\n -x, x, x,\n ]\ncube_indices=[\n 0, 1, 2, 2, 3, 0,\n 0, 4, 5, 5, 1, 0,\n 1, 5, 6, 6, 2, 1,\n 2, 6, 7, 7, 3, 2,\n 3, 7, 4, 4, 0, 3,\n 4, 7, 6, 6, 5, 4,\n]\n\n\ndef gen_random_points(count, origin, steps, width):\n\n min3 = origin\n max3 = ctrigrid_bindings.vec3(\n origin.x + steps * width,\n origin.y + steps * width,\n origin.z + steps * width)\n\n x = numpy.random.rand(count, 1)\n y = numpy.random.rand(count, 1)\n z = numpy.random.rand(count, 1)\n \n cx = (min3.x + max3.x) / 2\n cy = (min3.y + max3.y) / 2\n cz = (min3.z + max3.z) / 2\n\n lx = min3.x - max3.x\n ly = min3.y - max3.y\n lz = min3.z - max3.z\n\n x = (x - 1/2) * lx + cx\n y = (y - 1/2) * ly + cy\n z = (z - 1/2) * lz + cz\n\n p = numpy.hstack((x, y, z))\n return p.ravel().tolist()\n\ndef main():\n\n mesh_vertices = cube_vertices\n mesh_indices = cube_indices\n mesh_normals = cube_normals\n\n # create the grid\n origin = ctrigrid_bindings.vec3(-1.0, -1.0, -1.0)\n N = 16\n width = 1/8\n grid = ctrigrid_bindings.grid(N, N, N, width, origin)\n grid.add_tri_mesh(mesh_vertices, mesh_indices)\n\n # query closest point\n c = gen_random_points(1024, origin, N, width)\n\n start = time.perf_counter()\n cp, _ = grid.closest_points(c) # note that with small number of points, multiple threads is not faster\n end = time.perf_counter()\n print(\"Grid query finished in {} msecs\".format((end - start) * 1000))\n\n print(cp)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.hstack", "numpy.random.rand" ] ]
MAZiqing/FEDformer
[ "7914d39df829494a8172afb9676982c3789d491d" ]
[ "layers/utils.py" ]
[ "import torch\nimport torch.nn as nn\n\nimport numpy as np\nfrom functools import partial\n\nfrom scipy.special import eval_legendre\nfrom sympy import Poly, legendre, Symbol, chebyshevt\n\ndef legendreDer(k, x):\n def _legendre(k, x):\n return (2*k+1) * eval_legendre(k, x)\n out = 0\n for i in np.arange(k-1,-1,-2):\n out += _legendre(i, x)\n return out\n\ndef phi_(phi_c, x, lb = 0, ub = 1):\n mask = np.logical_or(x<lb, x>ub) * 1.0\n return np.polynomial.polynomial.Polynomial(phi_c)(x) * (1-mask)\n\ndef get_phi_psi(k, base):\n \n x = Symbol('x')\n phi_coeff = np.zeros((k,k))\n phi_2x_coeff = np.zeros((k,k))\n if base == 'legendre':\n for ki in range(k):\n coeff_ = Poly(legendre(ki, 2*x-1), x).all_coeffs()\n phi_coeff[ki,:ki+1] = np.flip(np.sqrt(2*ki+1) * np.array(coeff_).astype(np.float64))\n coeff_ = Poly(legendre(ki, 4*x-1), x).all_coeffs()\n phi_2x_coeff[ki,:ki+1] = np.flip(np.sqrt(2) * np.sqrt(2*ki+1) * np.array(coeff_).astype(np.float64))\n \n psi1_coeff = np.zeros((k, k))\n psi2_coeff = np.zeros((k, k))\n for ki in range(k):\n psi1_coeff[ki,:] = phi_2x_coeff[ki,:]\n for i in range(k):\n a = phi_2x_coeff[ki,:ki+1]\n b = phi_coeff[i, :i+1]\n prod_ = np.convolve(a, b)\n prod_[np.abs(prod_)<1e-8] = 0\n proj_ = (prod_ * 1/(np.arange(len(prod_))+1) * np.power(0.5, 1+np.arange(len(prod_)))).sum()\n psi1_coeff[ki,:] -= proj_ * phi_coeff[i,:]\n psi2_coeff[ki,:] -= proj_ * phi_coeff[i,:]\n for j in range(ki):\n a = phi_2x_coeff[ki,:ki+1]\n b = psi1_coeff[j, :]\n prod_ = np.convolve(a, b)\n prod_[np.abs(prod_)<1e-8] = 0\n proj_ = (prod_ * 1/(np.arange(len(prod_))+1) * np.power(0.5, 1+np.arange(len(prod_)))).sum()\n psi1_coeff[ki,:] -= proj_ * psi1_coeff[j,:]\n psi2_coeff[ki,:] -= proj_ * psi2_coeff[j,:]\n\n a = psi1_coeff[ki,:]\n prod_ = np.convolve(a, a)\n prod_[np.abs(prod_)<1e-8] = 0\n norm1 = (prod_ * 1/(np.arange(len(prod_))+1) * np.power(0.5, 1+np.arange(len(prod_)))).sum()\n\n a = psi2_coeff[ki,:]\n prod_ = np.convolve(a, a)\n prod_[np.abs(prod_)<1e-8] = 0\n norm2 = (prod_ * 1/(np.arange(len(prod_))+1) * (1-np.power(0.5, 1+np.arange(len(prod_))))).sum()\n norm_ = np.sqrt(norm1 + norm2)\n psi1_coeff[ki,:] /= norm_\n psi2_coeff[ki,:] /= norm_\n psi1_coeff[np.abs(psi1_coeff)<1e-8] = 0\n psi2_coeff[np.abs(psi2_coeff)<1e-8] = 0\n\n phi = [np.poly1d(np.flip(phi_coeff[i,:])) for i in range(k)]\n psi1 = [np.poly1d(np.flip(psi1_coeff[i,:])) for i in range(k)]\n psi2 = [np.poly1d(np.flip(psi2_coeff[i,:])) for i in range(k)]\n \n elif base == 'chebyshev':\n for ki in range(k):\n if ki == 0:\n phi_coeff[ki,:ki+1] = np.sqrt(2/np.pi)\n phi_2x_coeff[ki,:ki+1] = np.sqrt(2/np.pi) * np.sqrt(2)\n else:\n coeff_ = Poly(chebyshevt(ki, 2*x-1), x).all_coeffs()\n phi_coeff[ki,:ki+1] = np.flip(2/np.sqrt(np.pi) * np.array(coeff_).astype(np.float64))\n coeff_ = Poly(chebyshevt(ki, 4*x-1), x).all_coeffs()\n phi_2x_coeff[ki,:ki+1] = np.flip(np.sqrt(2) * 2 / np.sqrt(np.pi) * np.array(coeff_).astype(np.float64))\n \n phi = [partial(phi_, phi_coeff[i,:]) for i in range(k)]\n \n x = Symbol('x')\n kUse = 2*k\n roots = Poly(chebyshevt(kUse, 2*x-1)).all_roots()\n x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64)\n # x_m[x_m==0.5] = 0.5 + 1e-8 # add small noise to avoid the case of 0.5 belonging to both phi(2x) and phi(2x-1)\n # not needed for our purpose here, we use even k always to avoid\n wm = np.pi / kUse / 2\n \n psi1_coeff = np.zeros((k, k))\n psi2_coeff = np.zeros((k, k))\n\n psi1 = [[] for _ in range(k)]\n psi2 = [[] for _ in range(k)]\n\n for ki in range(k):\n psi1_coeff[ki,:] = 
phi_2x_coeff[ki,:]\n for i in range(k):\n proj_ = (wm * phi[i](x_m) * np.sqrt(2)* phi[ki](2*x_m)).sum()\n psi1_coeff[ki,:] -= proj_ * phi_coeff[i,:]\n psi2_coeff[ki,:] -= proj_ * phi_coeff[i,:]\n\n for j in range(ki):\n proj_ = (wm * psi1[j](x_m) * np.sqrt(2) * phi[ki](2*x_m)).sum() \n psi1_coeff[ki,:] -= proj_ * psi1_coeff[j,:]\n psi2_coeff[ki,:] -= proj_ * psi2_coeff[j,:]\n\n psi1[ki] = partial(phi_, psi1_coeff[ki,:], lb = 0, ub = 0.5)\n psi2[ki] = partial(phi_, psi2_coeff[ki,:], lb = 0.5, ub = 1)\n\n norm1 = (wm * psi1[ki](x_m) * psi1[ki](x_m)).sum()\n norm2 = (wm * psi2[ki](x_m) * psi2[ki](x_m)).sum()\n\n norm_ = np.sqrt(norm1 + norm2)\n psi1_coeff[ki,:] /= norm_\n psi2_coeff[ki,:] /= norm_\n psi1_coeff[np.abs(psi1_coeff)<1e-8] = 0\n psi2_coeff[np.abs(psi2_coeff)<1e-8] = 0\n\n psi1[ki] = partial(phi_, psi1_coeff[ki,:], lb = 0, ub = 0.5+1e-16)\n psi2[ki] = partial(phi_, psi2_coeff[ki,:], lb = 0.5+1e-16, ub = 1)\n \n return phi, psi1, psi2\n\n\ndef get_filter(base, k):\n \n def psi(psi1, psi2, i, inp):\n mask = (inp<=0.5) * 1.0\n return psi1[i](inp) * mask + psi2[i](inp) * (1-mask)\n \n if base not in ['legendre', 'chebyshev']:\n raise Exception('Base not supported')\n \n x = Symbol('x')\n H0 = np.zeros((k,k))\n H1 = np.zeros((k,k))\n G0 = np.zeros((k,k))\n G1 = np.zeros((k,k))\n PHI0 = np.zeros((k,k))\n PHI1 = np.zeros((k,k))\n phi, psi1, psi2 = get_phi_psi(k, base)\n if base == 'legendre':\n roots = Poly(legendre(k, 2*x-1)).all_roots()\n x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64)\n wm = 1/k/legendreDer(k,2*x_m-1)/eval_legendre(k-1,2*x_m-1)\n \n for ki in range(k):\n for kpi in range(k):\n H0[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki](x_m/2) * phi[kpi](x_m)).sum()\n G0[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, x_m/2) * phi[kpi](x_m)).sum()\n H1[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki]((x_m+1)/2) * phi[kpi](x_m)).sum()\n G1[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, (x_m+1)/2) * phi[kpi](x_m)).sum()\n \n PHI0 = np.eye(k)\n PHI1 = np.eye(k)\n \n elif base == 'chebyshev':\n x = Symbol('x')\n kUse = 2*k\n roots = Poly(chebyshevt(kUse, 2*x-1)).all_roots()\n x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64)\n # x_m[x_m==0.5] = 0.5 + 1e-8 # add small noise to avoid the case of 0.5 belonging to both phi(2x) and phi(2x-1)\n # not needed for our purpose here, we use even k always to avoid\n wm = np.pi / kUse / 2\n\n for ki in range(k):\n for kpi in range(k):\n H0[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki](x_m/2) * phi[kpi](x_m)).sum()\n G0[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, x_m/2) * phi[kpi](x_m)).sum()\n H1[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki]((x_m+1)/2) * phi[kpi](x_m)).sum()\n G1[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, (x_m+1)/2) * phi[kpi](x_m)).sum()\n\n PHI0[ki, kpi] = (wm * phi[ki](2*x_m) * phi[kpi](2*x_m)).sum() * 2\n PHI1[ki, kpi] = (wm * phi[ki](2*x_m-1) * phi[kpi](2*x_m-1)).sum() * 2\n \n PHI0[np.abs(PHI0)<1e-8] = 0\n PHI1[np.abs(PHI1)<1e-8] = 0\n\n H0[np.abs(H0)<1e-8] = 0\n H1[np.abs(H1)<1e-8] = 0\n G0[np.abs(G0)<1e-8] = 0\n G1[np.abs(G1)<1e-8] = 0\n \n return H0, H1, G0, G1, PHI0, PHI1\n\n\ndef train(model, train_loader, optimizer, epoch, device, verbose = 0,\n lossFn = None, lr_schedule=None, \n post_proc = lambda args: args):\n \n if lossFn is None:\n lossFn = nn.MSELoss()\n\n model.train()\n \n total_loss = 0.\n\n for batch_idx, (data, target) in enumerate(train_loader):\n \n bs = len(data)\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n \n output = model(data)\n \n 
target = post_proc(target)\n output = post_proc(output)\n loss = lossFn(output.view(bs, -1), target.view(bs, -1))\n \n loss.backward()\n optimizer.step()\n total_loss += loss.sum().item()\n if lr_schedule is not None: lr_schedule.step()\n \n if verbose>0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n \n return total_loss/len(train_loader.dataset)\n\n\ndef test(model, test_loader, device, verbose=0, lossFn=None,\n post_proc = lambda args: args):\n \n model.eval()\n if lossFn is None:\n lossFn = nn.MSELoss()\n \n \n total_loss = 0.\n predictions = []\n \n with torch.no_grad():\n for data, target in test_loader:\n bs = len(data)\n\n data, target = data.to(device), target.to(device)\n output = model(data)\n output = post_proc(output)\n \n loss = lossFn(output.view(bs, -1), target.view(bs, -1))\n total_loss += loss.sum().item()\n \n return total_loss/len(test_loader.dataset)\n\n\n# Till EoF\n# taken from FNO paper:\n# https://github.com/zongyi-li/fourier_neural_operator\n\n# normalization, pointwise gaussian\nclass UnitGaussianNormalizer(object):\n def __init__(self, x, eps=0.00001):\n super(UnitGaussianNormalizer, self).__init__()\n\n # x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T\n self.mean = torch.mean(x, 0)\n self.std = torch.std(x, 0)\n self.eps = eps\n\n def encode(self, x):\n x = (x - self.mean) / (self.std + self.eps)\n return x\n\n def decode(self, x, sample_idx=None):\n if sample_idx is None:\n std = self.std + self.eps # n\n mean = self.mean\n else:\n if len(self.mean.shape) == len(sample_idx[0].shape):\n std = self.std[sample_idx] + self.eps # batch*n\n mean = self.mean[sample_idx]\n if len(self.mean.shape) > len(sample_idx[0].shape):\n std = self.std[:,sample_idx]+ self.eps # T*batch*n\n mean = self.mean[:,sample_idx]\n\n # x is in shape of batch*n or T*batch*n\n x = (x * std) + mean\n return x\n\n def cuda(self):\n self.mean = self.mean.cuda()\n self.std = self.std.cuda()\n\n def cpu(self):\n self.mean = self.mean.cpu()\n self.std = self.std.cpu()\n\n# normalization, Gaussian\nclass GaussianNormalizer(object):\n def __init__(self, x, eps=0.00001):\n super(GaussianNormalizer, self).__init__()\n\n self.mean = torch.mean(x)\n self.std = torch.std(x)\n self.eps = eps\n\n def encode(self, x):\n x = (x - self.mean) / (self.std + self.eps)\n return x\n\n def decode(self, x, sample_idx=None):\n x = (x * (self.std + self.eps)) + self.mean\n return x\n\n def cuda(self):\n self.mean = self.mean.cuda()\n self.std = self.std.cuda()\n\n def cpu(self):\n self.mean = self.mean.cpu()\n self.std = self.std.cpu()\n\n\n# normalization, scaling by range\nclass RangeNormalizer(object):\n def __init__(self, x, low=0.0, high=1.0):\n super(RangeNormalizer, self).__init__()\n mymin = torch.min(x, 0)[0].view(-1)\n mymax = torch.max(x, 0)[0].view(-1)\n\n self.a = (high - low)/(mymax - mymin)\n self.b = -self.a*mymax + high\n\n def encode(self, x):\n s = x.size()\n x = x.view(s[0], -1)\n x = self.a*x + self.b\n x = x.view(s)\n return x\n\n def decode(self, x):\n s = x.size()\n x = x.view(s[0], -1)\n x = (x - self.b)/self.a\n x = x.view(s)\n return x\n \nclass LpLoss(object):\n def __init__(self, d=2, p=2, size_average=True, reduction=True):\n super(LpLoss, self).__init__()\n\n #Dimension and Lp-norm type are postive\n assert d > 0 and p > 0\n\n self.d = d\n self.p = p\n self.reduction = reduction\n self.size_average = size_average\n\n def abs(self, x, y):\n 
num_examples = x.size()[0]\n\n #Assume uniform mesh\n h = 1.0 / (x.size()[1] - 1.0)\n\n all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)\n\n if self.reduction:\n if self.size_average:\n return torch.mean(all_norms)\n else:\n return torch.sum(all_norms)\n\n return all_norms\n\n def rel(self, x, y):\n num_examples = x.size()[0]\n\n diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)\n y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)\n\n if self.reduction:\n if self.size_average:\n return torch.mean(diff_norms/y_norms)\n else:\n return torch.sum(diff_norms/y_norms)\n\n return diff_norms/y_norms\n\n def __call__(self, x, y):\n return self.rel(x, y)" ]
[ [ "torch.sum", "numpy.logical_or", "numpy.eye", "torch.min", "torch.max", "torch.nn.MSELoss", "numpy.zeros", "torch.std", "torch.no_grad", "numpy.abs", "numpy.arange", "scipy.special.eval_legendre", "numpy.polynomial.polynomial.Polynomial", "numpy.sqrt", "numpy.flip", "numpy.convolve", "numpy.array", "torch.mean" ] ]
meliascosta/dicom2nifti
[ "b3bb7c93bc8456f61e5372235627c1c91195b015" ]
[ "scripts/anonymize_testdata.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\ndicom2nifti\n\n@author: abrys\n\"\"\"\nfrom __future__ import print_function\n\nimport pydicom\nimport pydicom.uid\nimport pydicom.dataset\nimport logging\nimport numpy\nimport os\nimport datetime\nfrom six import string_types, iteritems\n\nimport dicom2nifti.compressed_dicom as compressed_dicom\nfrom dicom2nifti.common import read_dicom_directory, is_philips, is_siemens, is_ge\n\n\ndef anonymize_directory(input_directory, output_directory=None):\n if output_directory is None:\n output_directory = input_directory\n\n study_uid = pydicom.uid.generate_uid()\n series_uid = pydicom.uid.generate_uid()\n date = datetime.datetime.now().strftime(\"%Y%m%d\")\n time = datetime.datetime.now().strftime(\"%H%M%S.000000\")\n\n fields_to_keep = {'SpecificCharacterSet': None,\n 'ImageType': None,\n 'AcquisitionMatrix': None,\n 'SOPClassUID': None,\n 'SOPInstanceUID': None, # Will be replaced by file-unique UID\n 'StudyDate': date,\n 'SeriesDate': date,\n 'AcquisitionDate': date,\n 'ContentDate': date,\n 'StudyTime': time,\n 'AcquisitionTime': time,\n 'AcquisitionNumber': None,\n 'Modality': None,\n 'Manufacturer': None,\n 'ManufacturersModelName': None,\n 'PatientName': 'dicom2nifti',\n 'PatientID': 'dicom2nifti',\n 'PatientsBirthDate': date,\n 'PatientsSex': None,\n 'PatientsAge': '0Y',\n 'PatientPosition': None,\n 'ScanningSequence': None,\n 'SequenceVariant': None,\n 'MRAcquisitionType': None,\n 'SequenceName': 'dicom2nifti',\n 'RepetitionTime': None,\n 'EchoTime': None,\n 'InversionTime': None,\n 'DeviceSerialNumber': '1234',\n 'StudyInstanceUID': study_uid,\n 'SeriesInstanceUID': series_uid,\n 'StudyID': 'dicom2nifti',\n 'SeriesNumber': None,\n 'InstanceNumber': None,\n 'ImagePositionPatient': None,\n 'ImageOrientationPatient': None,\n 'SliceLocation': None,\n 'PhotometricInterpretation': None,\n 'Rows': None,\n 'Columns': None,\n 'PixelSpacing': None,\n 'BitsAllocated': None,\n 'BitsStored': None,\n 'HighBit': None,\n 'RescaleSlope': None,\n 'RescaleIntercept': None,\n 'PixelRepresentation': None,\n 'NumberOfFrames': None,\n 'SamplesPerPixel': None,\n 'SpacingBetweenSlices': None,\n # Pixel Data must be specified with hex code as it will not work for compressed dicoms\n (0x7fe0, 0x0010): None}\n\n if is_philips(read_dicom_directory(input_directory)):\n philips_fields = {\n (0x2001, 0x100a): None,\n (0x2001, 0x1003): None,\n (0x2001, 0x105f): None,\n (0x2005, 0x100d): None,\n (0x2005, 0x100e): None,\n (0x2005, 0x10b0): None,\n (0x2005, 0x10b1): None,\n (0x2005, 0x10b2): None,\n (0x0018, 0x9087): None,\n (0x0018, 0x9089): None,\n (0x5200, 0x9230): None,\n 'SharedFunctionalGroupsSequence': None}\n fields_to_keep.update(philips_fields)\n\n if is_siemens(read_dicom_directory(input_directory)):\n siemens_fields = {(0x0019, 0x100c): None,\n (0x0029, 0x1020): None,\n (0x0051, 0x100b): None,\n (0x0019, 0x100e): None}\n fields_to_keep.update(siemens_fields)\n\n if is_ge(read_dicom_directory(input_directory)):\n ge_fields = {(0x0020, 0x9056): None,\n (0x0020, 0x9057): None,\n (0x0043, 0x1039): None,\n (0x0019, 0x10bb): None,\n (0x0019, 0x10bc): None,\n (0x0019, 0x10bd): None}\n fields_to_keep.update(ge_fields)\n\n _anonymize_files(input_directory, output_directory, fields_to_keep)\n\n\ndef _anonymize_file(dicom_file_in, dicom_file_out, fields_to_keep):\n \"\"\"\n Anonimize a single dicomfile\n :param dicom_file_in: filepath for input file\n :param dicom_file_out: filepath for output file\n :param fields_to_keep: dicom tags to keep\n \"\"\"\n # Default meta_fields\n # 
Required fields according to reference\n\n meta_fields = ['MediaStorageSOPClassUID',\n 'MediaStorageSOPInstanceUID',\n 'ImplementationClassUID']\n\n # Load dicom_file_in\n dicom_in = compressed_dicom.read_file(dicom_file_in)\n\n # Create new dicom file\n # Set new file meta information\n file_meta = pydicom.dataset.Dataset()\n file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian\n for field_key in meta_fields:\n file_meta.add(dicom_in.file_meta.data_element(field_key))\n\n # Create the FileDataset instance (initially no data elements, but file_meta supplied)\n dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\\0' * 128)\n\n # Copy transfer syntax\n dicom_out.is_little_endian = dicom_in.is_little_endian\n dicom_out.is_implicit_VR = dicom_in.is_implicit_VR\n\n # Add the data elements\n for (field_key, field_value) in iteritems(fields_to_keep):\n logging.info(field_key)\n if field_key == (0x7fe0, 0x0010):\n\n # anonimize the dicom pixeldata\n random_data = numpy.random.randint(0, 255, dicom_in.pixel_array.shape).astype(dicom_in.pixel_array.dtype)\n dicom_out.PixelData = random_data.tostring() # = byte array (see pydicom docs)\n\n # dicom_out.PixelData = dicom_in.pixel_array.tostring() # = byte array (see pydicom docs)\n\n # noinspection PyPep8Naming\n dicom_out[0x7fe0, 0x0010].VR = 'OB'\n elif field_value is None:\n try:\n if isinstance(field_key, string_types):\n if field_key in dicom_in:\n dicom_out.add(dicom_in.data_element(field_key))\n else:\n if dicom_in.get(field_key) is not None:\n dicom_out.add(dicom_in[field_key])\n except KeyError:\n logging.info('Warning: %s not found' % field_key)\n else:\n setattr(dicom_out, field_key, field_value)\n\n # Save dicom_file_out\n # Make sure we have a directory\n if not os.path.exists(os.path.dirname(dicom_file_out)):\n logging.info('Decompressing files')\n\n # Save the file\n dicom_out.is_little_endian = True\n dicom_out.is_implicit_VR = False\n\n dicom_out.save_as(dicom_file_out, write_like_original=False)\n\n\ndef _anonymize_files(dicom_directory_in, dicom_directory_out, fields_to_keep):\n \"\"\"\n See anonymize_file for more information.\n\n series_UID and instance_UID will create a new UID respectively for the series for each directory or for the\n instance for each file. Note that for a multi-series dataset it is thus required that each series is in its own\n directory.\n\n \"\"\"\n\n # Make sure we have absolute paths\n dicom_directory_in = os.path.abspath(dicom_directory_in)\n dicom_directory_out = os.path.abspath(dicom_directory_out)\n\n # looping over all files\n for root, _, file_names in os.walk(dicom_directory_in):\n # New directory\n\n for file_name in file_names:\n # Create instance_UID\n fields_to_keep['SOPInstanceUID'] = pydicom.uid.generate_uid()\n\n dicom_file_in = os.path.join(root, file_name)\n current_dir = root[len(dicom_directory_in) + 1:]\n dicom_file_out = os.path.join(dicom_directory_out, current_dir, file_name)\n if compressed_dicom.is_dicom_file(dicom_file_in):\n logging.info(\"Processing \" + dicom_file_in)\n _anonymize_file(dicom_file_in, dicom_file_out, fields_to_keep)\n else:\n logging.info(\"Skipping \" + dicom_file_in + \", no dicom file\")\n\nif __name__ == '__main__':\n anonymize_directory('/***',\n '/***')\n" ]
[ [ "numpy.random.randint" ] ]
HenryOsborne/SemanticSegmentation
[ "d41549c3fd22731d7a12cdb1b438f730b0ebfcbc" ]
[ "models/CCNet/ccnet.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom models.CCNet.CC import CC_module as CrissCrossAttention\n\naffine_par = True\nBatchNorm2d = nn.BatchNorm2d\n\n\ndef outS(i):\n i = int(i)\n i = (i + 1) / 2\n i = int(np.ceil((i + 1) / 2.0))\n i = (i + 1) / 2\n return i\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=dilation * multi_grid, dilation=dilation * multi_grid, bias=False)\n self.bn2 = BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=False)\n self.relu_inplace = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.dilation = dilation\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n out = self.relu_inplace(out)\n\n return out\n\n\nclass RCCAModule(nn.Module):\n def __init__(self, in_channels, out_channels, num_classes):\n super(RCCAModule, self).__init__()\n inter_channels = in_channels // 4\n self.conva = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n BatchNorm2d(inter_channels), nn.ReLU(inplace=False))\n self.cca = CrissCrossAttention(inter_channels)\n self.convb = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n BatchNorm2d(inter_channels), nn.ReLU(inplace=False))\n\n self.bottleneck = nn.Sequential(\n nn.Conv2d(in_channels + inter_channels, out_channels, kernel_size=3, padding=1, dilation=1, bias=False),\n BatchNorm2d(out_channels), nn.ReLU(inplace=False),\n nn.Dropout2d(0.1),\n nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def forward(self, x, recurrence=2):\n output = self.conva(x)\n for i in range(recurrence):\n output = self.cca(output)\n output = self.convb(output)\n\n output = self.bottleneck(torch.cat([x, output], 1))\n return output\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes, recurrence):\n self.inplanes = 128\n super(ResNet, self).__init__()\n self.conv1 = conv3x3(3, 64, stride=2)\n self.bn1 = BatchNorm2d(64)\n self.relu1 = nn.ReLU(inplace=False)\n self.conv2 = conv3x3(64, 64)\n self.bn2 = BatchNorm2d(64)\n self.relu2 = nn.ReLU(inplace=False)\n self.conv3 = conv3x3(64, 128)\n self.bn3 = BatchNorm2d(128)\n self.relu3 = nn.ReLU(inplace=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.relu = nn.ReLU(inplace=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1, 
dilation=4, multi_grid=(1, 1, 1))\n # self.layer5 = PSPModule(2048, 512)\n self.head = RCCAModule(2048, 512, num_classes)\n\n self.dsn = nn.Sequential(\n nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),\n BatchNorm2d(512), nn.ReLU(inplace=False),\n nn.Dropout2d(0.1),\n nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n self.conv4 = nn.Conv2d(num_classes * 2, num_classes, kernel_size=1, stride=1, bias=False)\n # self.criterion = criterion\n self.recurrence = recurrence\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(planes * block.expansion, affine=affine_par))\n\n layers = []\n generate_multi_grid = lambda index, grids: grids[index % len(grids)] if isinstance(grids, tuple) else 1\n layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample,\n multi_grid=generate_multi_grid(0, multi_grid)))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(\n block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid)))\n\n return nn.Sequential(*layers)\n\n def forward(self, x, labels=None):\n size = (x.shape[2], x.shape[3])\n x = self.relu1(self.bn1(self.conv1(x)))\n x = self.relu2(self.bn2(self.conv2(x)))\n x = self.relu3(self.bn3(self.conv3(x)))\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x_dsn = self.dsn(x)\n # print(x_dsn.shape)\n x = self.layer4(x)\n # print(x.shape)\n x = self.head(x, self.recurrence)\n # print(x.shape)\n outs = torch.cat([x, x_dsn], 1)\n # print(outs.shape)\n outs = self.conv4(outs)\n outs = nn.Upsample(size, mode='bilinear', align_corners=True)(outs)\n # print(outs)\n return outs\n\n\ndef resnet152(num_classes=2, pretrained_model=None, recurrence=2, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], num_classes, recurrence)\n return model\n\n\nif __name__ == \"__main__\":\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model = resnet152()\n model = model.to(device)\n x = torch.rand((2, 3, 512, 512))\n x = x.to(device)\n print(x.shape)\n output = model(x)\n print(output.shape)\n" ]
[ [ "torch.nn.MaxPool2d", "numpy.ceil", "torch.nn.Dropout2d", "torch.rand", "torch.nn.Upsample", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.cat" ] ]
pollenjp/pytorch-lightning
[ "06f83492919c4c72a989f9bb8f271b92b479648b" ]
[ "tests/trainer/test_trainer.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport math\nimport os\nimport pickle\nimport sys\nfrom argparse import Namespace\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom unittest.mock import ANY, call, patch\n\nimport cloudpickle\nimport pytest\nimport torch\nfrom omegaconf import OmegaConf\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\n\nimport tests.helpers.utils as tutils\nfrom pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\nfrom pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter\nfrom pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler\nfrom pytorch_lightning.plugins import DDPSpawnPlugin\nfrom pytorch_lightning.trainer.states import TrainerFn\nfrom pytorch_lightning.utilities import DeviceType, DistributedType\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.seed import seed_everything\nfrom tests.base import EvalModelTemplate\nfrom tests.helpers import BoringModel, RandomDataset\nfrom tests.helpers.runif import RunIf\n\n\[email protected](\"url_ckpt\", [True, False])\ndef test_no_val_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv(\"TORCH_HOME\", str(tmpdir))\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n logger=logger,\n callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n )\n # fit model\n trainer.fit(model)\n # training complete\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n # save model\n new_weights_path = os.path.join(tmpdir, \"save_test.ckpt\")\n trainer.save_checkpoint(new_weights_path)\n\n # assert ckpt has hparams\n ckpt = torch.load(new_weights_path)\n assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), \"hyper_parameters missing from checkpoints\"\n\n # load new model\n hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)\n hparams_path = os.path.join(hparams_path, \"hparams.yaml\")\n ckpt_path = (\n f\"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}\"\n if url_ckpt else new_weights_path\n )\n model_2 = EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path,\n hparams_file=hparams_path,\n )\n model_2.eval()\n\n\[email protected](\"url_ckpt\", [True, False])\ndef 
test_no_val_end_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv(\"TORCH_HOME\", tmpdir)\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n logger=logger,\n callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n )\n trainer.fit(model)\n\n # training complete\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n # save model\n new_weights_path = os.path.join(tmpdir, \"save_test.ckpt\")\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)\n hparams_path = os.path.join(hparams_path, \"hparams.yaml\")\n ckpt_path = (\n f\"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}\"\n if url_ckpt else new_weights_path\n )\n model_2 = EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path,\n hparams_file=hparams_path,\n )\n model_2.eval()\n\n\[email protected](\"url_ckpt\", [True, False])\ndef test_strict_model_load(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv(\"TORCH_HOME\", tmpdir)\n\n model = EvalModelTemplate()\n # Extra layer\n model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n logger=logger,\n callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n )\n trainer.fit(model)\n\n # training complete\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n # save model\n new_weights_path = os.path.join(tmpdir, \"save_test.ckpt\")\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)\n hparams_path = os.path.join(hparams_path, \"hparams.yaml\")\n ckpt_path = (\n f\"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}\"\n if url_ckpt else new_weights_path\n )\n\n try:\n EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path,\n hparams_file=hparams_path,\n )\n # todo: specify the possible exception\n except Exception:\n failed = True\n else:\n failed = False\n\n assert failed, \"Model should not been loaded since the extra layer added.\"\n\n failed = False\n try:\n EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path,\n hparams_file=hparams_path,\n strict=False,\n )\n # todo: specify the possible exception\n except Exception:\n failed = True\n\n assert not failed, \"Model should be loaded due to strict=False.\"\n\n\[email protected](\"accumulate_grad_batches\", (1, 2, 3))\ndef test_trainer_accumulate_grad_batches_zero_grad(tmpdir, accumulate_grad_batches):\n with patch(\"torch.optim.SGD.zero_grad\") as sgd_zero_grad:\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=20,\n limit_val_batches=1,\n max_epochs=1,\n weights_summary=None,\n accumulate_grad_batches=accumulate_grad_batches,\n )\n trainer.fit(model)\n\n assert sgd_zero_grad.call_count == math.ceil(trainer.limit_train_batches / 
accumulate_grad_batches)\n\n\[email protected](\n [\"accumulate_grad_batches\", \"limit_train_batches\"],\n [\n ({\n 1: 2,\n 3: 4\n }, 1.0),\n ({\n 1: 2,\n 3: 4\n }, 0.5), # not to be divisible by accumulate_grad_batches on purpose\n (3, 1.0),\n (3, 0.8), # not to be divisible by accumulate_grad_batches on purpose\n (4, 1.0),\n (4, 0.7), # not to be divisible by accumulate_grad_batches on purpose\n ],\n)\ndef test_gradient_accumulation_scheduling_last_batch(tmpdir, accumulate_grad_batches, limit_train_batches):\n \"\"\" Verify optimizer.step() applied to last batch while grad accumulation \"\"\"\n\n class TestModel(BoringModel):\n\n def state_dict(self, *args, **kwargs):\n return deepcopy(super().state_dict(*args, **kwargs))\n\n def check(self, d1, d2, equal=True):\n keys = d1.keys() | d2.keys()\n values = [torch.equal(d1[k], d2[k]) for k in keys]\n return all(values) if equal else not any(values)\n\n def backward(self, *args, **kwargs) -> None:\n pre_bwd_state_dict = self.state_dict()\n assert self.check(self.start_state_dict, pre_bwd_state_dict)\n\n out = super().backward(*args, **kwargs)\n\n # state dict is equal, just the gradients changed\n assert self.check(pre_bwd_state_dict, self.state_dict())\n\n return out\n\n def optimizer_step(self, *args, **kwargs):\n pre_opt_step_state_dict = self.state_dict()\n assert self.check(self.start_state_dict, pre_opt_step_state_dict)\n\n # this calls `backward` and `on_after_backward` inside the closure\n out = super().optimizer_step(*args, **kwargs)\n\n # the state dict changed\n assert self.check(pre_opt_step_state_dict, self.state_dict(), equal=False)\n\n self.opt_step_called = True\n return out\n\n def on_train_batch_start(self, *_):\n self.start_state_dict = self.state_dict()\n self.opt_step_called = False\n\n def on_train_batch_end(self, outputs, batch, batch_idx, *_):\n end_state_dict = self.state_dict()\n is_last_batch = (batch_idx + 1) == self.trainer.num_training_batches\n\n if is_last_batch or self.opt_step_called:\n assert self.check(self.start_state_dict, end_state_dict, equal=False)\n else:\n assert self.check(self.start_state_dict, end_state_dict)\n\n model = TestModel()\n trainer = Trainer(\n accumulate_grad_batches=accumulate_grad_batches,\n max_epochs=2,\n limit_train_batches=limit_train_batches,\n limit_val_batches=0,\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n )\n\n trainer.fit(model)\n\n\ndef test_loading_meta_tags(tmpdir):\n \"\"\" test for backward compatibility to meta_tags.csv \"\"\"\n tutils.reset_seed()\n\n hparams = EvalModelTemplate.get_default_hparams()\n\n # save tags\n logger = tutils.get_default_logger(tmpdir)\n logger.log_hyperparams(Namespace(some_str=\"a_str\", an_int=1, a_float=2.0))\n logger.log_hyperparams(hparams)\n logger.save()\n\n # load hparams\n path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)\n hparams_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_HPARAMS_FILE)\n hparams = load_hparams_from_yaml(hparams_path)\n\n # save as legacy meta_tags.csv\n tags_path = os.path.join(path_expt_dir, \"meta_tags.csv\")\n save_hparams_to_tags_csv(tags_path, hparams)\n\n tags = load_hparams_from_tags_csv(tags_path)\n\n assert hparams == tags\n\n\ndef test_loading_yaml(tmpdir):\n tutils.reset_seed()\n\n hparams = EvalModelTemplate.get_default_hparams()\n\n # save tags\n logger = tutils.get_default_logger(tmpdir)\n logger.log_hyperparams(Namespace(some_str=\"a_str\", an_int=1, a_float=2.0))\n logger.log_hyperparams(hparams)\n logger.save()\n\n # load hparams\n path_expt_dir = 
tutils.get_data_path(logger, path_dir=tmpdir)\n hparams_path = os.path.join(path_expt_dir, \"hparams.yaml\")\n tags = load_hparams_from_yaml(hparams_path)\n\n assert tags[\"batch_size\"] == 32 and tags[\"hidden_dim\"] == 1000\n\n\n@pytest.mark.parametrize(\n \"save_top_k,save_last,expected_files\",\n [\n pytest.param(-1, False, [f\"epoch={i}.ckpt\" for i in range(5)], id=\"CASE K=-1 (all)\"),\n pytest.param(1, False, {\"epoch=4.ckpt\"}, id=\"CASE K=1 (2.5, epoch 4)\"),\n pytest.param(2, False, [f\"epoch={i}.ckpt\" for i in (2, 4)], id=\"CASE K=2 (2.5 epoch 4, 2.8 epoch 2)\"),\n pytest.param(4, False, [f\"epoch={i}.ckpt\" for i in range(1, 5)], id=\"CASE K=4 (save all 4 base)\"),\n pytest.param(3, False, [f\"epoch={i}.ckpt\" for i in range(2, 5)], id=\"CASE K=3 (save the 2nd, 3rd, 4th model)\"),\n pytest.param(1, True, {\"epoch=4.ckpt\", \"last.ckpt\"}, id=\"CASE K=1 (save the 4th model and the last model)\"),\n ],\n)\ndef test_model_checkpoint_options(tmpdir, save_top_k, save_last, expected_files):\n \"\"\"Test ModelCheckpoint options.\"\"\"\n\n def mock_save_function(filepath, *args):\n open(filepath, \"a\").close()\n\n # simulated losses\n losses = [10, 9, 2.8, 5, 2.5]\n\n checkpoint_callback = ModelCheckpoint(\n dirpath=tmpdir,\n filename='{epoch}',\n monitor='checkpoint_on',\n save_top_k=save_top_k,\n save_last=save_last,\n verbose=True\n )\n trainer = Trainer()\n trainer.state.fn = TrainerFn.FITTING\n trainer.save_checkpoint = mock_save_function\n\n # emulate callback's calls during the training\n for i, loss in enumerate(losses):\n trainer.fit_loop.current_epoch = i\n trainer.fit_loop.global_step = i\n trainer.logger_connector.callback_metrics.update({\"checkpoint_on\": loss})\n checkpoint_callback.on_validation_end(trainer, trainer.lightning_module)\n\n file_lists = set(os.listdir(tmpdir))\n\n assert len(file_lists) == len(\n expected_files\n ), f\"Should save {len(expected_files)} models when save_top_k={save_top_k} but found={file_lists}\"\n\n # verify correct naming\n for fname in expected_files:\n assert fname in file_lists\n\n\ndef test_model_checkpoint_only_weights(tmpdir):\n \"\"\"Tests use case where ModelCheckpoint is configured to save only model weights, and\n user tries to load checkpoint to resume training.\n \"\"\"\n model = EvalModelTemplate()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_weights_only=True)],\n )\n # fit model\n trainer.fit(model)\n # training complete\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]\n\n # assert saved checkpoint has no trainer data\n checkpoint = torch.load(checkpoint_path)\n assert \"optimizer_states\" not in checkpoint, \"checkpoint should contain only model weights\"\n assert \"lr_schedulers\" not in checkpoint, \"checkpoint should contain only model weights\"\n\n # assert loading model works when checkpoint has only weights\n assert EvalModelTemplate.load_from_checkpoint(checkpoint_path=checkpoint_path)\n\n # directly save model\n new_weights_path = os.path.join(tmpdir, \"save_test.ckpt\")\n trainer.save_checkpoint(new_weights_path, weights_only=True)\n # assert saved checkpoint has no trainer data\n checkpoint = torch.load(new_weights_path)\n assert \"optimizer_states\" not in checkpoint, \"checkpoint should contain only model weights\"\n assert \"lr_schedulers\" not in checkpoint, \"checkpoint should contain only model weights\"\n\n # 
assert restoring train state fails\n with pytest.raises(KeyError, match=\"checkpoint contains only the model\"):\n trainer.checkpoint_connector.restore(new_weights_path)\n\n\ndef test_model_freeze_unfreeze():\n model = EvalModelTemplate()\n model.freeze()\n model.unfreeze()\n\n\n@pytest.mark.parametrize(\"url_ckpt\", [True, False])\ndef test_resume_from_checkpoint_epoch_restored(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Verify resuming from checkpoint runs the right number of epochs\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv(\"TORCH_HOME\", tmpdir)\n\n class TestModel(BoringModel):\n # Model that tracks epochs and batches seen\n num_epochs_end_seen = 0\n num_batches_seen = 0\n num_on_load_checkpoint_called = 0\n\n def on_epoch_end(self):\n self.num_epochs_end_seen += 1\n\n def on_train_batch_start(self, *_):\n self.num_batches_seen += 1\n\n def on_load_checkpoint(self, _):\n self.num_on_load_checkpoint_called += 1\n\n model = TestModel()\n trainer = Trainer(\n max_epochs=2,\n limit_train_batches=0.65,\n limit_val_batches=1,\n callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_top_k=-1)],\n default_root_dir=tmpdir,\n val_check_interval=1.0,\n progress_bar_refresh_rate=0,\n logger=False,\n weights_summary=None,\n )\n trainer.fit(model)\n\n # `on_epoch_end` will be called once for val_sanity, twice for train, twice for val\n assert model.num_epochs_end_seen == 1 + 2 + 2\n assert model.num_batches_seen == trainer.num_training_batches * 2\n assert model.num_on_load_checkpoint_called == 0\n\n # Other checkpoints can be uncommented if/when resuming mid-epoch is supported\n checkpoints = Path(trainer.checkpoint_callback.dirpath).glob(\"*.ckpt\")\n if url_ckpt:\n # transform local paths into url checkpoints\n ip, port = tmpdir_server\n checkpoints = [f\"http://{ip}:{port}/\" + ckpt.name for ckpt in checkpoints]\n\n for ckpt in checkpoints:\n next_model = TestModel()\n state = pl_load(ckpt)\n\n # Resume training\n new_trainer = Trainer(\n default_root_dir=tmpdir,\n resume_from_checkpoint=ckpt,\n max_epochs=2,\n )\n new_trainer.fit(next_model)\n assert state[\"global_step\"] + next_model.num_batches_seen == trainer.num_training_batches * trainer.max_epochs\n assert next_model.num_on_load_checkpoint_called == 1\n\n\ndef test_trainer_max_steps_and_epochs(tmpdir):\n \"\"\"Verify model trains according to specified max steps\"\"\"\n model = BoringModel()\n num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)\n\n # define less train steps than epochs\n trainer_kwargs = {\n 'limit_train_batches': 0.5,\n 'default_root_dir': tmpdir,\n 'max_epochs': 3,\n 'max_steps': num_train_samples + 10,\n 'logger': False,\n 'weights_summary': None,\n 'progress_bar_refresh_rate': 0,\n }\n trainer = Trainer(**trainer_kwargs)\n trainer.fit(model)\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.global_step == trainer.max_steps, \"Model did not stop at max_steps\"\n\n # define less train epochs than steps\n trainer_kwargs['max_epochs'] = 2\n trainer_kwargs['max_steps'] = 3 * 2 * num_train_samples\n trainer = Trainer(**trainer_kwargs)\n trainer.fit(model)\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.global_step == num_train_samples * trainer.max_epochs\n assert trainer.current_epoch == trainer.max_epochs - 1, \"Model did not stop at max_epochs\"\n\n\ndef test_trainer_min_steps_and_epochs(tmpdir):\n \"\"\"Verify model trains according to 
specified min steps\"\"\"\n model = EvalModelTemplate()\n num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)\n\n trainer_kwargs = {\n 'limit_train_batches': 0.5,\n 'default_root_dir': tmpdir,\n # define callback for stopping the model\n 'callbacks': [EarlyStopping(monitor=\"early_stop_on\", min_delta=1.0)],\n 'val_check_interval': 2,\n 'min_epochs': 1,\n 'max_epochs': 7,\n # define less min steps than 1 epoch\n 'min_steps': num_train_samples // 2,\n 'logger': False,\n 'weights_summary': None,\n 'progress_bar_refresh_rate': 0,\n }\n trainer = Trainer(**trainer_kwargs)\n trainer.fit(model)\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.current_epoch > 0\n assert trainer.global_step >= num_train_samples, \"Model did not train for at least min_epochs\"\n\n # define less epochs than min_steps\n trainer_kwargs[\"min_steps\"] = math.floor(num_train_samples * 1.5)\n trainer = Trainer(**trainer_kwargs)\n trainer.fit(model)\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.current_epoch > 0\n assert trainer.global_step >= math.floor(num_train_samples * 1.5), \"Model did not train for at least min_steps\"\n\n\ndef test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):\n \"\"\" Test that min_epochs/min_steps in Trainer are enforced even if EarlyStopping is triggered. \"\"\"\n\n class TestModel(BoringModel):\n training_step_invoked = 0\n\n def training_step(self, batch, batch_idx):\n output = super().training_step(batch, batch_idx)\n output[\"loss\"] = output[\"loss\"] * 0.0 # force minimal loss to trigger early stopping\n self.log(\"loss\", output[\"loss\"])\n self.training_step_invoked += 1\n assert not self.trainer.should_stop\n return output\n\n model = TestModel()\n early_stop = EarlyStopping(monitor=\"loss\", patience=0, check_on_train_epoch_end=True)\n min_epochs = 5\n trainer = Trainer(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n min_epochs=min_epochs,\n limit_val_batches=0,\n limit_train_batches=2,\n callbacks=[early_stop]\n )\n with caplog.at_level(logging.INFO, logger=\"pytorch_lightning.trainer.trainer\"):\n trainer.fit(model)\n\n message = f\"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. 
Training will continue\"\n num_messages = len([record.message for record in caplog.records if message in record.message])\n assert num_messages == min_epochs - 2\n assert model.training_step_invoked == min_epochs * 2\n\n\ndef test_trainer_max_steps_accumulate_batches(tmpdir):\n \"\"\"Verify model trains according to specified max steps with grad accumulated batches\"\"\"\n model = BoringModel()\n num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)\n\n # define less train steps than epochs\n trainer = Trainer(\n limit_train_batches=0.5,\n default_root_dir=tmpdir,\n max_steps=num_train_samples + 10,\n accumulate_grad_batches=10,\n logger=False,\n weights_summary=None,\n progress_bar_refresh_rate=0,\n )\n trainer.fit(model)\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.global_step == trainer.max_steps, \"Model did not stop at max_steps\"\n\n\ndef test_benchmark_option(tmpdir):\n \"\"\"Verify benchmark option.\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple\n\n # verify torch.backends.cudnn.benchmark is not turned on\n assert not torch.backends.cudnn.benchmark\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n benchmark=True,\n )\n trainer.fit(model)\n\n # verify training completed\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n # verify torch.backends.cudnn.benchmark is not turned off\n assert torch.backends.cudnn.benchmark\n\n\n@pytest.mark.parametrize(\"ckpt_path\", (None, \"best\", \"specific\"))\n@pytest.mark.parametrize(\"save_top_k\", (-1, 0, 1, 2))\n@pytest.mark.parametrize(\"fn\", (\"validate\", \"test\", \"predict\"))\ndef test_tested_checkpoint_path(tmpdir, ckpt_path, save_top_k, fn):\n\n class TestModel(BoringModel):\n\n def validation_step(self, batch, batch_idx):\n self.log(\"foo\", -batch_idx)\n return super().validation_step(batch, batch_idx)\n\n def test_step(self, *args):\n return self.validation_step(*args)\n\n def predict_step(self, batch, *_):\n return self(batch)\n\n model = TestModel()\n model.test_epoch_end = None\n trainer = Trainer(\n max_epochs=2,\n limit_val_batches=1,\n limit_test_batches=1,\n limit_predict_batches=1,\n progress_bar_refresh_rate=0,\n default_root_dir=tmpdir,\n callbacks=[ModelCheckpoint(monitor=\"foo\", save_top_k=save_top_k)],\n )\n trainer.fit(model)\n\n trainer_fn = getattr(trainer, fn)\n path_attr = f\"{fn}{'d' if fn == 'validate' else 'ed'}_ckpt_path\"\n assert getattr(trainer, path_attr) is None\n\n if ckpt_path == \"best\":\n # ckpt_path is 'best', meaning we load the best weights\n if save_top_k == 0:\n with pytest.raises(MisconfigurationException, match=\".*is not configured to save the best.*\"):\n trainer_fn(ckpt_path=ckpt_path)\n else:\n trainer_fn(ckpt_path=ckpt_path)\n assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path\n elif ckpt_path is None:\n # ckpt_path is None, meaning we don't load any checkpoints and\n # use the weights from the end of training\n trainer_fn(ckpt_path=ckpt_path)\n assert getattr(trainer, path_attr) is None\n else:\n # specific checkpoint, pick one from saved ones\n if save_top_k == 0:\n with pytest.raises(FileNotFoundError):\n trainer_fn(ckpt_path=\"random.ckpt\")\n else:\n ckpt_path = str(\n list((Path(tmpdir) / f\"lightning_logs/version_{trainer.logger.version}/checkpoints\").iterdir()\n )[0].absolute()\n )\n trainer_fn(ckpt_path=ckpt_path)\n assert getattr(trainer, path_attr) == ckpt_path\n\n\ndef test_disabled_training(tmpdir):\n 
\"\"\"Verify that `limit_train_batches=0` disables the training loop unless `fast_dev_run=True`.\"\"\"\n\n class CurrentModel(BoringModel):\n\n training_step_invoked = False\n training_epoch_end_invoked = False\n\n def training_step(self, *args, **kwargs):\n self.training_step_invoked = True\n return super().training_step(*args, **kwargs)\n\n def training_epoch_end(self, *args, **kwargs):\n self.training_epoch_end_invoked = True\n return super().training_epoch_end(*args, **kwargs)\n\n model = CurrentModel()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.0,\n limit_val_batches=0.2,\n fast_dev_run=False,\n )\n\n before_state_dict = deepcopy(model.state_dict())\n\n trainer = Trainer(**trainer_options)\n trainer.fit(model)\n\n after_state_dict = model.state_dict()\n\n for key in before_state_dict.keys():\n assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))\n\n # check that limit_train_batches=0 turns off training\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.current_epoch == 0\n assert not model.training_step_invoked, \"`training_step` should not run when `limit_train_batches=0`\"\n assert not model.training_epoch_end_invoked, \"`training_epoch_end` should not run when `limit_train_batches=0`\"\n\n # check that limit_train_batches has no influence when fast_dev_run is turned on\n model = CurrentModel()\n trainer_options.update(fast_dev_run=True)\n before_state_dict = deepcopy(model.state_dict())\n\n trainer = Trainer(**trainer_options)\n trainer.fit(model)\n\n after_state_dict = model.state_dict()\n\n for key in before_state_dict.keys():\n assert not torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.current_epoch == 0\n assert model.training_step_invoked, \"did not run `training_step` with `fast_dev_run=True`\"\n assert model.training_epoch_end_invoked, \"did not run `training_epoch_end` with `fast_dev_run=True`\"\n\n\ndef test_disabled_validation(tmpdir):\n \"\"\"Verify that `limit_val_batches=0` disables the validation loop unless `fast_dev_run=True`.\"\"\"\n\n class CurrentModel(EvalModelTemplate):\n\n validation_step_invoked = False\n validation_epoch_end_invoked = False\n\n def validation_step(self, *args, **kwargs):\n self.validation_step_invoked = True\n return super().validation_step(*args, **kwargs)\n\n def validation_epoch_end(self, *args, **kwargs):\n self.validation_epoch_end_invoked = True\n return super().validation_epoch_end(*args, **kwargs)\n\n hparams = EvalModelTemplate.get_default_hparams()\n model = CurrentModel(**hparams)\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.4,\n limit_val_batches=0.0,\n fast_dev_run=False,\n )\n\n trainer = Trainer(**trainer_options)\n trainer.fit(model)\n\n # check that limit_val_batches=0 turns off validation\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.current_epoch == 1\n assert not model.validation_step_invoked, \"`validation_step` should not run when `limit_val_batches=0`\"\n assert not model.validation_epoch_end_invoked, \"`validation_epoch_end` should not run when `limit_val_batches=0`\"\n\n # check that limit_val_batches has no influence when fast_dev_run is turned on\n model = CurrentModel(**hparams)\n trainer_options.update(fast_dev_run=True)\n trainer = 
Trainer(**trainer_options)\n trainer.fit(model)\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.current_epoch == 0\n assert model.validation_step_invoked, \"did not run `validation_step` with `fast_dev_run=True`\"\n assert model.validation_epoch_end_invoked, \"did not run `validation_epoch_end` with `fast_dev_run=True`\"\n\n\ndef test_nan_loss_detection(tmpdir):\n\n class CurrentModel(BoringModel):\n test_batch_inf = 3\n\n def training_step(self, batch, batch_idx):\n output = super().training_step(batch, batch_idx)\n if batch_idx == self.test_batch_inf:\n if isinstance(output, dict):\n output[\"loss\"] *= torch.tensor(math.inf) # make loss infinite\n else:\n output /= 0\n return output\n\n model = CurrentModel()\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=(model.test_batch_inf + 1),\n terminate_on_nan=True,\n )\n\n with pytest.raises(ValueError, match=r\".*The loss returned in `training_step` is.*\"):\n trainer.fit(model)\n assert trainer.global_step == model.test_batch_inf\n\n for param in model.parameters():\n assert torch.isfinite(param).all()\n\n\ndef test_nan_params_detection(tmpdir):\n\n class CurrentModel(BoringModel):\n test_batch_nan = 3\n\n def on_after_backward(self):\n if self.global_step == self.test_batch_nan:\n # simulate parameter that became nan\n torch.nn.init.constant_(self.layer.bias, math.nan)\n\n model = CurrentModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=(model.test_batch_nan + 1),\n terminate_on_nan=True,\n )\n\n with pytest.raises(ValueError, match=r\".*Detected nan and/or inf values in `layer.bias`.*\"):\n trainer.fit(model)\n assert trainer.global_step == model.test_batch_nan\n\n # after aborting the training loop, model still has nan-valued params\n params = torch.cat([param.view(-1) for param in model.parameters()])\n assert not torch.isfinite(params).all()\n\n\ndef test_trainer_interrupted_flag(tmpdir):\n \"\"\"Test the flag denoting that a user interrupted training.\"\"\"\n\n model = EvalModelTemplate()\n\n class InterruptCallback(Callback):\n\n def __init__(self):\n super().__init__()\n\n def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):\n raise KeyboardInterrupt\n\n class HandleInterruptCallback(Callback):\n\n def __init__(self):\n super().__init__()\n self.exc_info = None\n\n def on_keyboard_interrupt(self, trainer, pl_module):\n self.exc_info = sys.exc_info()\n\n interrupt_callback = InterruptCallback()\n handle_interrupt_callback = HandleInterruptCallback()\n\n trainer = Trainer(\n callbacks=[interrupt_callback, handle_interrupt_callback],\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n progress_bar_refresh_rate=0,\n logger=False,\n default_root_dir=tmpdir,\n )\n assert not trainer.interrupted\n assert handle_interrupt_callback.exc_info is None\n trainer.fit(model)\n assert trainer.interrupted\n assert isinstance(handle_interrupt_callback.exc_info[1], KeyboardInterrupt)\n\n\ndef test_gradient_clipping(tmpdir):\n \"\"\"\n Test gradient clipping\n \"\"\"\n tutils.reset_seed()\n\n model = EvalModelTemplate()\n\n trainer = Trainer(\n max_steps=1,\n max_epochs=1,\n gradient_clip_val=1.0,\n default_root_dir=tmpdir,\n )\n\n old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward\n\n def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n # test 
that gradient is clipped correctly\n ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)\n parameters = model.parameters()\n grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)\n assert (grad_norm - 1.0).abs() < 0.01, \"Gradient norm != 1.0: {grad_norm}\".format(grad_norm=grad_norm)\n\n return ret_val\n\n trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward\n # for the test\n model.prev_called_batch_idx = 0\n\n trainer.fit(model)\n\n\ndef test_gradient_clipping_by_value(tmpdir):\n \"\"\"\n Test gradient clipping by value\n \"\"\"\n tutils.reset_seed()\n\n model = BoringModel()\n\n grad_clip_val = 1e-10\n trainer = Trainer(\n max_steps=1,\n max_epochs=1,\n gradient_clip_val=grad_clip_val,\n gradient_clip_algorithm='value',\n default_root_dir=tmpdir\n )\n\n old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward\n\n def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n # test that gradient is clipped correctly\n ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)\n parameters = model.parameters()\n grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]\n grad_max = torch.max(torch.stack(grad_max_list))\n assert abs(grad_max.item() - grad_clip_val) < 1e-11, \\\n f\"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} .\"\n\n return ret_val\n\n trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward\n # for the test\n model.prev_called_batch_idx = 0\n\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, amp_native=True)\ndef test_gradient_clipping_fp16(tmpdir):\n \"\"\"\n Test gradient clipping with fp16\n \"\"\"\n tutils.reset_seed()\n\n model = EvalModelTemplate()\n\n trainer = Trainer(\n max_steps=1,\n max_epochs=1,\n precision=16,\n gpus=1,\n gradient_clip_val=1.0,\n default_root_dir=tmpdir,\n )\n\n old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward\n\n def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n # test that gradient is clipped correctly\n ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)\n parameters = model.parameters()\n grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)\n assert (grad_norm - 1.0).abs() < 0.01, \"Gradient norm != 1.0: {grad_norm}\".format(grad_norm=grad_norm)\n\n return ret_val\n\n trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward\n model.prev_called_batch_idx = 0\n\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, amp_native=True)\ndef test_gradient_clipping_by_value_fp16(tmpdir):\n \"\"\"\n Test gradient clipping by value with fp16\n \"\"\"\n tutils.reset_seed()\n\n model = BoringModel()\n grad_clip_val = 1e-10\n trainer = Trainer(\n max_steps=1,\n max_epochs=1,\n precision=16,\n gpus=1,\n gradient_clip_val=grad_clip_val,\n gradient_clip_algorithm='value',\n default_root_dir=tmpdir,\n )\n\n old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward\n\n def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):\n 
\"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n # test that gradient is clipped correctly\n ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)\n parameters = model.parameters()\n grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]\n grad_max = torch.max(torch.stack(grad_max_list))\n assert abs(grad_max.item() - grad_clip_val) < 1e-11, \\\n f\"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} .\"\n\n return ret_val\n\n trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward\n model.prev_called_batch_idx = 0\n\n trainer.fit(model)\n\n\ndef test_gpu_choice(tmpdir):\n trainer_options = dict(default_root_dir=tmpdir)\n # Only run if CUDA is available\n if not torch.cuda.is_available():\n return\n\n num_gpus = torch.cuda.device_count()\n Trainer(**trainer_options, gpus=num_gpus, auto_select_gpus=True)\n\n with pytest.raises(RuntimeError, match=r\".*No GPUs available.*\"):\n Trainer(**trainer_options, gpus=num_gpus + 1, auto_select_gpus=True)\n\n\[email protected](\n \"limit_val_batches\",\n [0.0, 1, 1.0, 0.5, 5],\n)\ndef test_num_sanity_val_steps(tmpdir, limit_val_batches):\n \"\"\"\n Test that the number of sanity check batches is clipped to `limit_val_batches`.\n \"\"\"\n model = EvalModelTemplate()\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n num_sanity_val_steps = 4\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n num_sanity_val_steps=num_sanity_val_steps,\n limit_val_batches=limit_val_batches,\n max_steps=1,\n )\n assert trainer.num_sanity_val_steps == num_sanity_val_steps\n\n with patch.object(\n trainer.fit_loop.validation_loop.epoch_loop,\n \"evaluation_step\",\n wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step\n ) as mocked:\n val_dataloaders = model.val_dataloader__multiple_mixed_length()\n trainer.fit(model, val_dataloaders=val_dataloaders)\n\n assert mocked.call_count == sum(\n min(num_sanity_val_steps, num_batches) for num_batches in trainer.num_val_batches\n )\n\n\[email protected](\"limit_val_batches\", [0.0, 1, 1.0, 0.3])\ndef test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):\n \"\"\"\n Test that `num_sanity_val_steps=-1` runs through all validation data once, and as many batches as\n limited by `limit_val_batches` Trainer argument.\n \"\"\"\n model = EvalModelTemplate()\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n trainer = Trainer(\n default_root_dir=tmpdir,\n num_sanity_val_steps=-1,\n limit_val_batches=limit_val_batches,\n max_steps=1,\n )\n assert trainer.num_sanity_val_steps == float(\"inf\")\n\n with patch.object(\n trainer.fit_loop.validation_loop.epoch_loop,\n \"evaluation_step\",\n wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step\n ) as mocked:\n val_dataloaders = model.val_dataloader__multiple()\n trainer.fit(model, val_dataloaders=val_dataloaders)\n\n assert mocked.call_count == sum(trainer.num_val_batches)\n\n\[email protected](\n \"trainer_kwargs,expected\",\n [\n (\n dict(accelerator=None, gpus=None),\n dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),\n ),\n (\n dict(accelerator=\"dp\", gpus=None),\n dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),\n ),\n (\n dict(accelerator=\"ddp\", 
gpus=None),\n dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),\n ),\n (\n dict(accelerator=\"ddp\", num_processes=2, gpus=None),\n dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),\n ),\n (\n dict(accelerator=\"ddp\", num_nodes=2, gpus=None),\n dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),\n ),\n (\n dict(accelerator=\"ddp_cpu\", num_processes=2, gpus=None),\n dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),\n ),\n (\n dict(accelerator=\"ddp2\", gpus=None),\n dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),\n ),\n (\n dict(accelerator=None, gpus=1),\n dict(_distrib_type=None, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),\n ),\n (\n dict(accelerator=\"dp\", gpus=1),\n dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),\n ),\n (\n dict(accelerator=\"ddp\", gpus=1),\n dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),\n ),\n (\n dict(accelerator=\"ddp_cpu\", num_processes=2, gpus=1),\n dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),\n ),\n (\n dict(accelerator=\"ddp2\", gpus=1),\n dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),\n ),\n (\n dict(accelerator=None, gpus=2),\n dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),\n ),\n (\n dict(accelerator=\"dp\", gpus=2),\n dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),\n ),\n (\n dict(accelerator=\"ddp\", gpus=2),\n dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),\n ),\n (\n dict(accelerator=\"ddp2\", gpus=2),\n dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),\n ),\n ],\n)\ndef test_trainer_config(trainer_kwargs, expected, monkeypatch):\n if trainer_kwargs[\"gpus\"] is not None:\n monkeypatch.setattr(torch.cuda, \"is_available\", lambda: True)\n monkeypatch.setattr(torch.cuda, \"device_count\", lambda: trainer_kwargs[\"gpus\"])\n trainer = Trainer(**trainer_kwargs)\n assert len(expected) == 4\n for k, v in expected.items():\n assert getattr(trainer, k) == v, f\"Failed {k}: {v}\"\n\n\ndef test_trainer_subclassing():\n model = EvalModelTemplate()\n\n # First way of pulling out args from signature is to list them\n class TrainerSubclass(Trainer):\n\n def __init__(self, custom_arg, *args, custom_kwarg=\"test\", **kwargs):\n super().__init__(*args, **kwargs)\n self.custom_arg = custom_arg\n self.custom_kwarg = custom_kwarg\n\n trainer = TrainerSubclass(123, custom_kwarg=\"custom\", fast_dev_run=True)\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.custom_arg == 123\n assert trainer.custom_kwarg == \"custom\"\n assert trainer.fast_dev_run\n\n # Second way is to pop from the dict\n # It's a special case because Trainer does not have any positional args\n class TrainerSubclass(Trainer):\n\n def __init__(self, **kwargs):\n self.custom_arg = kwargs.pop(\"custom_arg\", 0)\n self.custom_kwarg = kwargs.pop(\"custom_kwarg\", \"test\")\n super().__init__(**kwargs)\n\n trainer = TrainerSubclass(custom_kwarg=\"custom\", fast_dev_run=True)\n trainer.fit(model)\n 
assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.custom_kwarg == \"custom\"\n assert trainer.fast_dev_run\n\n # when we pass in an unknown arg, the base class should complain\n with pytest.raises(TypeError, match=r\"__init__\\(\\) got an unexpected keyword argument 'abcdefg'\"):\n TrainerSubclass(abcdefg=\"unknown_arg\")\n\n\n@pytest.mark.parametrize(\n \"trainer_params\", [\n OmegaConf.create(dict(max_epochs=1, gpus=1)),\n OmegaConf.create(dict(max_epochs=1, gpus=[0])),\n ]\n)\n@RunIf(min_gpus=1)\ndef test_trainer_omegaconf(trainer_params):\n Trainer(**trainer_params)\n\n\ndef test_trainer_pickle(tmpdir):\n trainer = Trainer(\n max_epochs=1,\n default_root_dir=tmpdir,\n )\n pickle.dumps(trainer)\n cloudpickle.dumps(trainer)\n\n\n@pytest.mark.parametrize(\"stage\", (\"fit\", \"validate\", \"test\"))\ndef test_trainer_setup_call(tmpdir, stage):\n \"\"\"Test setup call gets the correct stage\"\"\"\n\n class CurrentModel(BoringModel):\n\n def setup(self, stage):\n self.stage = stage\n\n class TrainerSubclass(Trainer):\n\n def setup(self, model, stage):\n assert model is not None\n self.stage = stage\n\n model = CurrentModel()\n\n # fit model\n trainer = TrainerSubclass(default_root_dir=tmpdir, max_epochs=1, checkpoint_callback=False)\n\n if stage == \"fit\":\n trainer.fit(model)\n elif stage == \"validate\":\n trainer.validate(model, ckpt_path=None)\n else:\n trainer.test(model, ckpt_path=None)\n\n assert trainer.stage == stage\n assert trainer.lightning_module.stage == stage\n\n\n@pytest.mark.parametrize(\n \"train_batches, max_steps, log_interval\",\n [\n (10, 10, 1),\n (3, 10, 1),\n (3, 10, 5),\n ],\n)\n@patch(\"pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics\")\ndef test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval):\n\n class TestModel(BoringModel):\n\n def training_step(self, *args, **kwargs):\n self.log(\"foo\", -1)\n return super().training_step(*args, **kwargs)\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n log_every_n_steps=log_interval,\n flush_logs_every_n_steps=log_interval,\n limit_train_batches=train_batches,\n limit_val_batches=0,\n max_steps=max_steps,\n )\n trainer.fit(model)\n expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)]\n log_metrics_mock.assert_has_calls(expected_calls)\n\n\nclass TestLightningDataModule(LightningDataModule):\n\n def __init__(self, dataloaders):\n super().__init__()\n self._dataloaders = dataloaders\n\n def test_dataloader(self):\n return self._dataloaders\n\n def predict_dataloader(self):\n return self._dataloaders\n\n\nclass CustomPredictionWriter(BasePredictionWriter):\n\n write_on_batch_end_called = False\n write_on_epoch_end_called = False\n\n def __init__(self, output_dir: str, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.output_dir = output_dir\n\n def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, *args, **kwargs):\n assert prediction.shape == torch.Size([1, 2])\n if trainer.accelerator_connector.is_distributed:\n assert len(batch_indices) == 1\n else:\n assert batch_indices is None\n self.write_on_batch_end_called = True\n\n def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):\n expected = 1 if trainer.accelerator_connector.is_distributed else 2\n assert len(predictions) == 2\n assert len(predictions[0]) == expected\n if trainer.accelerator_connector.is_distributed:\n assert len(batch_indices) == 2\n assert 
len(batch_indices[0]) == expected\n else:\n assert batch_indices is None\n self.write_on_epoch_end_called = True\n\n def on_predict_epoch_end(self, trainer, pl_module, outputs):\n if trainer.accelerator_connector.is_distributed:\n for idx in range(2):\n assert isinstance(trainer.predict_dataloaders[idx].batch_sampler.sampler, UnrepeatedDistributedSampler)\n assert isinstance(trainer.predict_dataloaders[idx].batch_sampler, IndexBatchSamplerWrapper)\n super().on_predict_epoch_end(trainer, pl_module, outputs)\n\n\ndef predict(\n tmpdir, accelerator, gpus, num_processes, model=None, plugins=None, datamodule=True, pbrr=None, use_callbacks=True\n):\n dataloaders = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))]\n\n model = model or BoringModel()\n dm = TestLightningDataModule(dataloaders)\n\n cb = CustomPredictionWriter(tmpdir, write_interval=\"batch\")\n cb_1 = CustomPredictionWriter(tmpdir, write_interval=\"epoch\")\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n log_every_n_steps=1,\n weights_summary=None,\n accelerator=accelerator,\n gpus=gpus,\n num_processes=num_processes,\n plugins=plugins,\n progress_bar_refresh_rate=pbrr,\n callbacks=[cb, cb_1] if use_callbacks else []\n )\n if accelerator == \"ddp_spawn\":\n with pytest.raises(MisconfigurationException):\n trainer.predict(model, datamodule=dm, return_predictions=True)\n\n if datamodule:\n results = trainer.predict(model, datamodule=dm)\n else:\n results = trainer.predict(model, dataloaders=dataloaders)\n\n if not isinstance(trainer.training_type_plugin, DDPSpawnPlugin):\n if use_callbacks:\n assert cb.write_on_batch_end_called\n assert not cb.write_on_epoch_end_called\n\n assert not cb_1.write_on_batch_end_called\n assert cb_1.write_on_epoch_end_called\n\n num_samples = 1 if accelerator == \"ddp\" else 2\n assert len(results) == 2\n assert len(results[0]) == num_samples\n assert results[0][0].shape == torch.Size([1, 2])\n\n\ndef test_trainer_predict_no_return(tmpdir):\n \"\"\"\n Test trainer.predict warns when nothing is returned\n \"\"\"\n\n class CustomBoringModel(BoringModel):\n\n def predict_step(self, batch, batch_idx, dataloader_idx=None):\n if (batch_idx + 1) % 2 == 0:\n return\n\n return super().predict_step(batch, batch_idx, dataloader_idx)\n\n with pytest.warns(UserWarning, match='predict returned None'):\n predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)\n\n\ndef test_trainer_predict_grad(tmpdir):\n\n class CustomBoringModel(BoringModel):\n\n def predict_step(self, batch, batch_idx, dataloader_idx=None):\n assert batch.expand_as(batch).grad_fn is None\n return super().predict_step(batch, batch_idx, dataloader_idx)\n\n predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)\n\n x = torch.zeros(1, requires_grad=True)\n assert x.expand_as(x).grad_fn is not None\n\n\n@pytest.mark.parametrize('progress_bar_refresh_rate', [0, 5, None])\n@pytest.mark.parametrize('datamodule', [False, True])\ndef test_trainer_predict_cpu(tmpdir, datamodule, progress_bar_refresh_rate):\n predict(tmpdir, None, None, 1, datamodule=datamodule, pbrr=progress_bar_refresh_rate)\n\n\n@RunIf(min_gpus=2, special=True)\n@pytest.mark.parametrize('num_gpus', [1, 2])\ndef test_trainer_predict_dp(tmpdir, num_gpus):\n predict(tmpdir, \"dp\", num_gpus, None)\n\n\n@RunIf(min_gpus=2, special=True, fairscale=True)\ndef test_trainer_predict_ddp(tmpdir):\n predict(tmpdir, \"ddp\", 2, None)\n\n\n@RunIf(min_gpus=2, skip_windows=True, special=True)\ndef 
test_trainer_predict_ddp_spawn(tmpdir):\n predict(tmpdir, \"ddp_spawn\", 2, None)\n\n\n@RunIf(min_gpus=2, special=True)\ndef test_trainer_predict_1_gpu(tmpdir):\n predict(tmpdir, None, 1, None)\n\n\n@RunIf(skip_windows=True)\ndef test_trainer_predict_ddp_cpu(tmpdir):\n predict(tmpdir, \"ddp_cpu\", 0, 2)\n\n\n@patch('torch.cuda.device_count', return_value=2)\n@patch('torch.cuda.is_available', return_value=True)\ndef test_spawn_predict_return_predictions(*_):\n \"\"\"\n Test that `return_predictions=True` raises a MisconfigurationException with spawn training type plugins.\n \"\"\"\n model = BoringModel()\n\n def run(expected_plugin, **trainer_kwargs):\n trainer = Trainer(**trainer_kwargs, fast_dev_run=True)\n assert isinstance(trainer.training_type_plugin, expected_plugin)\n with pytest.raises(MisconfigurationException, match=\"`return_predictions` should be set to `False`\"):\n trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=True)\n\n run(DDPSpawnPlugin, accelerator=\"ddp_spawn\", gpus=2)\n run(DDPSpawnPlugin, accelerator=\"ddp_cpu\", num_processes=2)\n\n\n@pytest.mark.parametrize(\"return_predictions\", [None, False, True])\n@pytest.mark.parametrize(\"precision\", [32, 64])\ndef test_predict_return_predictions_cpu(return_predictions, precision, tmpdir):\n \"\"\"\n Test `return_predictions` behavior on CPU.\n \"\"\"\n seed_everything(42)\n model = BoringModel()\n\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, precision=precision)\n preds = trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=return_predictions)\n if return_predictions or return_predictions is None:\n assert len(preds) == 1\n assert preds[0].shape == torch.Size([1, 2])\n assert preds[0].dtype == (torch.float64 if precision == 64 else torch.float32)\n\n\n@pytest.mark.parametrize(\n [\"limit_train_batches\", \"global_step\", \"num_training_batches\", \"current_epoch\", \"should_train\"],\n [(0.2, 0, 0, 0, False), (0.5, 10, 2, 4, True)],\n)\ndef test_disabled_training_for_insufficient_limit_train_batches(\n tmpdir, limit_train_batches, global_step, num_training_batches, current_epoch, should_train\n):\n \"\"\"\n Verify that when `limit_train_batches` is a float in [0.0, 1.0] and\n `int(self.num_training_batches * self.limit_train_batches) == 0`, the training loop is disabled.\n \"\"\"\n\n class CurrentModel(BoringModel):\n\n training_step_invoked = False\n training_epoch_end_invoked = False\n\n def training_step(self, *args, **kwargs):\n self.training_step_invoked = True\n return super().training_step(*args, **kwargs)\n\n def training_epoch_end(self, *args, **kwargs):\n self.training_epoch_end_invoked = True\n return super().training_epoch_end(*args, **kwargs)\n\n dataset_len = 100\n batch_size = 25\n\n train = RandomDataset(32, length=dataset_len)\n train_loader = DataLoader(train, batch_size=batch_size)\n\n model = CurrentModel()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=5,\n limit_train_batches=limit_train_batches,\n )\n trainer.fit(model, train_loader)\n\n params_string = f\"\"\"`limit_train_batches={limit_train_batches}`, `dataset_len={dataset_len}`\n & `batch_size={batch_size}` as\n `num_training_batches={num_training_batches}`\"\"\"\n if should_train:\n error_string = f\"should run with {params_string}\"\n else:\n error_string = f\"should not run with {params_string}\"\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n assert trainer.global_step == global_step\n assert trainer.num_training_batches == num_training_batches\n 
assert trainer.current_epoch == current_epoch\n assert model.training_step_invoked == should_train, f\"`training_step` {error_string}\"\n assert model.training_epoch_end_invoked == should_train, f\"`training_epoch_end` {error_string}\"\n\n\n@pytest.mark.parametrize([\"max_steps\", \"max_epochs\", \"global_step\"], [(10, 5, 10), (20, None, 20)])\ndef test_repeated_fit_calls_with_max_epochs_and_steps(tmpdir, max_steps, max_epochs, global_step):\n \"\"\"\n Ensure that the training loop is bound by `max_steps` and\n `max_epochs` for repeated calls of `trainer.fit`, and\n disabled if the limit is reached.\n \"\"\"\n\n dataset_len = 200\n batch_size = 10\n\n train_data = DataLoader(RandomDataset(32, dataset_len), batch_size=batch_size)\n\n model = BoringModel()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=max_steps,\n max_epochs=max_epochs,\n )\n trainer.fit(model, train_data)\n assert trainer.global_step == global_step\n trainer.fit(model, train_data)\n assert trainer.global_step == global_step\n\n\ndef test_trainer_access_in_configure_optimizers(tmpdir):\n \"\"\"\n Verify that the configure optimizer function can reference the trainer.\n \"\"\"\n\n class TestModel(BoringModel):\n\n def configure_optimizers(self):\n assert self.trainer is not None, \"Expect to have access to the trainer within `configure_optimizers`\"\n\n train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n model = TestModel()\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n trainer.fit(model, train_data)\n\n\n@RunIf(min_gpus=1)\ndef test_setup_hook_move_to_device_correctly(tmpdir):\n \"\"\"\n Verify that if a user defines a layer in the setup hook function, this is moved to the correct device.\n \"\"\"\n\n class TestModel(BoringModel):\n\n def setup(self, stage: str) -> None:\n self.new_layer = torch.nn.Linear(2, 2)\n\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n # will crash if not moved to correct device\n output = self.new_layer(output)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n # fake data\n train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n # model\n model = TestModel()\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, gpus=1)\n trainer.fit(model, train_data)\n\n\ndef test_train_loop_system(tmpdir):\n \"\"\"\n Test that the following methods are called in this order in automatic optimization.\n 1. optimizer.step (skipped while accumulating gradients)\n 2. model.training_step\n 3. optimizer.zero_grad (run on the first batch of each gradient accumulation window)\n 4. 
model.backward\n\n Note that the order is NOT `training_step`->`zero_grad`->`backward`->`step`.\n This is because `optimizer.step(closure)` calls `closure()` which then calls\n the three remaining methods `training_step`, `zero_grad` and `backward` inside.\n \"\"\"\n called_methods = []\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=5,\n limit_val_batches=1,\n limit_test_batches=1,\n progress_bar_refresh_rate=0,\n )\n\n class TestOptimizer(SGD):\n\n def step(self, *args, **kwargs):\n called_methods.append(\"step\")\n return super().step(*args, **kwargs)\n\n def zero_grad(self, *args, **kwargs):\n called_methods.append(\"zero_grad\")\n return super().zero_grad(*args, **kwargs)\n\n class TestModel(BoringModel):\n\n def configure_optimizers(self):\n return TestOptimizer(self.parameters(), lr=0.1)\n\n def training_step(self, *args, **kwargs):\n called_methods.append(\"training_step\")\n return super().training_step(*args, **kwargs)\n\n def backward(self, *args, **kwargs):\n called_methods.append(\"backward\")\n return super().backward(*args, **kwargs)\n\n model = TestModel()\n trainer = Trainer(**trainer_options)\n\n # No methods are called yet.\n assert called_methods == []\n\n trainer.fit(model)\n assert called_methods == [\n \"step\",\n \"training_step\",\n \"zero_grad\",\n \"backward\",\n ] * trainer.limit_train_batches\n\n called_methods.clear()\n trainer = Trainer(**trainer_options, accumulate_grad_batches=3)\n\n # No methods are called yet.\n assert called_methods == []\n\n trainer.fit(model)\n assert called_methods == [\n # 0\n \"training_step\",\n \"zero_grad\",\n \"backward\",\n # 1\n \"training_step\",\n \"backward\",\n # 2\n \"step\",\n \"training_step\",\n \"backward\",\n # 3\n \"training_step\",\n \"zero_grad\",\n \"backward\",\n # 4\n \"step\",\n \"training_step\",\n \"backward\",\n ]\n\n\ndef test_init_optimizers_resets_lightning_optimizers(tmpdir):\n \"\"\" Test that the Trainer resets the `lightning_optimizers` list every time new optimizers get initialized. 
\"\"\"\n\n def compare_optimizers():\n assert trainer.lightning_optimizers[0].optimizer is trainer.optimizers[0]\n\n model = BoringModel()\n model.lr = 0.2\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n auto_lr_find=True,\n )\n\n trainer.tune(model)\n compare_optimizers()\n\n trainer.fit(model)\n compare_optimizers()\n\n trainer.fit_loop.max_epochs = 2 # simulate multiple fit calls\n trainer.fit(model)\n compare_optimizers()\n\n\ndef test_check_val_every_n_epoch_exception(tmpdir):\n\n with pytest.raises(MisconfigurationException, match=\"should be an integer.\"):\n Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n check_val_every_n_epoch=1.2,\n )\n\n\ndef test_trainer_attach_data_pipeline_to_model(tmpdir):\n\n class DataPipeline:\n\n pass\n\n class TestDataModule(LightningDataModule):\n\n data_pipeline = DataPipeline()\n\n def train_dataloader(self):\n return DataLoader(RandomDataset(32, 64))\n\n def val_dataloader(self):\n return DataLoader(RandomDataset(32, 64))\n\n def test_dataloader(self):\n return DataLoader(RandomDataset(32, 64))\n\n class TestCallback(Callback):\n\n def on_fit_start(self, trainer, pl_module: LightningModule) -> None:\n \"\"\"Called when fit begins\"\"\"\n assert isinstance(pl_module.data_pipeline, DataPipeline)\n\n model = BoringModel()\n dm = TestDataModule()\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[TestCallback()])\n trainer.fit(model, datamodule=dm)\n\n\ndef test_exception_when_testing_or_validating_with_fast_dev_run(tmpdir):\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n model = BoringModel()\n trainer.fit(model)\n\n with pytest.raises(MisconfigurationException, match=r\"\\.validate\\(\\)` with `fast_dev_run=True\"):\n trainer.validate()\n with pytest.raises(MisconfigurationException, match=r\"\\.test\\(\\)` with `fast_dev_run=True\"):\n trainer.test()\n\n\nclass TrainerStagesModel(BoringModel):\n\n def on_train_start(self) -> None:\n assert self.trainer.model.training\n assert self.training\n\n def on_validation_start(self) -> None:\n assert not self.trainer.model.training\n assert not self.training\n\n def on_test_start(self) -> None:\n assert not self.trainer.model.training\n assert not self.training\n\n def on_predict_start(self) -> None:\n assert not self.trainer.model.training\n assert not self.training\n\n\[email protected](\n 'accelerator,num_processes', [(None, 1), pytest.param('ddp', 2, marks=RunIf(skip_windows=True))]\n)\ndef test_model_in_correct_mode_during_stages(tmpdir, accelerator, num_processes):\n model = TrainerStagesModel()\n trainer = Trainer(default_root_dir=tmpdir, accelerator=accelerator, num_processes=num_processes, fast_dev_run=True)\n trainer.fit(model)\n trainer.validate(model)\n trainer.test(model)\n trainer.predict(model, model.val_dataloader())\n\n\nclass TestDummyModelForCheckpoint(BoringModel):\n\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n self.log('x', loss)\n\n def validation_epoch_end(self, outputs) -> None:\n pass\n\n\n@RunIf(skip_windows=True)\ndef test_fit_test_synchronization(tmpdir):\n \"\"\"Test that the trainer synchronizes processes before returning control back to the caller. 
\"\"\"\n tutils.set_random_master_port()\n model = TestDummyModelForCheckpoint()\n checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor='x', mode='min', save_top_k=1)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=2,\n accelerator='ddp_cpu',\n num_processes=2,\n callbacks=[checkpoint],\n )\n trainer.fit(model)\n assert os.path.exists(checkpoint.best_model_path), f'Could not find checkpoint at rank {trainer.global_rank}'\n trainer.test()\n\n\nclass CustomCallbackOnLoadCheckpoint(Callback):\n\n def on_save_checkpoint(self, trainer, pl_module, checkpoint) -> dict:\n return {\"a\": None}\n\n\ndef test_on_load_checkpoint_missing_callbacks(tmpdir):\n \"\"\" Test a warning appears when callbacks in the checkpoint don't match callbacks provided when resuming. \"\"\"\n\n model = BoringModel()\n chk = ModelCheckpoint(dirpath=tmpdir, save_last=True)\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, callbacks=[chk, CustomCallbackOnLoadCheckpoint()])\n trainer.fit(model)\n\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=5, resume_from_checkpoint=chk.last_model_path, progress_bar_refresh_rate=1\n )\n with pytest.warns(UserWarning, match=\"CustomCallbackOnLoadCheckpoint\"):\n trainer.fit(model)\n\n\ndef test_module_current_fx_attributes_reset(tmpdir):\n \"\"\" Ensure that lightning module's attributes related to current fx are reset at the end of execution. \"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=1,\n checkpoint_callback=False,\n logger=False,\n )\n\n trainer.fit(model)\n assert model._current_fx_name is None\n assert model._current_dataloader_idx is None\n\n trainer.test(model)\n assert model._current_fx_name is None\n assert model._current_dataloader_idx is None\n\n\ndef test_exception_when_lightning_module_is_not_set_on_trainer():\n trainer = Trainer()\n\n with pytest.raises(MisconfigurationException, match=r\"`model` must be provided.*validate\"):\n trainer.validate()\n with pytest.raises(MisconfigurationException, match=r\"`model` must be provided.*test\"):\n trainer.test()\n with pytest.raises(MisconfigurationException, match=r\"`model` must be provided.*predict\"):\n trainer.predict()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "torch.nn.Linear", "torch.load", "torch.Size", "torch.nn.init.constant_", "torch.eq", "torch.equal", "torch.cuda.device_count", "torch.tensor", "torch.cuda.is_available", "torch.zeros", "torch.isfinite" ] ]
gordonnchy/kamusi
[ "61600f9fac1d7a1fe74b1a48add55545f3f98e47" ]
[ "to_json.py" ]
[ "import pandas as pd\n\n\ndef convert_to_json():\n kamusi = pd.read_csv(\n \"words.csv\", usecols=[\"Index\", \"Word\", \"Meaning\", \"Synonyms\", \"Conjugation\"]\n )\n kamusi = kamusi.set_index(\"Index\")\n kamusi.to_json(\"kamusi.json\", orient=\"index\")\n\n\nif __name__ == \"__main__\":\n convert_to_json()\n" ]
[ [ "pandas.read_csv" ] ]
AyodeAwe/cuspatial
[ "77971ac91a24228bc46cf461c0ac7b6f2ed78e44" ]
[ "python/cuspatial/cuspatial/io/geopandas_adapter.py" ]
[ "# Copyright (c) 2020-2021 NVIDIA CORPORATION.\n\nimport numpy as np\nfrom geopandas import GeoSeries as gpGeoSeries\nfrom shapely.geometry import (\n LineString,\n MultiLineString,\n MultiPoint,\n MultiPolygon,\n Point,\n Polygon,\n)\n\n\nclass GeoPandasAdapter:\n def __init__(self, geoseries: gpGeoSeries):\n \"\"\"\n GeoPandasAdapter copies a GeoPandas GeoSeries object iteratively into\n a set of arrays: points, multipoints, lines, and polygons.\n\n Parameters\n ----------\n geoseries : A GeoPandas GeoSeries\n \"\"\"\n self.offsets = self._load_geometry_offsets(geoseries)\n self.buffers = self._read_geometries(geoseries, self.offsets)\n\n def _load_geometry_offsets(self, geoseries: gpGeoSeries) -> dict:\n \"\"\"\n Computes the offet arrays and buffer sizes that will be required\n to store the geometries.\n\n Parameters\n ----------\n geoseries : A GeoPandas GeoSeries\n \"\"\"\n offsets = {\n \"points\": [0],\n \"multipoints\": [0],\n \"lines\": [0],\n \"mlines\": [],\n \"polygons\": {\"polygons\": [0], \"rings\": [0], \"mpolys\": []},\n }\n for geometry in geoseries:\n if isinstance(geometry, Point):\n # a single Point geometry will go into the GpuPoints\n # structure. No offsets are required, but an index to the\n # position in the GeoSeries is required.\n current = offsets[\"points\"][-1]\n offsets[\"points\"].append(len(geometry.xy) + current)\n elif isinstance(geometry, MultiPoint):\n # A MultiPoint geometry also is copied into the GpuPoints\n # structure. A MultiPoint object must be created, containing\n # the size of the number of points, the position they are\n # stored in GpuPoints, and the index of the MultiPoint in the\n # GeoSeries.\n current = offsets[\"multipoints\"][-1]\n offsets[\"multipoints\"].append(len(geometry) * 2 + current)\n elif isinstance(geometry, LineString):\n # A LineString geometry is stored in the GpuLines structure.\n # Every LineString has a size which is stored in the GpuLines\n # structure. The index of the LineString back into the\n # GeoSeries is also stored.\n current = offsets[\"lines\"][-1]\n offsets[\"lines\"].append(2 * len(geometry.coords) + current)\n elif isinstance(geometry, MultiLineString):\n # A MultiLineString geometry is stored identically to\n # LineString in the GpuLines structure. 
The index of the\n # GeoSeries object is also stored.\n offsets[\"mlines\"].append(len(offsets[\"lines\"]) - 1)\n for linestring in geometry:\n current = offsets[\"lines\"][-1]\n offsets[\"lines\"].append(\n 2 * len(linestring.coords) + current\n )\n offsets[\"mlines\"].append(len(offsets[\"lines\"]) - 1)\n elif isinstance(geometry, Polygon):\n # A Polygon geometry is stored like a LineString and also\n # contains a buffer of sizes for each inner ring.\n num_rings = 1\n rings_current = offsets[\"polygons\"][\"rings\"][-1]\n offsets[\"polygons\"][\"rings\"].append(\n len(geometry.exterior.coords) * 2 + rings_current\n )\n for interior in geometry.interiors:\n rings_current = offsets[\"polygons\"][\"rings\"][-1]\n offsets[\"polygons\"][\"rings\"].append(\n len(interior.coords) * 2 + rings_current\n )\n num_rings = num_rings + 1\n current = offsets[\"polygons\"][\"polygons\"][-1]\n offsets[\"polygons\"][\"polygons\"].append(num_rings + current)\n elif isinstance(geometry, MultiPolygon):\n current = offsets[\"polygons\"][\"polygons\"][-1]\n offsets[\"polygons\"][\"mpolys\"].append(\n len(offsets[\"polygons\"][\"polygons\"]) - 1\n )\n for poly in geometry:\n current = offsets[\"polygons\"][\"polygons\"][-1]\n num_rings = 1\n rings_current = offsets[\"polygons\"][\"rings\"][-1]\n offsets[\"polygons\"][\"rings\"].append(\n len(poly.exterior.coords) * 2 + rings_current\n )\n for interior in poly.interiors:\n rings_current = offsets[\"polygons\"][\"rings\"][-1]\n offsets[\"polygons\"][\"rings\"].append(\n len(interior.coords) * 2 + rings_current\n )\n num_rings = num_rings + 1\n offsets[\"polygons\"][\"polygons\"].append(num_rings + current)\n offsets[\"polygons\"][\"mpolys\"].append(\n len(offsets[\"polygons\"][\"polygons\"]) - 1\n )\n return offsets\n\n def _read_geometries(self, geoseries: gpGeoSeries, offsets: dict,) -> dict:\n \"\"\"\n Creates a set of buffers sized to fit all of the geometries and\n iteratively populates them with geometry coordinate values.\n\n Parameters\n ----------\n geoseries : A GeoPandas GeoSeries object.\n offsets : The set of offsets that correspond to the geoseries argument.\n \"\"\"\n buffers = {\n \"points\": np.zeros(offsets[\"points\"][-1]),\n \"multipoints\": np.zeros(offsets[\"multipoints\"][-1]),\n \"lines\": np.zeros(offsets[\"lines\"][-1]),\n \"polygons\": {\n \"polygons\": np.zeros(len(offsets[\"polygons\"][\"polygons\"])),\n \"rings\": np.zeros(len(offsets[\"polygons\"][\"rings\"])),\n \"coords\": np.zeros(offsets[\"polygons\"][\"rings\"][-1]),\n },\n }\n read_count = {\n \"points\": 0,\n \"multipoints\": 0,\n \"lines\": 0,\n \"polygons\": 0,\n }\n inputs = []\n input_types = []\n input_lengths = []\n for geometry in geoseries:\n if isinstance(geometry, Point):\n # write a point to the points buffer\n # increase read_count of points pass\n i = read_count[\"points\"] * 2\n buffers[\"points\"][i] = geometry.x\n buffers[\"points\"][i + 1] = geometry.y\n read_count[\"points\"] = read_count[\"points\"] + 1\n input_types.append(\"p\")\n input_lengths.append(1)\n inputs.append({\"type\": \"p\", \"length\": 1})\n elif isinstance(geometry, MultiPoint):\n points = np.array(geometry)\n size = points.shape[0] * 2\n i = read_count[\"multipoints\"]\n buffers[\"multipoints\"][slice(i, i + size, 2)] = points[:, 0]\n buffers[\"multipoints\"][slice(i + 1, i + size, 2)] = points[\n :, 1\n ]\n read_count[\"multipoints\"] = read_count[\"multipoints\"] + size\n input_types.append(\"mp\")\n input_lengths.append(len(geometry))\n inputs.append({\"type\": \"mp\", \"length\": 
len(geometry)})\n            elif isinstance(geometry, LineString):\n                size = len(geometry.xy[0]) * 2\n                i = read_count[\"lines\"]\n                buffers[\"lines\"][slice(i, i + size, 2)] = geometry.xy[0]\n                buffers[\"lines\"][slice(i + 1, i + size, 2)] = geometry.xy[1]\n                read_count[\"lines\"] = read_count[\"lines\"] + size\n                input_types.append(\"l\")\n                input_lengths.append(1)\n                inputs.append({\"type\": \"l\", \"length\": 1})\n            elif isinstance(geometry, MultiLineString):\n                substrings = []\n                for linestring in geometry:\n                    size = len(linestring.xy[0]) * 2\n                    i = read_count[\"lines\"]\n                    buffers[\"lines\"][slice(i, i + size, 2)] = linestring.xy[0]\n                    buffers[\"lines\"][\n                        slice(i + 1, i + size, 2)\n                    ] = linestring.xy[1]\n                    read_count[\"lines\"] = read_count[\"lines\"] + size\n                    substrings.append({\"type\": \"l\", \"length\": size})\n                input_types.append(\"ml\")\n                input_lengths.append(len(geometry))\n                inputs.append(\n                    {\n                        \"type\": \"ml\",\n                        \"length\": len(geometry),\n                        \"children\": substrings,\n                    }\n                )\n            elif isinstance(geometry, Polygon):\n                # copy exterior\n                exterior = geometry.exterior.coords.xy\n                size = len(exterior[0]) * 2\n                i = read_count[\"polygons\"]\n                buffers[\"polygons\"][\"coords\"][\n                    slice(i, i + size, 2)\n                ] = exterior[0]\n                buffers[\"polygons\"][\"coords\"][\n                    slice(i + 1, i + size, 2)\n                ] = exterior[1]\n                read_count[\"polygons\"] = read_count[\"polygons\"] + size\n                interiors = geometry.interiors\n                for interior in interiors:\n                    interior_coords = interior.coords.xy\n                    size = len(interior_coords[0]) * 2\n                    i = read_count[\"polygons\"]\n                    buffers[\"polygons\"][\"coords\"][\n                        slice(i, i + size, 2)\n                    ] = interior_coords[0]\n                    buffers[\"polygons\"][\"coords\"][\n                        slice(i + 1, i + size, 2)\n                    ] = interior_coords[1]\n                    read_count[\"polygons\"] = read_count[\"polygons\"] + size\n                input_types.append(\"poly\")\n                input_lengths.append(1)\n                inputs.append({\"type\": \"poly\", \"length\": 1})\n            elif isinstance(geometry, MultiPolygon):\n                subpolys = []\n                for polygon in geometry:\n                    exterior = polygon.exterior.coords.xy\n                    size = len(exterior[0]) * 2\n                    i = read_count[\"polygons\"]\n                    buffers[\"polygons\"][\"coords\"][\n                        slice(i, i + size, 2)\n                    ] = exterior[0]\n                    buffers[\"polygons\"][\"coords\"][\n                        slice(i + 1, i + size, 2)\n                    ] = exterior[1]\n                    read_count[\"polygons\"] = read_count[\"polygons\"] + size\n                    interiors = polygon.interiors\n                    for interior in interiors:\n                        interior_coords = interior.coords.xy\n                        size = len(interior_coords[0]) * 2\n                        i = read_count[\"polygons\"]\n                        buffers[\"polygons\"][\"coords\"][\n                            slice(i, i + size, 2)\n                        ] = interior_coords[0]\n                        buffers[\"polygons\"][\"coords\"][\n                            slice(i + 1, i + size, 2)\n                        ] = interior_coords[1]\n                        read_count[\"polygons\"] = read_count[\"polygons\"] + size\n                    subpolys.append({\"type\": \"poly\", \"length\": 1})\n                input_types.append(\"mpoly\")\n                input_lengths.append(len(geometry))\n                inputs.append(\n                    {\n                        \"type\": \"mpoly\",\n                        \"length\": len(geometry),\n                        \"children\": subpolys,\n                    }\n                )\n            else:\n                raise NotImplementedError\n        return {\n            \"buffers\": buffers,\n            \"input_types\": input_types,\n            \"input_lengths\": input_lengths,\n            \"inputs\": inputs,\n        }\n\n    def get_geoarrow_host_buffers(self) -> dict:\n        \"\"\"\n        Returns a set of host buffers containing the geopandas object converted\n        to GeoArrow format.\n        \"\"\"\n        points_xy = []\n        mpoints_xy = []\n        mpoints_offsets = []\n        lines_xy = []\n        lines_offsets = []\n        mlines = []\n        polygons_xy = []\n        polygons_polygons = []\n        polygons_rings = []\n        mpolygons = []\n        buffers = self.buffers[\"buffers\"]\n        points_xy = buffers[\"points\"]\n        mpoints_xy = 
buffers[\"multipoints\"]\n mpoints_offsets = self.offsets[\"multipoints\"]\n lines_xy = buffers[\"lines\"]\n lines_offsets = self.offsets[\"lines\"]\n mlines = self.offsets[\"mlines\"]\n polygons_xy = buffers[\"polygons\"][\"coords\"]\n polygons_polygons = self.offsets[\"polygons\"][\"polygons\"]\n polygons_rings = self.offsets[\"polygons\"][\"rings\"]\n mpolygons = self.offsets[\"polygons\"][\"mpolys\"]\n return {\n \"points_xy\": points_xy,\n \"mpoints_xy\": mpoints_xy,\n \"mpoints_offsets\": mpoints_offsets,\n \"lines_xy\": lines_xy,\n \"lines_offsets\": lines_offsets,\n \"mlines\": mlines,\n \"polygons_xy\": polygons_xy,\n \"polygons_polygons\": polygons_polygons,\n \"polygons_rings\": polygons_rings,\n \"mpolygons\": mpolygons,\n }\n\n def get_geopandas_meta(self) -> dict:\n \"\"\"\n Returns the metadata that was created converting the GeoSeries into\n GeoArrow format. The metadata essentially contains the object order\n in the GeoSeries format. GeoArrow doesn't support custom orderings,\n every GeoArrow data store contains points, multipoints, lines, and\n polygons in an arbitrary order.\n \"\"\"\n buffers = self.buffers\n return {\n \"input_types\": buffers[\"input_types\"],\n \"input_lengths\": buffers[\"input_lengths\"],\n \"inputs\": buffers[\"inputs\"],\n }\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
shipjobs/herbarium
[ "db49442e9322e20c2556bffa9dbcb0dfcd695788" ]
[ "herbarium/data/build.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nfrom collections import defaultdict\nimport itertools\nimport logging\nimport numpy as np\nimport operator\nimport pickle\nimport torch.utils.data\nfrom tabulate import tabulate\nfrom termcolor import colored\n\nfrom herbarium.config import configurable\nfrom herbarium.utils.comm import get_world_size\nfrom herbarium.utils.env import seed_all_rng\nfrom herbarium.utils.file_io import PathManager\nfrom herbarium.utils.logger import _log_api_usage, log_first_n\n\nfrom .catalog import DatasetCatalog, MetadataCatalog\nfrom .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset\nfrom .dataset_mapper import DatasetMapper\nfrom .dataset_utils import check_metadata_consistency\nfrom .samplers import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler\nfrom tqdm import tqdm\n\n\"\"\"\nThis file contains the default logic to build a dataloader for training or testing.\n\"\"\"\n\n__all__ = [\n \"build_batch_data_loader\",\n \"build_general_train_loader\",\n \"build_general_test_loader\",\n \"get_dataset_dicts\",\n \"print_instances_class_histogram\",\n]\n\ndef print_instances_class_histogram(dataset_dicts, class_names):\n \"\"\"\n Args:\n dataset_dicts (list[dict]): list of dataset dicts.\n class_names (list[str]): list of class names (zero-indexed).\n \"\"\"\n\n logger = logging.getLogger(__name__)\n logger.info(\"Build instances class histogram\")\n\n num_classes = len(class_names)\n hist_bins = np.arange(num_classes + 1)\n histogram = np.zeros((num_classes,), dtype=np.int)\n for entry in tqdm(dataset_dicts):\n annos = entry[\"annotations\"]\n classes = np.asarray(\n [x[\"category_id\"] for x in annos], dtype=np.int\n )\n if len(classes):\n assert classes.min() >= 0, f\"Got an invalid category_id={classes.min()}\"\n assert (\n classes.max() < num_classes\n ), f\"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes\"\n histogram += np.histogram(classes, bins=hist_bins)[0]\n\n N_COLS = min(6, len(class_names) * 2)\n\n def short_name(x):\n # make long class names shorter. useful for lvis\n if len(x) > 13:\n return x[:11] + \"..\"\n return x\n\n data = list(\n itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])\n )\n total_num_instances = sum(data[1::2])\n data.extend([None] * (N_COLS - (len(data) % N_COLS)))\n if num_classes > 1:\n data.extend([\"total\", total_num_instances])\n data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])\n\n # TODO: Too many classes expected. 
Need to summarize histogram\n\n    table = tabulate(\n        data,\n        headers=[\"category\", \"#instances\"] * (N_COLS // 2),\n        tablefmt=\"pipe\",\n        numalign=\"left\",\n        stralign=\"center\",\n    )\n    log_first_n(\n        logging.INFO,\n        \"Distribution of instances among all {} categories:\\n\".format(num_classes)\n        + colored(table, \"cyan\"),\n        key=\"message\",\n    )\n\ndef get_dataset_dicts(names):\n    \"\"\"\n    Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.\n\n    Args:\n        names (str or list[str]): a dataset name or a list of dataset names\n\n    Returns:\n        list[dict]: a list of dicts following the standard dataset dict format.\n    \"\"\"\n    if isinstance(names, str):\n        names = [names]\n    assert len(names), names\n    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]\n    for dataset_name, dicts in zip(names, dataset_dicts):\n        assert len(dicts), \"Dataset '{}' is empty!\".format(dataset_name)\n\n    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))\n\n    has_instances = \"annotations\" in dataset_dicts[0]\n\n    if has_instances:\n        try:\n            check_metadata_consistency(\"thing_classes\", names)\n            # TODO: printing the class histogram for 250M images is amazingly slow.\n            #class_names = MetadataCatalog.get(names[0]).thing_classes\n            #print_instances_class_histogram(dataset_dicts, class_names)\n        except AttributeError:  # class names are not available for this dataset\n            pass\n\n    assert len(dataset_dicts), \"No valid data found in {}.\".format(\",\".join(names))\n\n    # TODO: enable this subsampling only in the evaluation stage, with random indices\n\n    if \"val\" in names[0]:\n        per_cat_data = defaultdict(list)\n        for data in dataset_dicts:\n            per_cat_data[data[\"annotations\"][0][\"category_id\"]].append(data)\n        \n        dataset_dicts = []\n        for cid, val in per_cat_data.items():\n            dataset_dicts.extend(val[:5])\n\n    return dataset_dicts\n\n\ndef build_batch_data_loader(\n    dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0\n):\n    \"\"\"\n    Build a batched dataloader for training.\n\n    Args:\n        dataset (torch.utils.data.Dataset): map-style PyTorch dataset. Can be indexed.\n        sampler (torch.utils.data.sampler.Sampler): a sampler that produces indices\n        total_batch_size, aspect_ratio_grouping, num_workers: see\n            :func:`build_detection_train_loader`.\n\n    Returns:\n        iterable[list]. Length of each list is the batch size of the current\n        GPU. 
Each element in the list comes from the dataset.\n \"\"\"\n world_size = get_world_size()\n assert (\n total_batch_size > 0 and total_batch_size % world_size == 0\n ), \"Total batch size ({}) must be divisible by the number of gpus ({}).\".format(\n total_batch_size, world_size\n )\n\n batch_size = total_batch_size // world_size\n if aspect_ratio_grouping:\n data_loader = torch.utils.data.DataLoader(\n dataset,\n sampler=sampler,\n num_workers=num_workers,\n batch_sampler=None,\n collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements\n worker_init_fn=worker_init_reset_seed,\n ) # yield individual mapped dict\n return AspectRatioGroupedDataset(data_loader, batch_size)\n else:\n batch_sampler = torch.utils.data.sampler.BatchSampler(\n sampler, batch_size, drop_last=True\n ) # drop_last so the batch always have the same size\n return torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=batch_sampler,\n collate_fn=trivial_batch_collator,\n worker_init_fn=worker_init_reset_seed,\n )\n\n\ndef _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):\n if dataset is None:\n dataset = get_dataset_dicts(\n cfg.DATASETS.TRAIN,\n )\n _log_api_usage(\"dataset.\" + cfg.DATASETS.TRAIN[0])\n\n if mapper is None:\n mapper = DatasetMapper(cfg, True)\n\n if sampler is None:\n sampler_name = cfg.DATALOADER.SAMPLER_TRAIN\n logger = logging.getLogger(__name__)\n logger.info(\"Using training sampler {}\".format(sampler_name))\n if sampler_name == \"TrainingSampler\":\n sampler = TrainingSampler(len(dataset))\n elif sampler_name == \"RepeatFactorTrainingSampler\":\n repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(\n dataset, cfg.DATALOADER.REPEAT_THRESHOLD\n )\n sampler = RepeatFactorTrainingSampler(repeat_factors)\n else:\n raise ValueError(\"Unknown training sampler: {}\".format(sampler_name))\n\n return {\n \"dataset\": dataset,\n \"sampler\": sampler,\n \"mapper\": mapper,\n \"total_batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"aspect_ratio_grouping\": cfg.DATALOADER.ASPECT_RATIO_GROUPING,\n \"num_workers\": cfg.DATALOADER.NUM_WORKERS,\n }\n\n\n# TODO can allow dataset as an iterable or IterableDataset to make this function more general\n@configurable(from_config=_train_loader_from_config)\ndef build_general_train_loader(\n dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0\n):\n \"\"\"\n Build a dataloader for object detection with some default features.\n This interface is experimental.\n\n Args:\n dataset (list or torch.utils.data.Dataset): a list of dataset dicts,\n or a map-style pytorch dataset. They can be obtained by using\n :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.\n mapper (callable): a callable which takes a sample (dict) from dataset and\n returns the format to be consumed by the model.\n When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.\n sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces\n indices to be applied on ``dataset``. Default to :class:`TrainingSampler`,\n which coordinates an infinite random shuffle sequence across all workers.\n total_batch_size (int): total batch size across all workers. Batching\n simply puts data into a list.\n aspect_ratio_grouping (bool): whether to group images with similar\n aspect ratio for efficiency. 
When enabled, it requires each\n element in dataset be a dict with keys \"width\" and \"height\".\n num_workers (int): number of parallel data loading workers\n\n Returns:\n torch.utils.data.DataLoader:\n a dataloader. Each output from it is a ``list[mapped_element]`` of length\n ``total_batch_size / num_workers``, where ``mapped_element`` is produced\n by the ``mapper``.\n \"\"\"\n if isinstance(dataset, list):\n dataset = DatasetFromList(dataset, copy=False)\n if mapper is not None:\n dataset = MapDataset(dataset, mapper)\n if sampler is None:\n sampler = TrainingSampler(len(dataset))\n assert isinstance(sampler, torch.utils.data.sampler.Sampler)\n return build_batch_data_loader(\n dataset,\n sampler,\n total_batch_size,\n aspect_ratio_grouping=aspect_ratio_grouping,\n num_workers=num_workers,\n )\n\n\ndef _test_loader_from_config(cfg, dataset_name, mapper=None):\n \"\"\"\n Uses the given `dataset_name` argument (instead of the names in cfg), because the\n standard practice is to evaluate each test set individually (not combining them).\n \"\"\"\n dataset = get_dataset_dicts(\n [dataset_name],\n )\n if mapper is None:\n mapper = DatasetMapper(cfg, False)\n return {\"dataset\": dataset, \"mapper\": mapper, \"num_workers\": cfg.DATALOADER.NUM_WORKERS, \"total_batch_size\": cfg.SOLVER.IMS_PER_BATCH}\n\n\n@configurable(from_config=_test_loader_from_config)\ndef build_general_test_loader(dataset, *, mapper, total_batch_size=1, sampler=None, num_workers=0):\n \"\"\"\n Similar to `build_detection_train_loader`, but uses a batch size of 1,\n and :class:`InferenceSampler`. This sampler coordinates all workers to\n produce the exact set of all samples.\n This interface is experimental.\n\n Args:\n dataset (list or torch.utils.data.Dataset): a list of dataset dicts,\n or a map-style pytorch dataset. They can be obtained by using\n :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.\n mapper (callable): a callable which takes a sample (dict) from dataset\n and returns the format to be consumed by the model.\n When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.\n sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces\n indices to be applied on ``dataset``. 
Default to :class:`InferenceSampler`,\n which splits the dataset across all workers.\n num_workers (int): number of parallel data loading workers\n\n Returns:\n DataLoader: a torch DataLoader, that loads the given detection\n dataset, with test-time transformation and batching.\n\n Examples:\n ::\n data_loader = build_detection_test_loader(\n DatasetRegistry.get(\"my_test\"),\n mapper=DatasetMapper(...))\n\n # or, instantiate with a CfgNode:\n data_loader = build_detection_test_loader(cfg, \"my_test\")\n \"\"\"\n if isinstance(dataset, list):\n dataset = DatasetFromList(dataset, copy=False)\n if mapper is not None:\n dataset = MapDataset(dataset, mapper)\n if sampler is None:\n sampler = InferenceSampler(len(dataset))\n\n world_size = get_world_size()\n assert (\n total_batch_size > 0 and total_batch_size % world_size == 0\n ), \"Total batch size ({}) must be divisible by the number of gpus ({}).\".format(\n total_batch_size, world_size\n )\n\n batch_size = total_batch_size // world_size\n\n # Always use 1 image per worker during inference since this is the\n # standard when reporting inference time in papers.\n batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, batch_size, drop_last=False)\n data_loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=batch_sampler,\n collate_fn=trivial_batch_collator,\n )\n return data_loader\n\n\ndef trivial_batch_collator(batch):\n \"\"\"\n A batch collator that does nothing.\n \"\"\"\n return batch\n\n\ndef worker_init_reset_seed(worker_id):\n initial_seed = torch.initial_seed() % 2 ** 31\n seed_all_rng(initial_seed + worker_id)\n" ]
[ [ "numpy.arange", "numpy.histogram", "numpy.asarray", "numpy.zeros" ] ]
poppingtonic/fastai2
[ "026fb13f5df3f338378fbd37d9ce8b0399ef1927" ]
[ "fastai2/callback/captum.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/73_callback.captum.ipynb (unless otherwise specified).\n\n__all__ = ['json_clean', 'IntegradedGradientsCallback', 'CaptumInsightsCallback']\n\n# Cell\nimport tempfile\nfrom ..basics import *\nfrom ..learner import Callback\n\n# Cell\n\n# Dirty hack as json_clean doesn't support CategoryMap type\n\nfrom ipykernel import jsonutil\n\n_json_clean=jsonutil.json_clean\ndef json_clean(o):\n o = list(o.items) if isinstance(o,CategoryMap) else o\n return _json_clean(o)\n\njsonutil.json_clean = json_clean\n\n# Cell\nfrom captum.attr import IntegratedGradients\nfrom captum.attr import visualization as viz\n\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\nfrom captum.insights import AttributionVisualizer, Batch\nfrom captum.insights.features import ImageFeature\n\n# Cell\nclass IntegradedGradientsCallback(Callback):\n \"Captum Callback for Resnet Interpretation\"\n def __init__(self):\n pass\n\n def after_fit(self):\n self.integrated_gradients = IntegratedGradients(self.model)\n\n def visualize(self, inp_data, n_steps=200, cmap_name='custom blue', colors=None, N=256,\n methods=None, signs=None, outlier_perc=1):\n if methods is None: methods=['original_image','heat_map']\n if signs is None: signs=[\"all\", \"positive\"]\n dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=1)\n self.enc_inp,self.enc_preds= dl.one_batch()\n dec_data=dl.decode((self.enc_inp,self.enc_preds))\n self.dec_img,self.dec_pred=dec_data[0][0],dec_data[1][0]\n self.colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')] if colors is None else colors\n self.attributions_ig = self.integrated_gradients.attribute(self.enc_inp.to(self.dl.device), target=self.enc_preds, n_steps=200)\n default_cmap = LinearSegmentedColormap.from_list(cmap_name,\n self.colors, N=N)\n _ = viz.visualize_image_attr_multiple(np.transpose(self.attributions_ig.squeeze().cpu().detach().numpy(), (1,2,0)),\n np.transpose(self.dec_img.numpy(), (1,2,0)),\n methods=methods,\n cmap=default_cmap,\n show_colorbar=True,\n signs=signs,\n outlier_perc=outlier_perc, titles=[f'Original Image - ({self.dec_pred})', 'IG'])\n\n# Cell\nclass CaptumInsightsCallback(Callback):\n \"Captum Insights Callback for Image Interpretation\"\n def __init__(self): pass\n\n def _formatted_data_iter(self, dl, normalize_func):\n dl_iter=iter(dl)\n while True:\n images,labels=next(dl_iter)\n images=normalize_func.decode(images).to(dl.device)\n yield Batch(inputs=images, labels=labels)\n\n def visualize(self, inp_data, debug=True):\n _baseline_func= lambda o: o*0\n _get_vocab = lambda vocab: list(map(str,vocab)) if isinstance(vocab[0],bool) else vocab\n dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=4)\n normalize_func= next((func for func in dl.after_batch if type(func)==Normalize),noop)\n\n visualizer = AttributionVisualizer(\n models=[self.model],\n score_func=lambda o: torch.nn.functional.softmax(o, 1),\n classes=_get_vocab(dl.vocab),\n features=[\n ImageFeature(\n \"Image\",\n baseline_transforms=[_baseline_func],\n input_transforms=[normalize_func],\n )\n ],\n dataset=self._formatted_data_iter(dl,normalize_func)\n )\n visualizer.render(debug=debug)\n" ]
[ [ "matplotlib.colors.LinearSegmentedColormap.from_list" ] ]
Inquiring-Nomad/ml-ops-simple
[ "ab7047e0b1d0ff8fbb2843d7a727a8d2a0f47b37" ]
[ "src/models/eval_model.py" ]
[ "import os\nimport warnings\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split,cross_val_score\nfrom sklearn.model_selection import GridSearchCV,RandomizedSearchCV\nfrom sklearn.linear_model import LinearRegression\nimport argparse\nimport joblib\nimport json\nimport yaml\nimport pickle\n\n\ndef eval_metrics(actual, pred):\n rmse = np.sqrt(mean_squared_error(actual, pred))\n mae = mean_absolute_error(actual, pred)\n r2 = r2_score(actual, pred)\n return rmse, mae, r2\n\n\ndef eval(config_path):\n config = read_params(config_path)\n test_data_path = config[\"split_data\"][\"test_path\"]\n pipeline_path = config[\"prepro_data\"][\"pipeline_path\"]\n model_dir = config[\"model_dir\"]\n target = [config[\"base\"][\"target_col\"]]\n df = pd.read_csv(test_data_path,sep=\",\")\n X_test = df.drop(\"median_house_value\", axis=1)\n y_test = df[\"median_house_value\"].copy()\n pipeline = pickle.load(open(pipeline_path, 'rb'))\n\n\n model_path = os.path.join(model_dir, \"linear_regression.joblib\")\n linear_reg_model = joblib.load(model_path)\n X_test_preprocessed = pipeline.transform(X_test)\n final_predictions = linear_reg_model.predict(X_test_preprocessed)\n (rmse, mae, r2) = eval_metrics(y_test, final_predictions)\n ln_scores_file = config[\"reports\"][\"linreg_eval_scores\"]\n with open(ln_scores_file, \"w\") as f:\n scores = {\n \"rmse\": rmse,\n \"mae\": mae,\n \"r2\": r2\n }\n json.dump(scores, f, indent=4)\n\n #Random Forrest\n rfr_model_path = os.path.join(model_dir, \"rfr.joblib\")\n rfr_model = joblib.load(rfr_model_path)\n X_test_preprocessed = pipeline.transform(X_test)\n final_rfr_predictions = rfr_model.predict(X_test_preprocessed)\n (rmse, mae, r2) = eval_metrics(y_test, final_rfr_predictions)\n rfr_scores_file = config[\"reports\"][\"rfr_eval_scores\"]\n with open(rfr_scores_file, \"w\") as f:\n scores = {\n \"rmse\": rmse,\n \"mae\": mae,\n \"r2\": r2\n }\n json.dump(scores, f, indent=4)\n\n\n\n\n\n\n\ndef read_params(config_path):\n with open(config_path) as yaml_file:\n config = yaml.safe_load(yaml_file)\n return config\n\n\nif __name__ == \"__main__\":\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", default=\"params.yaml\")\n parsed_args = args.parse_args()\n eval(config_path=parsed_args.config)" ]
[ [ "sklearn.metrics.mean_absolute_error", "sklearn.metrics.r2_score", "sklearn.metrics.mean_squared_error", "pandas.read_csv" ] ]
olsonjonny/cusp_cirq_demo
[ "f7236d93018d4c7d7eb177dba3188d9c1fa1cffe" ]
[ "cusp/cusp_stage2.py" ]
[ "\"\"\"Routines for Stage Two of CUSP: Training the Quantum Autoencoder.\"\"\"\n\nimport numpy as np\nfrom multiprocessing import Pool\n\nfrom cirq import Circuit, MeasurementGate, ParamResolver\nfrom cirq.ops import *\nfrom cirq.google import ExpZGate, XmonQubit, XmonSimulator\nfrom cirq.circuits import InsertStrategy\nfrom cirq.contrib.jobs.job import Job\nfrom cirq.contrib.jobs.depolarizer_channel import DepolarizerChannel\n\nimport settings\nfrom cusp_demo_utils import *\n\n\n# Probability of noisy Z rotations\nnoise_level = settings.noise_level\n\n# Qubit initialization\nq00, q01, q10, q11 = settings.q00, settings.q01, settings.q10, settings.q11 \nqubit_ordering = settings.qubit_ordering\n\ndef _input_prep_gates_stage2(alpha):\n \"\"\"Helper routine for producing sequence of gates\n for the state preparation circuit.\n\n Args:\n =====\n alpha : numeric\n Parameter for state preparation circuit\n\n Returns:\n ========\n state_prep_gates : list\n List (ordered sequence) of Cirq gates for the state preparation circuit\n\n \"\"\"\n state_prep_gates = ([X(q10),\n X(q11),\n H(q00),\n X2(q01),\n X2(q10),\n X2(q11),\n CNOT(q00, q01),\n CNOT(q01, q10),\n CNOT(q10, q11),\n param_Z(alpha)(q11),\n CNOT(q10, q11),\n CNOT(q01, q10),\n CNOT(q00, q01),\n H(q00),\n X2inv(q01),\n X2(q10),\n X2(q11)])\n return state_prep_gates\n\ndef compression_circuit(a, b, x, z, alpha, exact=False):\n \"\"\"Returns compression circuit (state preparation circuit followed by\n encoding circuit).\n\n Args:\n =====\n a, b, x, z : numeric\n Circuit parameters for encoding circuit\n alpha : numeric\n Parameter for state preparation circuit\n exact : bool\n If True, works with wavefunction\n\n Returns:\n ========\n comp_circuit : cirq.Circuit\n Compression circuit\n \"\"\"\n comp_circuit = Circuit()\n comp_circuit.append(_input_prep_gates_stage2(alpha))\n comp_circuit.append(param_CNOT(a, b, x, z, q01, q00))\n comp_circuit.append(param_CNOT(a, b, x, z, q11, q10), strategy=InsertStrategy.EARLIEST)\n comp_circuit.append(param_CNOT(a, b, x, z, q11, q01))\n if exact == False:\n comp_circuit.append([MeasurementGate('r00').on(q00),\n MeasurementGate('r01').on(q01),\n MeasurementGate('r10').on(q10),\n MeasurementGate('r11').on(q11)])\n return comp_circuit\n\ndef noisy_job(a, b, x, z, alpha, exact=False):\n \"\"\"Adds noise to compression circuit.\n\n Args:\n =====\n a, b, x, z : numeric\n Circuit parameters for encoding circuit\n alpha : numeric\n Parameter for state preparation circuit\n exact : bool\n If True, works with wavefunction\n\n Returns:\n ========\n noisy_circuit : cirq.Circuit\n Noisy version of input circuit\n param_resolvers : list\n \"\"\"\n job = Job(compression_circuit(a, b, x, z, alpha, exact))\n noisy = DepolarizerChannel(probability=noise_level)\n noisy_job = noisy.transform_job(job)\n param_resolvers = [ParamResolver({k:v for k, v in e}) for e in noisy_job.sweep.param_tuples()]\n return noisy_job.circuit, param_resolvers\n\ndef _run_sim_stage2(a, b, x, z, alpha, exact=False, print_circuit=False, noisy=False):\n \"\"\"Executes circuit a single time. Outputs 1 for a success (i.e. reference qubits are |000>)\n and 0 for a failure.\n\n Args:\n =====\n a, b, x, z : numeric\n Circuit parameters for encoding circuit\n alpha : numeric\n Parameter for state preparation circuit\n exact : bool\n If True, works with wavefunction\n print_circuit : bool\n If True, prints circuit\n noisy : bool\n If True, runs noisy version of circuit\n\n Returns:\n ========\n total : int\n Value of 1 if reference qubits are all 0's. 
Value of 0 otherwise.\n        \"\"\"\n    simulator = XmonSimulator()\n\n    if noisy:\n        circuit_run, resolvers = noisy_job(a, b, x, z, alpha, exact)\n    else:\n        circuit_run = compression_circuit(a, b, x, z, alpha, exact)\n    \n    if exact:\n        if noisy:\n            for resolver in resolvers:\n                result = simulator.simulate(circuit=circuit_run, param_resolver=resolver)\n        else:\n            result = simulator.simulate(circuit=circuit_run)\n        avg = 0\n        for j in range(2):\n            avg += np.abs(result.final_state[j])**2\n        return avg\n    \n    else:\n        if noisy:\n            for resolver in resolvers:\n                result = simulator.run(circuit=circuit_run,\n                                       param_resolver=resolver, repetitions=1)\n        else:\n            result = simulator.run(circuit=circuit_run, repetitions=1)\n        \n        reference_measurements = []\n        reference_labels = ['r00', 'r01', 'r10']\n        for j in reference_labels:\n            reference_measurements.append(int(result.measurements[j][0]))\n        total = 0\n        res = []\n        for y in range(3):\n            res.append(reference_measurements[y])\n        if res == [0, 0, 0]:\n            total = 1\n        if print_circuit==True:\n            print(circuit_run.to_text_diagram(use_unicode_characters=False))\n        return total\n\ndef one_run(a, b, x, z, alpha):\n    success = _run_sim_stage2(a, b, x, z, alpha, exact=True, print_circuit=False, noisy=True)\n    return success\n\ndef compute_stage2_cost_function(a, b, x, z, alpha, n_repetitions, exact=False, noisy=False):\n    \"\"\"Executes the circuit multiple times and computes the average fidelity\n    over n runs (n_repetitions).\n\n    Args:\n    =====\n    a, b, x, z : numeric\n        Circuit parameters for encoding circuit\n    alpha : numeric\n        Parameter for state preparation circuit\n    n_repetitions : int\n        Number of circuit runs\n    exact : bool\n        If True, works with wavefunction\n    noisy : bool\n        If True, runs noisy version of circuit\n\n    Returns:\n    ========\n    avg_fid : float\n        Average fidelity (maximum: 1)\n    \"\"\"\n    if exact == True and noisy == False:\n        return _run_sim_stage2(a, b, x, z, alpha, exact=exact, print_circuit=False, noisy=noisy)\n    \n    # success_count = 0\n    # for k in range(n_repetitions):\n    #     fid_one_run = _run_sim_stage2(a,b,x,z,alpha, exact=exact, print_circuit=False, noisy=noisy)\n    #     success_count += fid_one_run    \n    # success_count = success_count / float(n_repetitions)\n    p = Pool()\n    args = [(a,b,x,z,alpha)] * n_repetitions\n    results = p.starmap(one_run,args)\n    success_count = np.array(results).sum()\n    avg_fid = float(success_count) / float(n_repetitions)\n    \n    return avg_fid\n" ]
[ [ "numpy.array", "numpy.abs" ] ]
JulienL3vesque/Hexoskin_RnD_OSM
[ "b524430d6f4b2b300d119b6a1586141e6c2d14a3", "b524430d6f4b2b300d119b6a1586141e6c2d14a3" ]
[ "Python Project Filter-Detect-GUI/mne/io/bti/tests/test_bti.py", "Python Project Filter-Detect-GUI/various_functions.py" ]
[ "from __future__ import print_function\n# Authors: Denis Engemann <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\nfrom functools import reduce, partial\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import (assert_array_almost_equal, assert_array_equal,\n assert_allclose)\nfrom nose.tools import assert_true, assert_raises, assert_equal\n\nfrom mne.datasets import testing\nfrom mne.io import read_raw_fif, read_raw_bti\nfrom mne.io.bti.bti import (_read_config, _process_bti_headshape,\n _read_bti_header, _get_bti_dev_t,\n _correct_trans, _get_bti_info)\nfrom mne.io.tests.test_raw import _test_raw_reader\nfrom mne.tests.common import assert_dig_allclose\nfrom mne.io.pick import pick_info\nfrom mne.io.constants import FIFF\nfrom mne import pick_types\nfrom mne.utils import run_tests_if_main, slow_test\nfrom mne.transforms import Transform, combine_transforms, invert_transform\nfrom mne.externals import six\n\nwarnings.simplefilter('always')\n\nbase_dir = op.join(op.abspath(op.dirname(__file__)), 'data')\n\narchs = 'linux', 'solaris'\npdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]\nconfig_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]\nhs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]\nexported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)\n for a in archs]\ntmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')\n\nfname_2500 = op.join(testing.data_path(download=False), 'BTi', 'erm_HFH',\n 'c,rfDC')\n\n# the 4D exporter doesn't export all channels, so we confine our comparison\nNCH = 248\n\n\[email protected]_testing_data\ndef test_read_2500():\n \"\"\"Test reading data from 2500 system.\"\"\"\n _test_raw_reader(read_raw_bti, pdf_fname=fname_2500, head_shape_fname=None)\n\n\ndef test_read_config():\n \"\"\"Test read bti config file.\"\"\"\n # for config in config_fname, config_solaris_fname:\n for config in config_fnames:\n cfg = _read_config(config)\n assert_true(all('unknown' not in block.lower() and block != ''\n for block in cfg['user_blocks']))\n\n\ndef test_crop_append():\n \"\"\"Test crop and append raw.\"\"\"\n raw = _test_raw_reader(\n read_raw_bti, pdf_fname=pdf_fnames[0],\n config_fname=config_fnames[0], head_shape_fname=hs_fnames[0])\n y, t = raw[:]\n t0, t1 = 0.25 * t[-1], 0.75 * t[-1]\n mask = (t0 <= t) * (t <= t1)\n raw_ = raw.copy().crop(t0, t1)\n y_, _ = raw_[:]\n assert_true(y_.shape[1] == mask.sum())\n assert_true(y_.shape[0] == y.shape[0])\n\n\ndef test_transforms():\n \"\"\"Test transformations.\"\"\"\n bti_trans = (0.0, 0.02, 0.11)\n bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))\n for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames):\n raw = read_raw_bti(pdf, config, hs, preload=False)\n dev_ctf_t = raw.info['dev_ctf_t']\n dev_head_t_old = raw.info['dev_head_t']\n ctf_head_t = raw.info['ctf_head_t']\n\n # 1) get BTI->Neuromag\n bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))\n\n # 2) get Neuromag->BTI head\n t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,\n 'meg', 'ctf_head')\n # 3) get Neuromag->head\n dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head')\n\n assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans'])\n\n\n@slow_test\ndef test_raw():\n \"\"\"Test bti conversion to Raw object.\"\"\"\n for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,\n exported_fnames):\n # rx = 2 if 'linux' in pdf else 0\n assert_raises(ValueError, 
read_raw_bti, pdf, 'eggs', preload=False)\n        assert_raises(ValueError, read_raw_bti, pdf, config, 'spam',\n                      preload=False)\n        if op.exists(tmp_raw_fname):\n            os.remove(tmp_raw_fname)\n        ex = read_raw_fif(exported, preload=True)\n        ra = read_raw_bti(pdf, config, hs, preload=False)\n        assert_true('RawBTi' in repr(ra))\n        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])\n        assert_array_almost_equal(ex.info['dev_head_t']['trans'],\n                                  ra.info['dev_head_t']['trans'], 7)\n        assert_dig_allclose(ex.info, ra.info)\n        coil1, coil2 = [np.concatenate([d['loc'].flatten()\n                                        for d in r_.info['chs'][:NCH]])\n                        for r_ in (ra, ex)]\n        assert_array_almost_equal(coil1, coil2, 7)\n\n        loc1, loc2 = [np.concatenate([d['loc'].flatten()\n                                      for d in r_.info['chs'][:NCH]])\n                      for r_ in (ra, ex)]\n        assert_allclose(loc1, loc2)\n\n        assert_allclose(ra[:NCH][0], ex[:NCH][0])\n        assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]],\n                           [c['range'] for c in ex.info['chs'][:NCH]])\n        assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]],\n                           [c['cal'] for c in ex.info['chs'][:NCH]])\n        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])\n\n        # check our transforms\n        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):\n            if ex.info[key] is None:\n                pass\n            else:\n                assert_true(ra.info[key] is not None)\n                for ent in ('to', 'from', 'trans'):\n                    assert_allclose(ex.info[key][ent],\n                                    ra.info[key][ent])\n\n        ra.save(tmp_raw_fname)\n        re = read_raw_fif(tmp_raw_fname)\n        print(re)\n        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):\n            assert_true(isinstance(re.info[key], dict))\n            this_t = re.info[key]['trans']\n            assert_equal(this_t.shape, (4, 4))\n            # check that the matrix is not the identity\n            assert_true(not np.allclose(this_t, np.eye(4)))\n        os.remove(tmp_raw_fname)\n\n\ndef test_info_no_rename_no_reorder_no_pdf():\n    \"\"\"Test private renaming, reordering and partial construction option.\"\"\"\n    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):\n        info, bti_info = _get_bti_info(\n            pdf_fname=pdf, config_fname=config, head_shape_fname=hs,\n            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,\n            ecg_ch='E31', eog_ch=('E63', 'E64'),\n            rename_channels=False, sort_by_ch_name=False)\n        info2, bti_info = _get_bti_info(\n            pdf_fname=None, config_fname=config, head_shape_fname=hs,\n            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,\n            ecg_ch='E31', eog_ch=('E63', 'E64'),\n            rename_channels=False, sort_by_ch_name=False)\n\n        assert_equal(info['ch_names'],\n                     [ch['ch_name'] for ch in info['chs']])\n        assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],\n                     ['A22', 'A2', 'A104', 'A241', 'A138'])\n        assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],\n                     ['A133', 'A158', 'A44', 'A134', 'A216'])\n\n        info = pick_info(info, pick_types(info, meg=True, stim=True,\n                                          resp=True))\n        info2 = pick_info(info2, pick_types(info2, meg=True, stim=True,\n                                            resp=True))\n\n        assert_true(info['sfreq'] is not None)\n        assert_true(info['lowpass'] is not None)\n        assert_true(info['highpass'] is not None)\n        assert_true(info['meas_date'] is not None)\n\n        assert_equal(info2['sfreq'], None)\n        assert_equal(info2['lowpass'], None)\n        assert_equal(info2['highpass'], None)\n        assert_equal(info2['meas_date'], None)\n\n        assert_equal(info['ch_names'], info2['ch_names'])\n        assert_equal(info['ch_names'], info2['ch_names'])\n        for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:\n            assert_array_equal(info[key]['trans'], info2[key]['trans'])\n\n        assert_array_equal(\n            np.array([ch['loc'] for ch in info['chs']]),\n            np.array([ch['loc'] for ch in 
info2['chs']]))\n\n # just check reading data | corner case\n raw1 = read_raw_bti(\n pdf_fname=pdf, config_fname=config, head_shape_fname=None,\n sort_by_ch_name=False, preload=True)\n # just check reading data | corner case\n raw2 = read_raw_bti(\n pdf_fname=pdf, config_fname=config, head_shape_fname=None,\n rename_channels=False,\n sort_by_ch_name=True, preload=True)\n\n sort_idx = [raw1.bti_ch_labels.index(ch) for ch in raw2.bti_ch_labels]\n raw1._data = raw1._data[sort_idx]\n assert_array_equal(raw1._data, raw2._data)\n assert_array_equal(raw2.bti_ch_labels, raw2.ch_names)\n\n\ndef test_no_conversion():\n \"\"\"Test bti no-conversion option.\"\"\"\n get_info = partial(\n _get_bti_info,\n rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,\n ecg_ch='E31', eog_ch=('E63', 'E64'),\n rename_channels=False, sort_by_ch_name=False)\n\n for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):\n raw_info, _ = get_info(pdf, config, hs, convert=False)\n raw_info_con = read_raw_bti(\n pdf_fname=pdf, config_fname=config, head_shape_fname=hs,\n convert=True, preload=False).info\n\n pick_info(raw_info_con,\n pick_types(raw_info_con, meg=True, ref_meg=True),\n copy=False)\n pick_info(raw_info,\n pick_types(raw_info, meg=True, ref_meg=True), copy=False)\n bti_info = _read_bti_header(pdf, config)\n dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])\n assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])\n assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))\n assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))\n dig, t = _process_bti_headshape(hs, convert=False, use_hpi=False)\n assert_array_equal(t['trans'], np.eye(4))\n\n for ii, (old, new, con) in enumerate(zip(\n dig, raw_info['dig'], raw_info_con['dig'])):\n assert_equal(old['ident'], new['ident'])\n assert_array_equal(old['r'], new['r'])\n assert_true(not np.allclose(old['r'], con['r']))\n\n if ii > 10:\n break\n\n ch_map = dict((ch['chan_label'],\n ch['loc']) for ch in bti_info['chs'])\n\n for ii, ch_label in enumerate(raw_info['ch_names']):\n if not ch_label.startswith('A'):\n continue\n t1 = ch_map[ch_label] # correction already performed in bti_info\n t2 = raw_info['chs'][ii]['loc']\n t3 = raw_info_con['chs'][ii]['loc']\n assert_allclose(t1, t2, atol=1e-15)\n assert_true(not np.allclose(t1, t3))\n idx_a = raw_info_con['ch_names'].index('MEG 001')\n idx_b = raw_info['ch_names'].index('A22')\n assert_equal(\n raw_info_con['chs'][idx_a]['coord_frame'],\n FIFF.FIFFV_COORD_DEVICE)\n assert_equal(\n raw_info['chs'][idx_b]['coord_frame'],\n FIFF.FIFFV_MNE_COORD_4D_HEAD)\n\n\ndef test_bytes_io():\n \"\"\"Test bti bytes-io API.\"\"\"\n for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):\n raw = read_raw_bti(pdf, config, hs, convert=True, preload=False)\n\n with open(pdf, 'rb') as fid:\n pdf = six.BytesIO(fid.read())\n with open(config, 'rb') as fid:\n config = six.BytesIO(fid.read())\n with open(hs, 'rb') as fid:\n hs = six.BytesIO(fid.read())\n\n raw2 = read_raw_bti(pdf, config, hs, convert=True, preload=False)\n repr(raw2)\n assert_array_equal(raw[:][0], raw2[:][0])\n\n\ndef test_setup_headshape():\n \"\"\"Test reading bti headshape.\"\"\"\n for hs in hs_fnames:\n dig, t = _process_bti_headshape(hs)\n expected = set(['kind', 'ident', 'r'])\n found = set(reduce(lambda x, y: list(x) + list(y),\n [d.keys() for d in dig]))\n assert_true(not expected - found)\n\nrun_tests_if_main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 30 13:06:52 2017\n\n@author: Mostafa Hammoud\nThis 
script contains various functions that will be used\n\"\"\"\n\nimport numpy as np\nimport operator\nfrom os import listdir\nfrom os.path import isfile, join\nfrom matplotlib import pyplot as plt\n\"\"\"\nThis function takes the R waves that were detected and makes sure they are good \nR waves\n\"\"\"\ndef fix_r(peaks,long_filt_ecg):\n    # Loop through the peaks    \n    for i in range (0,len(peaks)):\n        # Select a range close to the peaks to check the maximum\n        temp_range = np.arange(peaks[i] - 10,peaks[i] + 10)\n        \n        # If we have a negative R wave we don't want to confuse it with a \n        # significant S wave\n        if long_filt_ecg[peaks[i]] < 0:\n            temp_range = np.arange(peaks[i] - 15,peaks[i] + 10)\n        # Make sure the range does not go out of bound\n        if temp_range[-1] >= len(long_filt_ecg):\n            temp_range = np.arange(peaks[i] - 6,len(long_filt_ecg) - 1)\n        # Find the maximum\n        max_index, max_value = max(enumerate(long_filt_ecg[temp_range]), \n                                   key=operator.itemgetter(1))\n        # Set the peak to the maximum that was found\n        peaks[i] = max_index + temp_range[0]\n    \n    return peaks\n    \n\"\"\"\nThis function takes an upper limit and a lower limit and removes all values that\nare not in that range\nupper: Upper limit\nlower: Lower limit\nequal: Boolean to select if the limits are included. Default is True\n\n\"\"\" \ndef remove_unwanted(array, upper, lower, equal = True):\n    \n    out = []\n    \n    for i in array:\n        # i can be []\n        if np.size(i) > 0:\n            if equal is True:\n                if (i >= lower and i <= upper):\n                    out.append(i)\n            else:\n                if (i > lower and i < upper):\n                    out.append(i)  \n    \n    # Select the peaks that are inside the range start to end\n    array = out\n    \n    return array\n\"\"\"\nThis function returns an array with the path to all three ECGs\n\"\"\"\ndef get_ecg_path(file_name):\n    \n    ecg_path = [f for f in listdir(file_name) if isfile(join(file_name,f))]\n    \n    j = 0\n    for i in ecg_path:\n        \n        if 'ECG_I.csv' in i or 'ECG_II.csv' in i or 'ECG_III.csv' in i:\n            ecg_path[j] = file_name + '\\\\' + i\n        j += 1\n    \n    return ecg_path\n\"\"\"\nThis function calculates the derivative\n\"\"\"\ndef derivative(y, fs):\n    \n    if len(y) < 2:\n        return 0\n    \n    dy = np.zeros(len(y),float)\n    dx = 1/fs\n    \n    dy[0:-1] = np.diff(y)/dx\n    \n    dy[-1] = (y[-1] - y[-2])/dx\n    \n    return dy\n\n\"\"\"\nThis function plots the points on the figure\n\"\"\"\ndef plot_points(long_ecg, index, long_time, start, end):\n    \n    r = remove_unwanted(index.r, end, start)  \n    \n    if (len(r)):\n        \n        p = remove_unwanted(index.p, end, start)   \n        \n        q = remove_unwanted(index.q, end, start)  \n\n        s = remove_unwanted(index.s, end, start)  \n\n        t = remove_unwanted(index.t, end, start)  \n        \n        pon = remove_unwanted(index.pon , end, start)  \n        poff = remove_unwanted(index.poff, end, start)  \n        qon = remove_unwanted(index.qon , end, start)  \n        soff = remove_unwanted(index.soff, end, start)  \n        ton = remove_unwanted(index.ton , end, start)  \n        toff = remove_unwanted(index.toff, end, start)  \n        \n        plt.scatter(long_time[r],long_ecg[r], color = 'red')\n        plt.scatter(long_time[p],long_ecg[p], color = 'blue')\n        plt.scatter(long_time[q],long_ecg[q], color = 'green')\n        plt.scatter(long_time[s],long_ecg[s], color = 'magenta')\n        plt.scatter(long_time[t],long_ecg[t], color = 'black')\n        \n        #plt.scatter(long_time[pon],long_ecg[pon])\n        #plt.scatter(long_time[poff],long_ecg[poff])\n        #plt.scatter(long_time[qon],long_ecg[qon])\n        #plt.scatter(long_time[soff],long_ecg[soff])\n        #plt.scatter(long_time[ton],long_ecg[ton])\n        #plt.scatter(long_time[toff],long_ecg[toff])\n\n\"\"\"\n\nThis function loops through the index from 
start to end and fixes the location\nof the peaks or troughs, since sometimes we might have a small shift in their\nlocation.\n\nIf the start and end were not indicated then we loop through the whole index.\n\n\"\"\"\ndef fix_peaks(long_ecg, wave_index, start_index, end_index, peak = False, \n              trough = False):\n    # If we are dealing with a peak, then we keep the ecg as it is\n    if peak:\n        ecg = long_ecg.copy()\n    # If we are dealing with a trough, then we invert the ecg so the min is max\n    elif trough:\n        ecg = -long_ecg.copy()   \n    else:\n        print('A type must be selected')\n        return\n\n    for i in range(start_index, end_index):\n        if np.size(wave_index[i]) > 0:\n            temp_range = np.arange(wave_index[i] - 2,wave_index[i] + 3)\n            \n            if temp_range[-1] >= len(ecg):\n                temp_range = np.arange(wave_index[i] - 2,len(ecg) - 1)\n            \n            # Find the maximum\n            max_index, max_value = max(enumerate(ecg[temp_range]), \n                                       key=operator.itemgetter(1))\n            \n            # Set the peak to the maximum that was found\n            wave_index[i] = max_index + temp_range[0]\n\n    return wave_index" ]
[ [ "numpy.eye", "numpy.allclose", "numpy.testing.assert_array_equal", "numpy.testing.assert_array_almost_equal", "numpy.testing.assert_allclose", "numpy.array" ], [ "numpy.arange", "numpy.size", "matplotlib.pyplot.scatter", "numpy.diff" ] ]
sky-2002/weaviate-examples
[ "34bb73f8d2096e04aaf19455f2f8da743f21b8d5" ]
[ "attendance-system-example/mark.py" ]
[ "import weaviate\nimport cv2\nimport os,sys\nimport pandas as pd\nfrom student_test import getFaces, testImage, testit\n\ndef markAttendance(faces,own=False):\n '''\n This function takes in a list of image paths (paths of face images)\n and then uses weaviate's image2vec-neural module to classify\n each image depending upon the images initially uploaded to weaviate.\n It then classifies the image (ie identifies the student) and marks their attendance.\n '''\n data = pd.DataFrame(columns=['Name','Present'])\n for img in faces:\n \n if own:\n name = testit(img)\n else:\n name = testImage({\"image\":\"{}\".format(img)})\n #print(name)\n dict = {\"Name\":name,\"Present\":'P'}\n data = data.append(dict,ignore_index=True)\n print(data)\n return data" ]
[ [ "pandas.DataFrame" ] ]
ufpa-organization-repositories/artificial-neural-networks
[ "bfd3e62f9f8c353a1d3ca798928a7db5a6aabc50" ]
[ "final_homework/aplicacaoKohonen_nDeTecnologias.py" ]
[ "from openpyxl import load_workbook\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# book = load_workbook('results_teste.xlsx')\nbook = load_workbook('results_pleno_completo.xlsx')\nsheet = book.active\nfrom trabalho_final.kohonen import kohonen\n\ni = 2\nend = False\n\njobs_dict = dict()\n\nwhile not end:\n if not sheet['A' + str(i)].value == None:\n # jobs_dict['A' + str(i)] = [int(sheet['A' + str(i)].value[2:].replace('.', '')), []]\n jobs_dict['A' + str(i)] = [float(sheet['A' + str(i)].value[2:].replace('.', '')) / 1000, []]\n j = 1\n\n while not sheet[chr(ord(chr(ord('B') + j))) + str(i)].value == None:\n jobs_dict['A' + str(i)][1].append(sheet[chr(ord(chr(ord('B') + j))) + str(i)].value)\n j += 1\n\n i += 1\n else:\n end = True\n\n\ninitial_SIGMA = 1\ninitial_ALPHA = 0.2\n\nn_epocas = 1\n\nSIGMA_array = np.linspace(initial_SIGMA, 0.1, n_epocas)\nALPHA_array = np.linspace(initial_ALPHA, 0.1, n_epocas)\n\nw_arrays = np.zeros((1000, 2))\n\nfor index, array in enumerate(w_arrays):\n # print(index, array)\n # w_arrays[index] = [np.random.randint(1000, 10000), np.random.random() * 2]\n w_arrays[index] = [np.random.random() * 10, np.random.random() * 10]\n\n\nx = [array[1] for array in w_arrays]\ny = [array[0] for array in w_arrays]\n# plt.scatter(x=x, y=y, label='Pesos iniciais', color='black')\nplt.scatter(x=x, y=y, label='Initial weights', color='gray', edgecolors='black', marker='_')\nplt.legend()\n\nplt.title('number of epochs: ' + str(n_epocas))\nplt.xlabel('number of required technologies')\nplt.ylabel('Salary (x2000)')\nplt.savefig('nTechnologies_initial')\nplt.show()\nplt.close()\n\nentry = np.array([])\n\ncurrent_job = 0\nfor epoca in range(n_epocas):\n print('epoca: ', epoca)\n for key, value in jobs_dict.items():\n print(key, jobs_dict.__len__())\n # print('\\n' + '-' * 30)\n # print(key, value)\n temp_atributes = []\n\n entry = np.array([jobs_dict[key][0], value[1].__len__()])\n\n # w_arrays = kohonen(w_arrays=w_arrays, entry=entry, SIGMA=initial_SIGMA, ALPHA=initial_ALPHA, n_epocas=1)\n w_arrays = kohonen(w_arrays=w_arrays, entry=entry, SIGMA=SIGMA_array[epoca], ALPHA=ALPHA_array[epoca], n_epocas=1)\n\n\n current_job += 1\n\n current_job = 0\n\nprint(w_arrays)\nx = [array[1] for array in w_arrays]\ny = [array[0] for array in w_arrays]\nplt.scatter(x=x, y=y, label='final weights', color='gray', edgecolors='black', marker='_')\nplt.legend()\n\nplt.title('number of epochs: ' + str(n_epocas))\nplt.xlabel('number of required technologies')\nplt.ylabel('Salary (x2000)')\n\nplt.savefig('ntechnologies_final.png')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.zeros", "matplotlib.pyplot.savefig", "numpy.random.random", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "numpy.array", "numpy.linspace", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
OliverZijia/tensorlayer2
[ "01113b53e84a3bbb298b9c35ebd53254e487350f" ]
[ "tests/layers/test_layers_convolution.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\nfrom tensorlayer.models import *\n\nfrom tests.utils import CustomTestCase\n\n\nclass Layer_Convolution_1D_Test(CustomTestCase):\n\n @classmethod\n def setUpClass(cls):\n print(\"\\n#################################\")\n\n cls.batch_size = 8\n cls.inputs_shape = [cls.batch_size, 100, 1]\n cls.input_layer = Input(cls.inputs_shape, name='input_layer')\n\n cls.n1 = tl.layers.Conv1dLayer(\n shape=(5, 1, 32), stride=2\n )(cls.input_layer)\n\n cls.n2 = tl.layers.Conv1d(\n n_filter=32, filter_size=5, stride=2\n )(cls.n1)\n\n cls.n3 = tl.layers.DeConv1dLayer(\n shape=(5, 64, 32), outputs_shape=(cls.batch_size, 50, 64), strides=(1, 2, 1), name='deconv1dlayer'\n )(cls.n2)\n\n cls.n4 = tl.layers.SeparableConv1d(\n n_filter=32, filter_size=3, strides=2, padding='SAME', act=tf.nn.relu, name='separable_1d'\n )(cls.n3)\n\n cls.n5 = tl.layers.SubpixelConv1d(\n scale=2, act=tf.nn.relu, in_channels=32, name='subpixel_1d'\n )(cls.n4)\n\n cls.model = Model(\n inputs=cls.input_layer, outputs=cls.n5\n )\n print(\"Testing Conv1d model: \\n\", cls.model)\n\n @classmethod\n def tearDownClass(cls):\n pass\n # tf.reset_default_graph()\n\n def test_layer_n1(self):\n\n # self.assertEqual(len(self.n1.all_layers), 2)\n # self.assertEqual(len(self.n1.all_params), 2)\n # self.assertEqual(self.n1.count_params(), 192)\n self.assertEqual(len(self.n1._info[0].layer.weights), 2)\n self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32])\n\n def test_layer_n2(self):\n\n # self.assertEqual(len(self.n2.all_layers), 3)\n # self.assertEqual(len(self.n2.all_params), 4)\n # self.assertEqual(self.n2.count_params(), 5344)\n self.assertEqual(len(self.n2._info[0].layer.weights), 2)\n self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32])\n\n def test_layer_n3(self):\n\n # self.assertEqual(len(self.n2.all_layers), 3)\n # self.assertEqual(len(self.n2.all_params), 4)\n # self.assertEqual(self.n2.count_params(), 5344)\n self.assertEqual(len(self.n3._info[0].layer.weights), 2)\n self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 64])\n\n def test_layer_n4(self):\n\n # self.assertEqual(len(self.n2.all_layers), 3)\n # self.assertEqual(len(self.n2.all_params), 4)\n # self.assertEqual(self.n2.count_params(), 5344)\n self.assertEqual(len(self.n4._info[0].layer.weights), 3)\n self.assertEqual(self.n4.get_shape().as_list()[1:], [25, 32])\n\n def test_layer_n5(self):\n\n # self.assertEqual(len(self.n2.all_layers), 3)\n # self.assertEqual(len(self.n2.all_params), 4)\n # self.assertEqual(self.n2.count_params(), 5344)\n self.assertEqual(self.n5.get_shape().as_list()[1:], [50, 16])\n\n # def test_layer_n3(self):\n #\n # self.assertEqual(len(self.n3.all_layers), 4)\n # self.assertEqual(len(self.n3.all_params), 7)\n # self.assertEqual(self.n3.count_params(), 6496)\n # self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32])\n\n# FIXME: TF2.0 only supports NHWC now\n# class Layer_Convolution_1D_NCW_Test(CustomTestCase):\n#\n# @classmethod\n# def setUpClass(cls):\n# print(\"\\n#################################\")\n#\n# cls.batch_size = 8\n# cls.inputs_shape = [cls.batch_size, 1, 100]\n# cls.input_layer = Input(cls.inputs_shape, name='input_layer')\n#\n# cls.n1 = tl.layers.Conv1dLayer(\n# shape=(5, 1, 32), stride=2, data_format=\"NCW\"\n# )(cls.input_layer)\n# cls.n2 = tl.layers.Conv1d(\n# n_filter=32, filter_size=5, 
stride=2, data_format='channels_first'\n# )(cls.n1)\n# cls.model = Model(inputs=cls.input_layer, outputs=cls.n2)\n# print(\"Testing Conv1d model: \\n\", cls.model)\n#\n# # cls.n3 = tl.layers.SeparableConv1d(\n# # cls.n2, n_filter=32, filter_size=3, strides=1, padding='VALID', act=tf.nn.relu, name='separable_1d'\n# # )\n#\n# @classmethod\n# def tearDownClass(cls):\n# pass\n# # tf.reset_default_graph()\n#\n# def test_layer_n1(self):\n#\n# # self.assertEqual(len(self.n1.all_layers), 2)\n# # self.assertEqual(len(self.n1.all_params), 2)\n# # self.assertEqual(self.n1.count_params(), 192)\n# self.assertEqual(len(self.n1._info[0].layer.weights), 2)\n# self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32])\n#\n# def test_layer_n2(self):\n#\n# # self.assertEqual(len(self.n2.all_layers), 3)\n# # self.assertEqual(len(self.n2.all_params), 4)\n# # self.assertEqual(self.n2.count_params(), 5344)\n# self.assertEqual(len(self.n2._info[0].layer.weights), 2)\n# self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32])\n#\n# # def test_layer_n3(self):\n# #\n# # self.assertEqual(len(self.n3.all_layers), 4)\n# # self.assertEqual(len(self.n3.all_params), 7)\n# # self.assertEqual(self.n3.count_params(), 6496)\n# # self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32])\n\n\nclass Layer_Convolution_2D_Test(CustomTestCase):\n\n @classmethod\n def setUpClass(cls):\n print(\"\\n#################################\")\n\n cls.batch_size = 5\n cls.inputs_shape = [cls.batch_size, 400, 400, 3]\n cls.input_layer = Input(cls.inputs_shape, name='input_layer')\n\n cls.n1 = tl.layers.Conv2dLayer(\n act=tf.nn.relu, shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME',\n b_init=tf.constant_initializer(value=0.0),\n name='conv2dlayer'\n )(cls.input_layer)\n\n cls.n2 = tl.layers.Conv2d(\n n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d'\n )(cls.n1)\n\n cls.n3 = tl.layers.Conv2d(\n n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias'\n )(cls.n2)\n\n cls.n4 = tl.layers.DeConv2dLayer(\n shape=(5, 5, 32, 32), outputs_shape=(cls.batch_size, 100, 100, 32), strides=(1, 2, 2, 1), name='deconv2dlayer'\n )(cls.n3)\n\n cls.n5 = tl.layers.DeConv2d(\n n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d'\n )(cls.n4)\n\n cls.n6 = tl.layers.DepthwiseConv2d(\n filter_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise'\n )(cls.n5)\n\n cls.n7 = tl.layers.Conv2d(\n n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=64, name='conv2d2'\n )(cls.n6)\n\n cls.n8 = tl.layers.BinaryConv2d(\n n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=32, name='binaryconv2d'\n )(cls.n7)\n\n cls.n9 = tl.layers.SeparableConv2d(\n n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='separableconv2d'\n )(cls.n8)\n\n cls.n10 = tl.layers.GroupConv2d(\n n_filter=64, filter_size=(3, 3), strides=(2, 2), n_group=2, name='group'\n )(cls.n9)\n\n cls.n11 = tl.layers.DorefaConv2d(\n n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='dorefaconv2d'\n )(cls.n10)\n\n cls.n12 = tl.layers.TernaryConv2d(\n n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d'\n )(cls.n11)\n\n cls.n13 = tl.layers.QuanConv2d(\n n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d'\n )(cls.n12)\n\n cls.n14 = tl.layers.SubpixelConv2d(\n scale=2, 
act=tf.nn.relu, name='subpixelconv2d'\n )(cls.n13)\n\n cls.model = Model(cls.input_layer, cls.n14)\n print(\"Testing Conv2d model: \\n\", cls.model)\n\n # cls.n12 = tl.layers.QuanConv2d(cls.n11, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='quancnn')\n\n @classmethod\n def tearDownClass(cls):\n pass\n # tf.reset_default_graph()\n\n def test_layer_n1(self):\n\n # self.assertEqual(len(self.n1.all_layers), 2)\n # self.assertEqual(len(self.n1.all_params), 2)\n # self.assertEqual(self.n1.count_params(), 2432)\n self.assertEqual(len(self.n1._info[0].layer.weights), 2)\n self.assertEqual(self.n1.get_shape().as_list()[1:], [200, 200, 32])\n\n def test_layer_n2(self):\n\n # self.assertEqual(len(self.n2.all_layers), 3)\n # self.assertEqual(len(self.n2.all_params), 4)\n # self.assertEqual(self.n2.count_params(), 11680)\n self.assertEqual(len(self.n2._info[0].layer.weights), 2)\n self.assertEqual(self.n2.get_shape().as_list()[1:], [100, 100, 32])\n\n def test_layer_n3(self):\n\n # self.assertEqual(len(self.n3.all_layers), 4)\n # self.assertEqual(len(self.n3.all_params), 5)\n # self.assertEqual(self.n3.count_params(), 20896)\n self.assertEqual(len(self.n3._info[0].layer.weights), 1) # b_init is None\n self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 50, 32])\n\n def test_layer_n4(self):\n\n # self.assertEqual(len(self.n4.all_layers), 5)\n # self.assertEqual(len(self.n4.all_params), 7)\n # self.assertEqual(self.n4.count_params(), 46528)\n self.assertEqual(len(self.n4._info[0].layer.weights), 2)\n self.assertEqual(self.n4.get_shape().as_list()[1:], [100, 100, 32])\n\n def test_layer_n5(self):\n\n # self.assertEqual(len(self.n5.all_layers), 6)\n # self.assertEqual(len(self.n5.all_params), 9)\n # self.assertEqual(self.n5.count_params(), 55776)\n self.assertEqual(len(self.n5._info[0].layer.weights), 2)\n self.assertEqual(self.n5.get_shape().as_list()[1:], [200, 200, 32])\n\n def test_layer_n6(self):\n\n # self.assertEqual(len(self.n6.all_layers), 7)\n # self.assertEqual(len(self.n6.all_params), 11)\n # self.assertEqual(self.n6.count_params(), 56416)\n self.assertEqual(len(self.n6._info[0].layer.weights), 2)\n self.assertEqual(self.n6.get_shape().as_list()[1:], [200, 200, 64])\n\n def test_layer_n7(self):\n\n # self.assertEqual(len(self.n7.all_layers), 8)\n # self.assertEqual(len(self.n7.all_params), 13)\n # self.assertEqual(self.n7.count_params(), 74880)\n self.assertEqual(len(self.n7._info[0].layer.weights), 2)\n self.assertEqual(self.n7.get_shape().as_list()[1:], [100, 100, 32])\n\n def test_layer_n8(self):\n\n # self.assertEqual(len(self.n7.all_layers), 8)\n # self.assertEqual(len(self.n7.all_params), 13)\n # self.assertEqual(self.n7.count_params(), 74880)\n self.assertEqual(len(self.n8._info[0].layer.weights), 2)\n self.assertEqual(self.n8.get_shape().as_list()[1:], [50, 50, 64])\n\n def test_layer_n9(self):\n\n # self.assertEqual(len(self.n7.all_layers), 8)\n # self.assertEqual(len(self.n7.all_params), 13)\n # self.assertEqual(self.n7.count_params(), 74880)\n self.assertEqual(len(self.n9._info[0].layer.weights), 3)\n self.assertEqual(self.n9.get_shape().as_list()[1:], [24, 24, 32])\n\n def test_layer_n10(self):\n # self.assertEqual(len(self.n7.all_layers), 8)\n # self.assertEqual(len(self.n7.all_params), 13)\n # self.assertEqual(self.n7.count_params(), 74880)\n self.assertEqual(len(self.n10._info[0].layer.weights), 2)\n self.assertEqual(self.n10.get_shape().as_list()[1:], [12, 12, 64])\n\n def test_layer_n11(self):\n # self.assertEqual(len(self.n7.all_layers), 8)\n # 
self.assertEqual(len(self.n7.all_params), 13)\n # self.assertEqual(self.n7.count_params(), 74880)\n self.assertEqual(len(self.n11._info[0].layer.weights), 2)\n self.assertEqual(self.n11.get_shape().as_list()[1:], [12, 12, 32])\n\n def test_layer_n12(self):\n # self.assertEqual(len(self.n7.all_layers), 8)\n # self.assertEqual(len(self.n7.all_params), 13)\n # self.assertEqual(self.n7.count_params(), 74880)\n self.assertEqual(len(self.n12._info[0].layer.weights), 2)\n self.assertEqual(self.n12.get_shape().as_list()[1:], [12, 12, 64])\n\n def test_layer_n13(self):\n # self.assertEqual(len(self.n7.all_layers), 8)\n # self.assertEqual(len(self.n7.all_params), 13)\n # self.assertEqual(self.n7.count_params(), 74880)\n self.assertEqual(len(self.n13._info[0].layer.weights), 2)\n self.assertEqual(self.n13.get_shape().as_list()[1:], [12, 12, 32])\n\n def test_layer_n14(self):\n self.assertEqual(self.n14.get_shape().as_list()[1:], [24, 24, 8])\n\n # def test_layer_n8(self):\n #\n # self.assertEqual(len(self.n8.all_layers), 9)\n # self.assertEqual(len(self.n8.all_params), 15)\n # self.assertEqual(self.n8.count_params(), 79520)\n # self.assertEqual(self.n8.outputs.get_shape().as_list()[1:], [50, 50, 32])\n #\n # def test_layer_n9(self):\n #\n # self.assertEqual(len(self.n9.all_layers), 10)\n # self.assertEqual(len(self.n9.all_params), 18)\n # self.assertEqual(self.n9.count_params(), 80864)\n # self.assertEqual(self.n9.outputs.get_shape().as_list()[1:], [48, 48, 32])\n #\n # def test_layer_n10(self):\n #\n # self.assertEqual(len(self.n10.all_layers), 11)\n # self.assertEqual(len(self.n10.all_params), 20)\n # self.assertEqual(self.n10.count_params(), 132128)\n # self.assertEqual(self.n10.outputs.get_shape().as_list()[1:], [48, 48, 64])\n #\n # def test_layer_n11(self):\n #\n # self.assertEqual(len(self.n11.all_layers), 12)\n # self.assertEqual(len(self.n11.all_params), 22)\n # self.assertEqual(self.n11.count_params(), 150592)\n # self.assertEqual(self.n11.outputs.get_shape().as_list()[1:], [96, 96, 32])\n #\n # def test_layer_n12(self):\n #\n # self.assertEqual(len(self.n12.all_layers), 13)\n # self.assertEqual(len(self.n12.all_params), 24)\n # self.assertEqual(self.n12.count_params(), 201856)\n # self.assertEqual(self.n12.outputs.get_shape().as_list()[1:], [96, 96, 64])\n\n\nclass Layer_Convolution_3D_Test(CustomTestCase):\n\n @classmethod\n def setUpClass(cls):\n print(\"\\n#################################\")\n\n cls.batch_size = 5\n cls.inputs_shape = [cls.batch_size, 20, 20, 20, 3]\n cls.input_layer = Input(cls.inputs_shape, name='input_layer')\n\n cls.n1 = tl.layers.Conv3dLayer(\n shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1)\n )(cls.input_layer)\n\n cls.n2 = tl.layers.DeConv3dLayer(\n shape=(2, 2, 2, 128, 32), outputs_shape=(cls.batch_size, 20, 20, 20, 128), strides=(1, 2, 2, 2, 1)\n )(cls.n1)\n\n cls.n3 = tl.layers.Conv3d(\n n_filter=64, filter_size=(3, 3, 3), strides=(3, 3, 3), act=tf.nn.relu, b_init=None, in_channels=128, name='conv3d_no_bias'\n )(cls.n2)\n\n cls.n4 = tl.layers.DeConv3d(\n n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2)\n )(cls.n3)\n\n cls.model = Model(inputs=cls.input_layer, outputs=cls.n4)\n print(\"Testing Conv3d model: \\n\", cls.model)\n\n @classmethod\n def tearDownClass(cls):\n pass\n # tf.reset_default_graph()\n\n def test_layer_n1(self):\n\n # self.assertEqual(len(self.n1.all_layers), 2)\n # self.assertEqual(len(self.n1.all_params), 2)\n # self.assertEqual(self.n1.count_params(), 800)\n self.assertEqual(len(self.n1._info[0].layer.weights), 2)\n 
self.assertEqual(self.n1.get_shape().as_list()[1:], [10, 10, 10, 32])\n\n def test_layer_n2(self):\n\n # self.assertEqual(len(self.n2.all_layers), 3)\n # self.assertEqual(len(self.n2.all_params), 4)\n # self.assertEqual(self.n2.count_params(), 33696)\n self.assertEqual(len(self.n2._info[0].layer.weights), 2)\n self.assertEqual(self.n2.get_shape().as_list()[1:], [20, 20, 20, 128])\n\n def test_layer_n3(self):\n\n # self.assertEqual(len(self.n3.all_layers), 4)\n # self.assertEqual(len(self.n3.all_params), 6)\n # self.assertEqual(self.n3.count_params(), 144320)\n self.assertEqual(len(self.n3._info[0].layer.weights), 1) # b_init is None\n self.assertEqual(self.n3.get_shape().as_list()[1:], [7, 7, 7, 64])\n\n def test_layer_n4(self):\n\n # self.assertEqual(len(self.n3.all_layers), 4)\n # self.assertEqual(len(self.n3.all_params), 6)\n # self.assertEqual(self.n3.count_params(), 144320)\n self.assertEqual(len(self.n4._info[0].layer.weights), 2)\n self.assertEqual(self.n4.get_shape().as_list()[1:], [14, 14, 14, 32])\n\n# class Layer_DeformableConvolution_Test(CustomTestCase):\n#\n# @classmethod\n# def setUpClass(cls):\n#\n# cls.batch_size = 5\n# cls.inputs_shape = [cls.batch_size, 299, 299, 3]\n# cls.input_layer = Input(cls.inputs_shape, name='input_layer')\n#\n# offset1 = tl.layers.Conv2d(\n# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset1'\n# )(cls.input_layer)\n# cls.net1 = tl.layers.DeformableConv2d(\n# offset1, 32, (3, 3), act=tf.nn.relu, name='deformable1'\n# )(cls.input_layer)\n#\n# offset2 = tl.layers.Conv2d(\n# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset2'\n# )(cls.net1)\n# cls.net2 = tl.layers.DeformableConv2d(\n# offset2, 64, (3, 3), act=tf.nn.relu, name='deformable2'\n# )(cls.net1)\n#\n# @classmethod\n# def tearDownClass(cls):\n# pass\n#\n# def test_layer_n1(self):\n#\n# self.assertEqual(len(self.net1.all_layers), 2)\n# self.assertEqual(len(self.net1.all_params), 2)\n# self.assertEqual(self.net1.count_params(), 896)\n# self.assertEqual(self.net1.outputs.get_shape().as_list()[1:], [299, 299, 32])\n#\n# def test_layer_n2(self):\n#\n# self.assertEqual(len(self.net2.all_layers), 3)\n# self.assertEqual(len(self.net2.all_params), 4)\n# self.assertEqual(self.net2.count_params(), 19392)\n# self.assertEqual(self.net2.outputs.get_shape().as_list()[1:], [299, 299, 64])\n\n\nif __name__ == '__main__':\n\n tl.logging.set_verbosity(tl.logging.DEBUG)\n\n unittest.main()\n" ]
[ [ "tensorflow.constant_initializer" ] ]
RajanPatel97/FYP
[ "81ca4a6782c1205e1313da280ee5f5cdeb4f19f7" ]
[ "Embeddings/Char-CNN-RNN/generate_cnn_features.py" ]
[ "import os \nimport csv\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport pickle\nimport numpy as np\ndata_folder = '/media/user/DATA/ArtImages'\n\n\nnet = torchvision.models.resnet50(pretrained=True)\nnet = nn.Sequential(*(list(net.children())[:-2])) \n\nnet.eval()\nnet.cuda()\n\n\n\nfiles = []\nwith open(os.path.join(data_folder, 'new_info.csv'), 'r') as file:\n\treader = csv.reader(file, delimiter='|')\n\theader = next(reader)\n\tfor k, row in enumerate(reader):\n\t\tfiles.append([k, row[0]])\nlen_data = len(files)\nbatch_size = 36\n\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\nt = transforms.Compose([transforms.Resize(512), transforms.TenCrop(224), \n\ttransforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))])\nout_folder = './images/'\nif not os.path.exists(out_folder):\n\tos.makedirs(out_folder)\nmax_file_length = 40\nmanifest = open(os.path.join('./art_data', 'manifest.txt'), 'w')\nfor k, f in files:\n\timg = t(Image.open(os.path.join(data_folder, f)).convert('RGB')).cuda()\n\twith torch.no_grad():\n\t\tfeat = net(img).mean(-1).mean(-1)\n\tfeat = feat.permute(1,0).unsqueeze(0)\n\tfile_name = str(k+1).zfill(5)+'.'+f.replace('/', '_')[:-4]\n\tout_file_name = os.path.join(out_folder, file_name[:max_file_length]+'.npy')\n\tnp.save(out_file_name, feat.cpu().numpy())\n\tmanifest.write(file_name[:max_file_length]+'.t7'+'\\n')\n\t\n" ]
[ [ "torch.no_grad" ] ]
CodeRevenge/practicas_ia_2
[ "b81e3b68680b61785918b19360cb0afc5b14c26e" ]
[ "Practica06_Clustering/codigoFuente/Algorithms/RBF.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass RBF(object):\n \"\"\"Implementation of a Radial Basis Function Network\"\"\"\n def __init__(self, hidden_neurons=2, learning_rate=0.01, max_ephochs=100, min_error = 0.01):\n self.hidden_neurons = hidden_neurons\n self.learning_rate = learning_rate\n self.max_ephochs = max_ephochs\n self.min_eror = min_error\n \n self.w = np.random.randn(hidden_neurons)\n self.bias = np.random.randn(1)\n\n self.errors = []\n\n def fit(self, X, y):\n # Training hidden layer\n self.centroids, self.std_dev = self.kmeans(X, self.hidden_neurons)\n\n # Training output layer\n acumulated_error = 999\n errors = []\n ephoc = 0\n while ephoc < self.max_ephochs:\n self.acumulated_outputs = []\n for i in range(X.shape[0]):\n outputs_rbf = np.array([self.gaussian(X[i], c, s) for c, s, in zip(self.centroids, self.std_dev)])\n net = outputs_rbf.T.dot(self.w) + self.bias\n self.acumulated_outputs.append(net)\n\n \n error = -(y[i] - net).flatten()\n errors.append(error)\n \n self.w = self.w - self.learning_rate * error * outputs_rbf\n self.bias = self.bias - self.learning_rate * error\n acumulated_error = (np.sum(abs((np.array(y) - np.array(self.acumulated_outputs))))) / (len(y)**2)\n self.errors.append(acumulated_error)\n ephoc += 1\n\n def predict(self, X):\n y_pred = []\n for i in range(X.shape[0]):\n outputs_rbf = np.array([self.gaussian(X[i], c, s) for c, s, in zip(self.centroids, self.std_dev)])\n net = outputs_rbf.T.dot(self.w) + self.bias\n y_pred.append(net)\n return np.array(y_pred)\n\n def gaussian(self, x, c, s):\n return np.exp(-1 / (2 * s**2) * (x-c)**2)\n\n def kmeans(self, X, hidden_neurons):\n '''\n Clustering\n '''\n # Choice random elements\n clusters = np.random.choice(np.squeeze(X), size=hidden_neurons)\n prev_clusters = clusters.copy()\n std_dev = np.zeros(hidden_neurons)\n converged = False\n\n while not converged:\n\n distances = np.squeeze(np.abs(X[:, np.newaxis] - clusters[np.newaxis, :]))\n\n closestCluster = np.argmin(distances, axis=1)\n\n for i in range(hidden_neurons):\n pointsForCluster = X[closestCluster == i]\n if len(pointsForCluster) > 0:\n clusters[i] = np.mean(pointsForCluster, axis=0)\n\n converged = np.linalg.norm(clusters - prev_clusters) < 1e-6\n prev_clusters = clusters.copy()\n\n distances = np.squeeze(np.abs(X[:, np.newaxis] - clusters[np.newaxis, :]))\n closestCluster = np.argmin(distances, axis=1)\n\n clustersWithNoPoints = []\n for i in range(hidden_neurons):\n pointsForCluster = X[closestCluster == i]\n if len(pointsForCluster) < 2:\n clustersWithNoPoints.append(i)\n continue\n else:\n std_dev[i] = np.std(X[closestCluster == i])\n\n if len(clustersWithNoPoints) > 0:\n pointsToAverage = []\n for i in range(hidden_neurons):\n if i not in clustersWithNoPoints:\n pointsToAverage.append(X[closestCluster == i])\n pointsToAverage = np.concatenate(pointsToAverage).ravel()\n std_dev[clustersWithNoPoints] = np.mean(np.std(pointsToAverage))\n\n return clusters, std_dev\n\n\nif __name__ == \"__main__\":\n # sample inputs and add noise\n NUM_SAMPLES = 100\n X = np.random.uniform(0., 1., NUM_SAMPLES)\n X = np.sort(X, axis=0)\n noise = np.random.uniform(-0.1, 0.1, NUM_SAMPLES)\n y = np.sin(2 * np.pi * X) + noise\n \n rbfnet = RBF(learning_rate=1e-2, hidden_neurons=2)\n rbfnet.fit(X, y)\n \n y_pred = rbfnet.predict(X)\n \n plt.plot(X, y, '-o', label='true')\n plt.plot(X, y_pred, '-o', label='RBF-Net')\n plt.legend()\n \n plt.tight_layout()\n plt.show()" ]
[ [ "numpy.random.uniform", "matplotlib.pyplot.legend", "numpy.linalg.norm", "numpy.sin", "numpy.zeros", "numpy.squeeze", "numpy.argmin", "matplotlib.pyplot.tight_layout", "numpy.random.randn", "numpy.abs", "numpy.exp", "numpy.std", "matplotlib.pyplot.show", "numpy.sort", "matplotlib.pyplot.plot", "numpy.concatenate", "numpy.array", "numpy.mean" ] ]
vxsharma-14/project-NAnPack
[ "fad644ec9a614605f84562745a317e5512db1d58" ]
[ "nanpack/mesh.py" ]
[ "\"\"\"A module consisting of various meshing functions.\"\"\"\r\n# ***********************************************************************\r\n#\r\n# FILE mesh.py\r\n#\r\n# AUTHOR Dr. Vishal Sharma\r\n#\r\n# VERSION 1.0.0-alpha4\r\n#\r\n# WEBSITE https://github.com/vxsharma-14/project-NAnPack\r\n#\r\n# NAnPack Learner's Edition is distributed under the MIT License.\r\n#\r\n# Copyright (c) 2020 Vishal Sharma\r\n#\r\n# Permission is hereby granted, free of charge, to any person\r\n# obtaining a copy of this software and associated documentation\r\n# files (the \"Software\"), to deal in the Software without restriction,\r\n# including without limitation the rights to use, copy, modify, merge,\r\n# publish, distribute, sublicense, and/or sell copies of the Software,\r\n# and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be\r\n# included in all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\r\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\r\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n#\r\n# You should have received a copy of the MIT License along with\r\n# NAnPack Learner's Edition.\r\n#\r\n# ***********************************************************************\r\n\r\nfrom .backend.exceptions import InvalidValueError\r\n\r\n\r\ndef ComputeGridPoints(Dimension, Length, delX, Height=None, delY=None):\r\n \"\"\"Return the grid points along X and Y direction in the mesh.\r\n\r\n Call signature:\r\n\r\n ComputeGridPoints(Dimension, Length, delX, Height=None, delY=None)\r\n\r\n Parameters\r\n ----------\r\n Dimension: str\r\n\r\n Dimension of the domain. Allowed inputs are \"1D\" or \"2D\".\r\n\r\n Length: float\r\n\r\n Length of the domain.\r\n\r\n delX: float\r\n\r\n Grid step size along X-axis.\r\n\r\n Height: float\r\n\r\n Height of the domain. Value required for 2D applications.\r\n\r\n delY: float\r\n\r\n Grid step size along Y-axis. Value required for 2D applications.\r\n\r\n Returns\r\n -------\r\n iMax : int\r\n\r\n Number of grid points along X-axis within the domain.\r\n\r\n jMax : int\r\n\r\n Number of grid points along Y-axis within the domain. Returns 0 for\r\n 1D applications.\r\n \"\"\"\r\n iMax = int(Length/delX) + 1\r\n if Dimension.upper() == \"2D\":\r\n jMax = int(Height/delY) + 1\r\n else:\r\n jMax = 0\r\n print(\"Calculating grid size: Completed.\")\r\n\r\n return iMax, jMax\r\n\r\n\r\ndef ComputeGridSteps(Dimension, Length, iMax, Height=None, jMax=None):\r\n \"\"\"Return the uniform grid steps size along X and Y axis.\r\n\r\n Call signature:\r\n\r\n ComputeGridSteps(Dimension, Length, iMax, Height=None, jMax=None)\r\n\r\n Parameters\r\n ----------\r\n Dimension: str\r\n\r\n Dimension of the domain. Allowed inputs are \"1D\" or \"2D\".\r\n\r\n Length: float\r\n\r\n Length of the domain.\r\n\r\n iMax : int\r\n\r\n Number of grid points along X-axis within the domain.\r\n\r\n Height: float\r\n\r\n Height of the domain. Value required for 2D applications.\r\n\r\n jMax : int\r\n\r\n Number of grid points along Y-axis within the domain. 
Value\r\n required for 2D applications.\r\n\r\n Returns\r\n -------\r\n delX: float\r\n\r\n Grid step size along X-axis.\r\n\r\n delY: float\r\n\r\n Grid step size along Y-axis. Returns 0.0 for 1D applications.\r\n \"\"\"\r\n delX = Length/(iMax - 1)\r\n if Dimension.upper() == \"2D\":\r\n delY = Height/(jMax - 1)\r\n else:\r\n delY = 0.0\r\n print(\"Calculating grid step size: Completed.\")\r\n\r\n return delX, delY\r\n\r\n\r\ndef RectangularGrid(dX, iMax, dY=None, jMax=None):\r\n \"\"\"Return a rectangular uniform rectangular mesh.\r\n\r\n X and/or Y grid point locations are computed in a cartesian coordinate\r\n system using the grid step size and grid points.\r\n\r\n Call Signature:\r\n\r\n RectangularGrid(dX, iMax, dY=None, jMax=None)\r\n\r\n Parameters\r\n ----------\r\n dX: float\r\n\r\n Grid step size along X-axis.\r\n\r\n iMax : int\r\n\r\n Number of grid points along X-axis within the domain.\r\n\r\n dY: float\r\n\r\n Grid step size along Y-axis. Value required for 2D applications.\r\n\r\n jMax : int\r\n\r\n Number of grid points along Y-axis within the domain. Value\r\n required for 2D applications.\r\n\r\n Returns\r\n -------\r\n X: 1D or 2D array, float\r\n\r\n Returns X coordinates at each grid points locations.\r\n\r\n Y: 2D array, float\r\n\r\n Returns Y coordinates at each grid points locations. Returns 0 for\r\n 1D applications.\r\n \"\"\"\r\n import numpy as np\r\n\r\n if isinstance(dY, float) and isinstance(jMax, int):\r\n X = np.zeros((iMax, jMax), dtype=\"float\")\r\n Y = np.zeros((iMax, jMax), dtype=\"float\")\r\n for i in range(0, iMax):\r\n for j in range(0, jMax):\r\n X[i][j] = i*dX\r\n Y[i][j] = j*dY\r\n else:\r\n X = np.zeros((iMax), dtype=\"float\")\r\n for i in range(0, iMax):\r\n X[i] = i*dX\r\n Y = 0.0\r\n print(\"Uniform rectangular grid generation in cartesian\\\r\n coordinate system: Completed.\")\r\n\r\n return X, Y\r\n\r\n\r\ndef CurvilinearGrid(dX, iMax, dY=None, jMax=None):\r\n \"\"\"Return a rectangular uniform/non-uniform rectangular mesh.\r\n\r\n Documentation incomplete. 
This routine is under construction.\r\n \"\"\"\r\n print(\"Calculating X and Y locations of all grid points within\\\r\n the mesh.\")\r\n from .backend import gridmetrics\r\n from .backend import plotmetrics\r\n dXi = 1.0\r\n dEta = 1.0\r\n\r\n X, Y = RectangularGrid(dX, iMax, dY, jMax)\r\n dim = X.shape\r\n\r\n if len(dim) == 2: # Two dimensional\r\n Xi = [[i*dXi for j in range(0, jMax)] for i in range(0, iMax)]\r\n Eta = [[j*dEta for j in range(0, jMax)] for i in range(0, iMax)]\r\n XiX, XiY, EtaX, EtaY, JJ = gridmetrics.Metrics2D(X, Y)\r\n print(\"Grid metrics and Jacobian evaluation: Completed.\")\r\n plotmetrics.PlotMetrics2D(X, Y, XiX, XiY, EtaX, EtaY)\r\n\r\n elif len(dim) == 1:\r\n Xi = [i*dX for i in range(0, iMax)]\r\n Eta = 0.0\r\n Xi, Eta, JJ = gridmetrics.Metrics1D(X)\r\n print(\"Grid metrics and Jacobian evaluation: Completed.\")\r\n\r\n print(\"Grid transformation to curvilinear coordinate system:\\\r\n Completed.\")\r\n\r\n return X, Y\r\n\r\n\r\ndef CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model):\r\n \"\"\"Return the time step size in the numerical approximation.\r\n\r\n Call Signature:\r\n\r\n CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model)\r\n\r\n Parameters\r\n ----------\r\n CFL: float\r\n\r\n In this program, CFL is treated as the\r\n diffusion number for diffusion equations, and\r\n Courant number for the convection equations.\r\n Caution: This is not a true numerical definition of CFL though.\r\n\r\n diff : float\r\n\r\n Physics specific coefficient in the diffusion model.\r\n For example, kinematic viscosity or thermal diffusivity.\r\n\r\n conv: float\r\n\r\n Physics specific coefficient in the convection model.\r\n For example, speed of sound in the first-order linear wave eq.\r\n\r\n dX: float\r\n\r\n Grid step size along X-axis.\r\n\r\n dY: float\r\n\r\n Grid step size along Y-axis. Value required for 2D applications.\r\n\r\n Dimension: str\r\n\r\n Dimension of the domain. Allowed inputs are \"1D\" or \"2D\".\r\n\r\n Model: str\r\n\r\n Model of the governing equation. To see available options for this\r\n parameter, type the following command on your terminal\r\n python fetchoption.py \"model\"\r\n\r\n Returns\r\n -------\r\n TimeStep: float\r\n\r\n Time step in the model equation.\r\n \"\"\"\r\n # ************** DIFFUSION EQN. ******************\r\n if Model.upper() == \"DIFFUSION\":\r\n dX2 = dX*dX\r\n if Dimension.upper() == \"1D\":\r\n TimeStep = CFL*dX2/diff\r\n elif Dimension.upper() == \"2D\":\r\n dY2 = dY*dY\r\n TimeStep = CFL*(1.0/((1/dX2) + (1/dY2)))/diff\r\n # ************** FIRST-ORDER WAVE EQN. *****************\r\n elif Model.upper() == \"FO_WAVE\":\r\n if Dimension.upper() == \"1D\":\r\n TimeStep = CFL*dX/conv\r\n # ************** BURGERS EQN. *****************\r\n elif Model.upper() in [\"INV_BURGERS\", \"VISC_BURGERS\"]:\r\n if Dimension.upper() == \"1D\":\r\n TimeStep = CFL*dX\r\n print(\"Calculating time step size for the simulation: Completed.\")\r\n\r\n return TimeStep\r\n\r\n\r\ndef CalcMaxSteps(State, nMax, dT, simTime):\r\n \"\"\"Return the max iteration/time steps for the program to run.\r\n\r\n Call Signature:\r\n\r\n CalcMaxSteps(State, nMax, dT, simTime)\r\n\r\n Parameters\r\n ----------\r\n State: str\r\n\r\n State at which the final solution is desired. It can be\r\n steady-state or transient.\r\n To obtain solution at several intermediate time steps before\r\n convergence, use transient option and provide the time in\r\n configuration file at which the solution is desired. 
The\r\n program will calculate when to stop the solution.\r\n\r\n Available inputs are \"STEADY\" or \"TRANSIENT\"\r\n\r\n nMax: int\r\n\r\n Maximum number of iterations until which the program must seek\r\n convergence. If convergence is not achieved after going through\r\n nMax steps, the program will stop solving any further.\r\n\r\n dT: float\r\n\r\n Time step in the discretized equation. The value is auto-calculated\r\n by the program from the CFL value during the configuration step.\r\n\r\n simTime: float\r\n\r\n Intermediate time before convergence at which the numerical solution\r\n is required.\r\n\r\n Returns\r\n -------\r\n MaxSteps: int\r\n\r\n Maximum iteration/time steps for the program to run.\r\n \"\"\"\r\n if State.upper() == \"TRANSIENT\":\r\n if not simTime > 0.0: # simulation time must be positive\r\n raise InvalidValueError(\"SIM_TIME\", simTime)\r\n try:\r\n MaxSteps = int(simTime/dT)\r\n except (TypeError, ZeroDivisionError): # dT missing (None) or zero\r\n raise Exception(\"No time step provided.\")\r\n elif State.upper() == \"STEADY\":\r\n MaxSteps = nMax\r\n print(\"Calculating maximum iterations/steps for the simulation:\\\r\n Completed.\")\r\n\r\n return MaxSteps\r\n" ]
[ [ "numpy.zeros" ] ]
ambader/hcrystalball
[ "713636e698d9a260fab982764fce4a13699be1a8" ]
[ "tests/integration/test_frequency.py" ]
[ "import pandas as pd\nimport pytest\n\n\[email protected](\n \"X_y_with_freq, freq\",\n [\n (\"series_with_freq_D\", \"D\"),\n (\"series_with_freq_M\", \"M\"),\n (\"series_with_freq_Q\", \"Q-DEC\"),\n (\"series_with_freq_Y\", \"A-DEC\"),\n ],\n indirect=[\"X_y_with_freq\"],\n)\[email protected](\n \"wrapper_instance\",\n [\n (\"sklearn\"),\n (\"stacking_ensemble\"),\n (\"simple_ensemble\"),\n (\"smoothing\"),\n (\"sarimax\"),\n (\"prophet\"),\n (\"tbats\"),\n ],\n indirect=[\"wrapper_instance\"],\n)\ndef test_model_frequencies(X_y_with_freq, freq, wrapper_instance):\n X, y = X_y_with_freq\n\n predicted_index = wrapper_instance.fit(X[:-10], y[:-10]).predict(X[-10:]).index\n assert pd.infer_freq(predicted_index) == freq\n assert len(predicted_index) == len(X[-10:])\n" ]
[ [ "pandas.infer_freq" ] ]
PlanNoa/video_super_resolution
[ "534e6e6b55d652c61306df4bc11e83a456855dd4" ]
[ "my_packages/FlowProjection/networks/FlowNetSD.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\n\nfrom .submodules import conv, deconv, i_conv, predict_flow\n\n\nclass FlowNetSD(nn.Module):\n def __init__(self, batchNorm=True):\n super(FlowNetSD, self).__init__()\n\n self.batchNorm = batchNorm\n self.conv0 = conv(self.batchNorm, 6, 64)\n self.conv1 = conv(self.batchNorm, 64, 64, stride=2)\n self.conv1_1 = conv(self.batchNorm, 64, 128)\n self.conv2 = conv(self.batchNorm, 128, 128, stride=2)\n self.conv2_1 = conv(self.batchNorm, 128, 128)\n self.conv3 = conv(self.batchNorm, 128, 256, stride=2)\n self.conv3_1 = conv(self.batchNorm, 256, 256)\n self.conv4 = conv(self.batchNorm, 256, 512, stride=2)\n self.conv4_1 = conv(self.batchNorm, 512, 512)\n self.conv5 = conv(self.batchNorm, 512, 512, stride=2)\n self.conv5_1 = conv(self.batchNorm, 512, 512)\n self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)\n self.conv6_1 = conv(self.batchNorm, 1024, 1024)\n\n self.deconv5 = deconv(1024, 512)\n self.deconv4 = deconv(1026, 256)\n self.deconv3 = deconv(770, 128)\n self.deconv2 = deconv(386, 64)\n\n self.inter_conv5 = i_conv(self.batchNorm, 1026, 512)\n self.inter_conv4 = i_conv(self.batchNorm, 770, 256)\n self.inter_conv3 = i_conv(self.batchNorm, 386, 128)\n self.inter_conv2 = i_conv(self.batchNorm, 194, 64)\n\n self.predict_flow6 = predict_flow(1024)\n self.predict_flow5 = predict_flow(512)\n self.predict_flow4 = predict_flow(256)\n self.predict_flow3 = predict_flow(128)\n self.predict_flow2 = predict_flow(64)\n\n self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1)\n self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1)\n self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1)\n self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n if m.bias is not None:\n init.uniform_(m.bias)\n init.xavier_uniform_(m.weight)\n\n if isinstance(m, nn.ConvTranspose2d):\n if m.bias is not None:\n init.uniform_(m.bias)\n init.xavier_uniform_(m.weight)\n self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')\n\n def forward(self, x):\n out_conv0 = self.conv0(x)\n out_conv1 = self.conv1_1(self.conv1(out_conv0))\n out_conv2 = self.conv2_1(self.conv2(out_conv1))\n\n out_conv3 = self.conv3_1(self.conv3(out_conv2))\n out_conv4 = self.conv4_1(self.conv4(out_conv3))\n out_conv5 = self.conv5_1(self.conv5(out_conv4))\n out_conv6 = self.conv6_1(self.conv6(out_conv5))\n\n flow6 = self.predict_flow6(out_conv6)\n flow6_up = self.upsampled_flow6_to_5(flow6)\n out_deconv5 = self.deconv5(out_conv6)\n\n concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)\n out_interconv5 = self.inter_conv5(concat5)\n flow5 = self.predict_flow5(out_interconv5)\n\n flow5_up = self.upsampled_flow5_to_4(flow5)\n out_deconv4 = self.deconv4(concat5)\n\n concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)\n out_interconv4 = self.inter_conv4(concat4)\n flow4 = self.predict_flow4(out_interconv4)\n flow4_up = self.upsampled_flow4_to_3(flow4)\n out_deconv3 = self.deconv3(concat4)\n\n concat3 = torch.cat((out_conv3, out_deconv3, flow4_up), 1)\n out_interconv3 = self.inter_conv3(concat3)\n flow3 = self.predict_flow3(out_interconv3)\n flow3_up = self.upsampled_flow3_to_2(flow3)\n out_deconv2 = self.deconv2(concat3)\n\n concat2 = torch.cat((out_conv2, out_deconv2, flow3_up), 1)\n out_interconv2 = self.inter_conv2(concat2)\n flow2 = self.predict_flow2(out_interconv2)\n\n if self.training:\n return flow2, flow3, flow4, flow5, flow6\n else:\n return flow2,\n" ]
[ [ "torch.nn.init.xavier_uniform_", "torch.nn.init.uniform_", "torch.nn.Upsample", "torch.cat", "torch.nn.ConvTranspose2d" ] ]
OpenMLCo/Yolo-OCR
[ "33fdc86316674458285bb78f55dc643e557c2d1c", "33fdc86316674458285bb78f55dc643e557c2d1c" ]
[ "Yolo-OCR/extract_info_cedula.py", "Yolo-OCR/extract_info_RUT.py" ]
[ "# Extraer bounding boxes\nfrom pytesseract import Output\nimport pytesseract\n# import imutils\n# import argparse\nimport os\nimport glob\nimport random\nimport darknet\n# import time\nimport cv2\nimport numpy as np\nimport darknet\n# import matplotlib.pyplot as plt\n\n# def parser():\n# parser = argparse.ArgumentParser(description=\"YOLO Object Detection\")\n# parser.add_argument(\"--input_file\", type=str, default=\"\",\n# help=\"image source. It can be a single image, a\"\n# \"txt with paths to them, or a folder. Image valid\"\n# \" formats are jpg, jpeg or png.\"\n# \"If no input is given, \")\n# parser.add_argument(\"--batch_size\", default=1, type=int,\n# help=\"number of images to be processed at the same time\")\n# parser.add_argument(\"--weights\", default=\"yolov4.weights\",\n# help=\"yolo weights path\")\n# parser.add_argument(\"--config_file\", default=\"./cfg/yolov4.cfg\",\n# help=\"path to config file\")\n# parser.add_argument(\"--data_file\", default=\"./cfg/coco.data\",\n# help=\"path to data file\")\n# parser.add_argument(\"--thresh\", type=float, default=.25,\n# help=\"remove detections with lower confidence\")\n# return parser.parse_args()\n\ndef check_batch_shape(images, batch_size):\n \"\"\"\n Image sizes should be the same width and height\n \"\"\"\n shapes = [image.shape for image in images]\n if len(set(shapes)) > 1:\n raise ValueError(\"Images don't have same shape\")\n if len(shapes) > batch_size:\n raise ValueError(\"Batch size higher than number of images\")\n return shapes[0]\n\n\ndef load_images(images_path):\n \"\"\"\n If image path is given, return it directly\n For txt file, read it and return each line as image path\n In other case, it's a folder, return a list with names of each\n jpg, jpeg and png file\n \"\"\"\n input_path_extension = images_path.split('.')[-1]\n if input_path_extension in ['jpg', 'jpeg', 'png']:\n return [images_path]\n elif input_path_extension == \"txt\":\n with open(images_path, \"r\") as f:\n return f.read().splitlines()\n else:\n return glob.glob(\n os.path.join(images_path, \"*.jpg\")) + \\\n glob.glob(os.path.join(images_path, \"*.png\")) + \\\n glob.glob(os.path.join(images_path, \"*.jpeg\"))\n\n\ndef prepare_batch(images, network, channels=3):\n width = darknet.network_width(network)\n height = darknet.network_height(network)\n\n darknet_images = []\n for image in images:\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_resized = cv2.resize(image_rgb, (width, height),\n interpolation=cv2.INTER_LINEAR)\n custom_image = image_resized.transpose(2, 0, 1)\n darknet_images.append(custom_image)\n\n batch_array = np.concatenate(darknet_images, axis=0)\n batch_array = np.ascontiguousarray(batch_array.flat, dtype=np.float32)/255.0\n darknet_images = batch_array.ctypes.data_as(darknet.POINTER(darknet.c_float))\n return darknet.IMAGE(width, height, channels, darknet_images)\n\n\ndef image_detection(image, network, class_names, class_colors, thresh):\n # Darknet doesn't accept numpy images.\n # Create one with image we reuse for each detect\n width = darknet.network_width(network)\n height = darknet.network_height(network)\n darknet_image = darknet.make_image(width, height, 3)\n\n # image = cv2.imread(image_path)\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_resized = cv2.resize(image_rgb, (width, height),\n interpolation=cv2.INTER_LINEAR)\n\n darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())\n detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)\n 
darknet.free_image(darknet_image)\n image = darknet.draw_boxes(detections, image_resized, class_colors)\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections\n\n\ndef batch_detection(network, images, class_names, class_colors,\n thresh=0.25, hier_thresh=.5, nms=.45, batch_size=4):\n image_height, image_width, _ = check_batch_shape(images, batch_size)\n darknet_images = prepare_batch(images, network)\n batch_detections = darknet.network_predict_batch(network, darknet_images, batch_size, image_width,\n image_height, thresh, hier_thresh, None, 0, 0)\n batch_predictions = []\n for idx in range(batch_size):\n num = batch_detections[idx].num\n detections = batch_detections[idx].dets\n if nms:\n darknet.do_nms_obj(detections, num, len(class_names), nms)\n predictions = darknet.remove_negatives(detections, class_names, num)\n images[idx] = darknet.draw_boxes(predictions, images[idx], class_colors)\n batch_predictions.append(predictions)\n darknet.free_batch_detections(batch_detections, batch_size)\n return images, batch_predictions\n\n\ndef image_classification(image, network, class_names):\n width = darknet.network_width(network)\n height = darknet.network_height(network)\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_resized = cv2.resize(image_rgb, (width, height),\n interpolation=cv2.INTER_LINEAR)\n darknet_image = darknet.make_image(width, height, 3)\n darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())\n detections = darknet.predict_image(network, darknet_image)\n predictions = [(name, detections[idx]) for idx, name in enumerate(class_names)]\n darknet.free_image(darknet_image)\n return sorted(predictions, key=lambda x: -x[1])\n\n\ndef convert2relative(image, bbox):\n \"\"\"\n YOLO format use relative coordinates for annotation\n \"\"\"\n x, y, w, h = bbox\n height, width, _ = image.shape\n return x/width, y/height, w/width, h/height\n\ndef save_annotations(name, image, detections, class_names):\n \"\"\"\n Files saved with image_name.txt and relative coordinates\n \"\"\"\n file_name = os.path.splitext(name)[0] + \".txt\"\n with open(file_name, \"w\") as f:\n for label, confidence, bbox in detections:\n x, y, w, h = convert2relative(image, bbox)\n label = class_names.index(label)\n f.write(\"{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\\n\".format(label, x, y, w, h, float(confidence)))\n\n\ndef batch_detection_example():\n args = parser()\n check_arguments_errors(args)\n batch_size = 3\n random.seed(3) # deterministic bbox colors\n network, class_names, class_colors = darknet.load_network(\n args.config_file,\n args.data_file,\n args.weights,\n batch_size=batch_size\n )\n image_names = ['data/horses.jpg', 'data/horses.jpg', 'data/eagle.jpg']\n images = [cv2.imread(image) for image in image_names]\n images, detections, = batch_detection(network, images, class_names,\n class_colors, batch_size=batch_size)\n for name, image in zip(image_names, images):\n cv2.imwrite(name.replace(\"data/\", \"\"), image)\n print(detections)\n\ndef extract_info_cedula(image, detections, img_cedula_raw):\n# img_cedula_raw = cv2.imread(image_name)\n img_cedula_raw = cv2.cvtColor(img_cedula_raw, cv2.COLOR_BGR2RGB)\n hmax,wmax,_=img_cedula_raw.shape\n requeriments=['nombres', 'numero', 'apellidos', 'cedula']\n names = [detection[0] for detection in detections]\n names = np.unique(names)\n if set(requeriments)-set(names) != set():\n log = 'Extracted box {}'.format(names)\n else:\n names_to_extract_info=['nombres', 'numero', 'apellidos']\n log = 'Extracted box {}'.format(names)\n 
results_dict={}\n for j in range(len(detections)):\n x, y, w, h = convert2relative(image, detections[j][-1])\n if not detections[j][0] in names_to_extract_info:\n continue\n xmin=int((x-(w/2))*wmax)-int((0.05*(x-(w/2))*wmax))\n ymin=int((y-(h/2))*hmax)\n xmax=int((x+(w/2))*wmax)+int((0.05*(x+(w/2))*wmax))\n ymax=int((y+(h/2))*hmax)\n roi=img_cedula_raw[ymin:ymax,xmin:xmax,:]\n gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n custom_config = r'--psm 6 --l spa'\n #text = pytesseract.image_to_string(gray,config=custom_config)\n d = pytesseract.image_to_data(gray,config=custom_config,output_type=Output.DICT)\n #binary2=gray\n n_boxes = len(d['text'])\n extracted_text=[]\n ylist=[]\n hlist=[]\n #plt.imshow(gray,'gray')\n #plt.show()\n for i in range(n_boxes):\n if int(d['conf'][i]) >= 0:\n (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])\n extracted_text.append(d['text'][i])\n ylist.append(y)\n hlist.append(h)\n #binary2 = cv2.rectangle(binary2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n idx=[]\n for yy in ylist:\n if abs(np.min(ylist)-yy)< np.max(hlist)/2:\n idx.append(True)\n else:\n idx.append(False)\n if detections[j][0]=='numero':\n real_info = [''.join(e for e in text if e.isnumeric()) for text,idx in zip(extracted_text,idx) if idx]\n real_info = ''.join(real_info)\n else:\n real_info = [''.join(e for e in text if e.isalpha()) for text,idx in zip(extracted_text,idx) if idx]\n real_info = ' '.join(real_info)\n real_info = [info for info in real_info.split(' ') if len(info)>2]\n real_info = ' '.join(real_info)\n results_dict[detections[j][0]]=real_info.upper()\n return results_dict, log\n\nclass main_cedula():\n def __init__(self,config_file,data_file,weights,\n thresh):\n self.config_file=config_file\n self.data_file=data_file\n self.weights=weights\n self.thresh=thresh\n\n def load_darknet(self,):\n random.seed(0) # deterministic bbox colors\n self.network, self.class_names, self.class_colors = darknet.load_network(\n self.config_file,\n self.data_file,\n self.weights,\n batch_size=1\n )\n\n def main_cedula_run(self,image_raw):\n image, detections = image_detection(\n image_raw, self.network, self.class_names, self.class_colors, self.thresh\n )\n results_dict, log = extract_info_cedula(image, detections, image_raw)\n #print(results_dict)\n return results_dict, log \n\n# if __name__ == \"__main__\":\n# # unconmment next line for an example of batch processing\n# # batch_detection_example()\n# main()", "# Extraer bounding boxes\nfrom pytesseract import Output\nimport pytesseract\nimport imutils\nimport argparse\nimport os\nimport glob\nimport random\nimport darknet\nimport time\nimport cv2\nimport numpy as np\nimport darknet\nimport matplotlib.pyplot as plt\n\ndef parser():\n parser = argparse.ArgumentParser(description=\"YOLO Object Detection\")\n parser.add_argument(\"--input_file\", type=str, default=\"\",\n help=\"image source. It can be a single image, a\"\n \"txt with paths to them, or a folder. 
Image valid\"\n \" formats are jpg, jpeg or png.\"\n \"If no input is given, \")\n parser.add_argument(\"--batch_size\", default=1, type=int,\n help=\"number of images to be processed at the same time\")\n parser.add_argument(\"--weights\", default=\"yolov4.weights\",\n help=\"yolo weights path\")\n parser.add_argument(\"--config_file\", default=\"./cfg/yolov4.cfg\",\n help=\"path to config file\")\n parser.add_argument(\"--data_file\", default=\"./cfg/coco.data\",\n help=\"path to data file\")\n parser.add_argument(\"--thresh\", type=float, default=.25,\n help=\"remove detections with lower confidence\")\n return parser.parse_args()\n\ndef check_batch_shape(images, batch_size):\n \"\"\"\n Image sizes should be the same width and height\n \"\"\"\n shapes = [image.shape for image in images]\n if len(set(shapes)) > 1:\n raise ValueError(\"Images don't have same shape\")\n if len(shapes) > batch_size:\n raise ValueError(\"Batch size higher than number of images\")\n return shapes[0]\n\n\ndef load_images(images_path):\n \"\"\"\n If image path is given, return it directly\n For txt file, read it and return each line as image path\n In other case, it's a folder, return a list with names of each\n jpg, jpeg and png file\n \"\"\"\n input_path_extension = images_path.split('.')[-1]\n if input_path_extension in ['jpg', 'jpeg', 'png']:\n return [images_path]\n elif input_path_extension == \"txt\":\n with open(images_path, \"r\") as f:\n return f.read().splitlines()\n else:\n return glob.glob(\n os.path.join(images_path, \"*.jpg\")) + \\\n glob.glob(os.path.join(images_path, \"*.png\")) + \\\n glob.glob(os.path.join(images_path, \"*.jpeg\"))\n\n\ndef prepare_batch(images, network, channels=3):\n width = darknet.network_width(network)\n height = darknet.network_height(network)\n\n darknet_images = []\n for image in images:\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_resized = cv2.resize(image_rgb, (width, height),\n interpolation=cv2.INTER_LINEAR)\n custom_image = image_resized.transpose(2, 0, 1)\n darknet_images.append(custom_image)\n\n batch_array = np.concatenate(darknet_images, axis=0)\n batch_array = np.ascontiguousarray(batch_array.flat, dtype=np.float32)/255.0\n darknet_images = batch_array.ctypes.data_as(darknet.POINTER(darknet.c_float))\n return darknet.IMAGE(width, height, channels, darknet_images)\n\n\ndef image_detection(image, network, class_names, class_colors, thresh):\n # Darknet doesn't accept numpy images.\n # Create one with image we reuse for each detect\n width = darknet.network_width(network)\n height = darknet.network_height(network)\n darknet_image = darknet.make_image(width, height, 3)\n\n #image = cv2.imread(image_path)\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_resized = cv2.resize(image_rgb, (width, height),\n interpolation=cv2.INTER_LINEAR)\n\n darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())\n detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)\n darknet.free_image(darknet_image)\n image = darknet.draw_boxes(detections, image_resized, class_colors)\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections\n\n\ndef batch_detection(network, images, class_names, class_colors,\n thresh=0.25, hier_thresh=.5, nms=.45, batch_size=4):\n image_height, image_width, _ = check_batch_shape(images, batch_size)\n darknet_images = prepare_batch(images, network)\n batch_detections = darknet.network_predict_batch(network, darknet_images, batch_size, image_width,\n image_height, thresh, hier_thresh, 
None, 0, 0)\n batch_predictions = []\n for idx in range(batch_size):\n num = batch_detections[idx].num\n detections = batch_detections[idx].dets\n if nms:\n darknet.do_nms_obj(detections, num, len(class_names), nms)\n predictions = darknet.remove_negatives(detections, class_names, num)\n images[idx] = darknet.draw_boxes(predictions, images[idx], class_colors)\n batch_predictions.append(predictions)\n darknet.free_batch_detections(batch_detections, batch_size)\n return images, batch_predictions\n\n\ndef image_classification(image, network, class_names):\n width = darknet.network_width(network)\n height = darknet.network_height(network)\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_resized = cv2.resize(image_rgb, (width, height),\n interpolation=cv2.INTER_LINEAR)\n darknet_image = darknet.make_image(width, height, 3)\n darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())\n detections = darknet.predict_image(network, darknet_image)\n predictions = [(name, detections[idx]) for idx, name in enumerate(class_names)]\n darknet.free_image(darknet_image)\n return sorted(predictions, key=lambda x: -x[1])\n\n\ndef convert2relative(image, bbox):\n \"\"\"\n YOLO format use relative coordinates for annotation\n \"\"\"\n x, y, w, h = bbox\n height, width, _ = image.shape\n return x/width, y/height, w/width, h/height\n\ndef save_annotations(name, image, detections, class_names):\n \"\"\"\n Files saved with image_name.txt and relative coordinates\n \"\"\"\n file_name = os.path.splitext(name)[0] + \".txt\"\n with open(file_name, \"w\") as f:\n for label, confidence, bbox in detections:\n x, y, w, h = convert2relative(image, bbox)\n label = class_names.index(label)\n f.write(\"{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\\n\".format(label, x, y, w, h, float(confidence)))\n\n\ndef batch_detection_example():\n args = parser()\n check_arguments_errors(args)\n batch_size = 3\n random.seed(3) # deterministic bbox colors\n network, class_names, class_colors = darknet.load_network(\n args.config_file,\n args.data_file,\n args.weights,\n batch_size=batch_size\n )\n image_names = ['data/horses.jpg', 'data/horses.jpg', 'data/eagle.jpg']\n images = [cv2.imread(image) for image in image_names]\n images, detections, = batch_detection(network, images, class_names,\n class_colors, batch_size=batch_size)\n for name, image in zip(image_names, images):\n cv2.imwrite(name.replace(\"data/\", \"\"), image)\n print(detections)\n\ndef extract_info_cedula(image, detections, img_cedula_raw):\n # img_cedula_raw = cv2.imread(image_name)\n img_cedula_raw = cv2.cvtColor(img_cedula_raw, cv2.COLOR_BGR2RGB)\n hmax,wmax,_=img_cedula_raw.shape\n requeriments=['NIT', 'RS']\n names = [detection[0] for detection in detections]\n names = np.unique(names)\n if set(requeriments)-set(names) != set():\n log = 'Extracted box {}'.format(names)\n else:\n names_to_extract_info=['NIT', 'RS']\n log = 'Extracted box {}'.format(names)\n results_dict={}\n for j in range(len(detections)):\n x, y, w, h = convert2relative(image, detections[j][-1])\n if not detections[j][0] in names_to_extract_info:\n continue\n #print(x, y, w, h)\n if (x-(w/2))<0:\n xtmp = 0\n else:\n xtmp = (x-(w/2))\n \n xmin=int(xtmp*wmax)-int((0.05*(xtmp)*wmax))\n ymin=int((y-(h/2))*hmax)\n xmax=int((x+(w/2))*wmax)+int((0.05*(x+(w/2))*wmax))\n ymax=int((y+(h/2))*hmax)\n roi=img_cedula_raw[ymin:ymax,xmin:xmax,:]\n gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n custom_config = r'--psm 6 --l spa'\n #text = pytesseract.image_to_string(gray,config=custom_config)\n d = 
pytesseract.image_to_data(gray,config=custom_config,output_type=Output.DICT)\n #binary2=gray\n n_boxes = len(d['text'])\n extracted_text=[]\n ylist=[]\n hlist=[]\n #wlist = []\n #plt.imshow(gray,'gray')\n #plt.show()\n for i in range(n_boxes):\n if int(d['conf'][i]) >= 0:\n (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])\n extracted_text.append(d['text'][i])\n ylist.append(y)\n hlist.append(h)\n #wlist.append(w)\n #binary2 = cv2.rectangle(binary2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n idx=[]\n con = 0 \n for yy in ylist:\n if (np.min(ylist)+hlist[np.argmin(ylist[1::])])<yy:#< np.max(hlist)/2:\n idx.append(True)\n else:\n idx.append(False)\n con+=1\n if detections[j][0]=='numero':\n real_info = [''.join(e for e in text if e.isnumeric()) for text,idx in zip(extracted_text,idx) if idx]\n real_info = ''.join(real_info)\n else:\n real_info = [''.join(e for e in text if e.isalnum()) for text,idx in zip(extracted_text,idx) if idx]\n real_info = ' '.join(real_info)\n real_info = [info for info in real_info.split(' ') if len(info)>2]\n real_info = ' '.join(real_info)\n results_dict[detections[j][0]]=real_info.upper()\n return results_dict, log\n\n\nclass main_rut():\n def __init__(self,config_file,data_file,weights,\n thresh):\n self.config_file=config_file\n self.data_file=data_file\n self.weights=weights\n self.thresh=thresh\n\n def load_darknet(self,):\n random.seed(0) # deterministic bbox colors\n self.network, self.class_names, self.class_colors = darknet.load_network(\n self.config_file,\n self.data_file,\n self.weights,\n batch_size=1\n )\n\n def main_cedula_run(self,image_raw):\n image, detections = image_detection(\n image_raw, self.network, self.class_names, self.class_colors, self.thresh)\n results_dict, log = extract_info_cedula(image, detections, image_raw)\n #print(results_dict)\n return results_dict, log" ]
[ [ "numpy.ascontiguousarray", "numpy.max", "numpy.min", "numpy.concatenate", "numpy.unique" ], [ "numpy.ascontiguousarray", "numpy.argmin", "numpy.min", "numpy.concatenate", "numpy.unique" ] ]
earlbabson/torchflare
[ "15db06d313a53a3ec4640869335ba87730562b28" ]
[ "tests/mixers/test_mixers.py" ]
[ "from torchflare.batch_mixers.mixers import cutmix, mixup, get_collate_fn\nimport torch\n\n\nx = torch.randn(4, 3, 256, 256)\ntargets = torch.tensor([0, 1, 0, 1])\n\nds = torch.utils.data.TensorDataset(x, targets)\n\n\ndef test_mixup():\n dl = torch.utils.data.DataLoader(ds, batch_size=2)\n batch = next(iter(dl))\n op, y = mixup(batch=batch, alpha=0.35)\n\n assert torch.is_tensor(op) is True\n assert isinstance(y, (tuple, list)) is True\n\n targets_a, targets_b, lam = y\n assert torch.is_tensor(targets_a) is True\n assert torch.is_tensor(targets_b) is True\n assert isinstance(lam, (int, float)) is True\n\n\ndef test_cutmix():\n dl = torch.utils.data.DataLoader(ds, batch_size=2)\n batch = next(iter(dl))\n op, y = cutmix(batch=batch, alpha=0.35)\n\n assert torch.is_tensor(op) is True\n assert isinstance(y, (tuple, list)) is True\n\n targets_a, targets_b, lam = y\n assert torch.is_tensor(targets_a) is True\n assert torch.is_tensor(targets_b) is True\n assert isinstance(lam, (int, float)) is True\n\n\ndef test_collate_fn_mixup():\n\n mixup_collate_fn = get_collate_fn(mixer_name=\"mixup\", alpha=0.35)\n dl = torch.utils.data.DataLoader(ds, batch_size=2, collate_fn=mixup_collate_fn)\n op, y = next(iter(dl))\n\n assert torch.is_tensor(op) is True\n assert isinstance(y, (tuple, list)) is True\n\n targets_a, targets_b, lam = y\n assert torch.is_tensor(targets_a) is True\n assert torch.is_tensor(targets_b) is True\n assert isinstance(lam, (int, float)) is True\n\n\ndef test_collate_fn_cutmix():\n mixup_collate_fn = get_collate_fn(mixer_name=\"cutmix\", alpha=0.35)\n dl = torch.utils.data.DataLoader(ds, batch_size=2, collate_fn=mixup_collate_fn)\n op, y = next(iter(dl))\n\n assert torch.is_tensor(op) is True\n assert isinstance(y, (tuple, list)) is True\n\n targets_a, targets_b, lam = y\n assert torch.is_tensor(targets_a) is True\n assert torch.is_tensor(targets_b) is True\n assert isinstance(lam, (int, float)) is True\n" ]
[ [ "torch.utils.data.DataLoader", "torch.randn", "torch.tensor", "torch.is_tensor", "torch.utils.data.TensorDataset" ] ]
tansaku/examples
[ "cc121d3354ff7f9814b6eee881dce6e6c55d0e68" ]
[ "tensorflow_examples/lite/model_maker/core/task/audio_classifier.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"APIs to train an audio classification model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_examples.lite.model_maker.core.api import mm_export\nfrom tensorflow_examples.lite.model_maker.core.export_format import ExportFormat\nfrom tensorflow_examples.lite.model_maker.core.task import classification_model\nfrom tensorflow_examples.lite.model_maker.core.task import model_util\nfrom tensorflow_examples.lite.model_maker.core.task.model_spec import audio_spec\n\n\n@mm_export('audio_classifier.create')\ndef create(train_data,\n model_spec,\n validation_data=None,\n batch_size=32,\n epochs=5,\n model_dir=None,\n do_train=True,\n train_whole_model=False):\n \"\"\"Loads data and retrains the model.\n\n Args:\n train_data: A instance of audio_dataloader.DataLoader class.\n model_spec: Specification for the model.\n validation_data: Validation DataLoader. If None, skips validation process.\n batch_size: Number of samples per training step. If `use_hub_library` is\n False, it represents the base learning rate when train batch size is 256\n and it's linear to the batch size.\n epochs: Number of epochs for training.\n model_dir: The location of the model checkpoint files.\n do_train: Whether to run training.\n train_whole_model: Boolean. By default, only the classification head is\n trained. When True, the base model is also trained.\n\n Returns:\n An instance of AudioClassifier class.\n \"\"\"\n if not isinstance(model_spec, audio_spec.BaseSpec):\n model_spec = model_spec.get(model_spec, model_dir=model_dir)\n task = AudioClassifier(\n model_spec,\n train_data.index_to_label,\n shuffle=True,\n train_whole_model=train_whole_model)\n if do_train:\n task.train(train_data, validation_data, epochs, batch_size)\n return task\n\n\n@mm_export('audio_classifier.AudioClassifier')\nclass AudioClassifier(classification_model.ClassificationModel):\n \"\"\"Audio classifier for training/inference and exporing.\"\"\"\n\n # TODO(b/171848856): Add TFJS export.\n DEFAULT_EXPORT_FORMAT = (ExportFormat.LABEL, ExportFormat.TFLITE)\n ALLOWED_EXPORT_FORMAT = (ExportFormat.LABEL, ExportFormat.TFLITE,\n ExportFormat.SAVED_MODEL)\n\n def _get_dataset_and_steps(self, data, batch_size, is_training):\n if not data:\n return None, 0\n # TODO(b/171449557): Put this into DataLoader.\n input_fn, steps = self._get_input_fn_and_steps(\n data, batch_size, is_training=is_training)\n dataset = tf.distribute.get_strategy().distribute_datasets_from_function(\n input_fn)\n return dataset, steps\n\n def train(self, train_data, validation_data, epochs, batch_size):\n # TODO(b/171449557): Upstream this to the parent class.\n if len(train_data) < batch_size:\n raise ValueError('The size of the train_data (%d) couldn\\'t be smaller '\n 'than batch_size (%d). 
To solve this problem, set '\n 'the batch_size smaller or increase the size of the '\n 'train_data.' % (len(train_data), batch_size))\n\n with self.model_spec.strategy.scope():\n train_ds, _ = self._get_dataset_and_steps(\n train_data, batch_size, is_training=True)\n validation_ds, _ = self._get_dataset_and_steps(\n validation_data, batch_size, is_training=False)\n\n self.model = self.model_spec.create_model(\n train_data.num_classes, train_whole_model=self.train_whole_model)\n\n # Display model summary\n self.model.summary()\n\n return self.model_spec.run_classifier(\n self.model,\n epochs,\n train_ds,\n validation_ds,\n callbacks=self._keras_callbacks(self.model_spec.model_dir))\n\n def _export_tflite(self, tflite_filepath, quantization_config='default'):\n \"\"\"Converts the retrained model to tflite format and saves it.\n\n Args:\n tflite_filepath: File path to save tflite model.\n quantization_config: Configuration for post-training quantization.\n \"\"\"\n if quantization_config == 'default':\n quantization_config = self.model_spec.get_default_quantization_config()\n\n # Allow model_spec to override this method.\n fn = getattr(self.model_spec, 'export_tflite', None)\n if not callable(fn):\n fn = model_util.export_tflite\n fn(self.model, tflite_filepath, quantization_config)\n\n def confusion_matrix(self, data, batch_size=32):\n # TODO(b/171449557): Consider moving this to ClassificationModel\n ds = data.gen_dataset(\n batch_size, is_training=False, preprocess=self.preprocess)\n predicated = []\n truth = []\n for item, label in ds:\n if tf.rank(label) == 2: # One-hot encoded labels (batch, num_classes)\n truth.extend(tf.math.argmax(label, axis=-1))\n predicated.extend(tf.math.argmax(self.model.predict(item), axis=-1))\n else:\n truth.extend(label)\n predicated.extend(self.model.predict(item))\n\n return tf.math.confusion_matrix(\n labels=truth, predictions=predicated, num_classes=data.num_classes)\n" ]
[ [ "tensorflow.math.confusion_matrix", "tensorflow.distribute.get_strategy", "tensorflow.rank", "tensorflow.math.argmax" ] ]
thanhhvnqb/FCOS
[ "6e089528d909e56bb7348b56a2ab8f788bf9d2ed" ]
[ "tools/train_net.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nr\"\"\"\nBasic training script for PyTorch\n\"\"\"\n\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom fcos_core.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom fcos_core.config import cfg\nfrom fcos_core.data import make_data_loader\nfrom fcos_core.solver import make_lr_scheduler\nfrom fcos_core.solver import make_optimizer\nfrom fcos_core.engine.inference import inference\nfrom fcos_core.engine.trainer import do_train\nfrom fcos_core.modeling.detector import build_detection_model\nfrom fcos_core.utils.checkpoint import DetectronCheckpointer\nfrom fcos_core.utils.collect_env import collect_env_info\nfrom fcos_core.utils.comm import synchronize, \\\n get_rank, is_pytorch_1_1_0_or_later\nfrom fcos_core.utils.imports import import_file\nfrom fcos_core.utils.logger import setup_logger\nfrom fcos_core.utils.miscellaneous import mkdir, save_config\n\n\ndef train(cfg, local_rank, distributed):\n model = build_detection_model(cfg)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n if cfg.MODEL.USE_SYNCBN:\n assert is_pytorch_1_1_0_or_later(), \\\n \"SyncBatchNorm is only available in pytorch >= 1.1.0\"\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n optimizer = make_optimizer(cfg, model)\n scheduler = make_lr_scheduler(cfg, optimizer)\n\n if distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[local_rank],\n output_device=local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,\n )\n\n arguments = {}\n arguments[\"iteration\"] = 0\n\n output_dir = cfg.OUTPUT_DIR\n\n save_to_disk = get_rank() == 0\n checkpointer = DetectronCheckpointer(cfg, model, optimizer, scheduler, output_dir, save_to_disk)\n extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n arguments.update(extra_checkpoint_data)\n\n data_loader = make_data_loader(\n cfg,\n is_train=True,\n is_distributed=distributed,\n start_iter=arguments[\"iteration\"],\n )\n\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n\n do_train(\n model,\n data_loader,\n optimizer,\n scheduler,\n checkpointer,\n device,\n checkpoint_period,\n arguments,\n )\n\n return model\n\n\ndef run_test(cfg, model, distributed):\n if distributed:\n model = model.module\n torch.cuda.empty_cache() # TODO check if it helps\n iou_types = (\"bbox\", )\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\", )\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\", )\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.FCOS_ON or cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n )\n synchronize()\n\n\ndef 
main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Training\")\n parser.add_argument(\"--config-file\", default=\"\", metavar=\"FILE\", help=\"path to config file\", type=str)\n parser.add_argument(\"--netname\", default=\"mpprcnn\", help=\"datetime of training\", type=str)\n parser.add_argument(\"--date\", help=\"datetime of training\", type=str)\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\"--skip-test\", dest=\"skip_test\", help=\"Do not test the final model\", action=\"store_true\")\n parser.add_argument(\"opts\", default=None, nargs=argparse.REMAINDER, \\\n help=\"Modify config options using the command-line\")\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = num_gpus > 1\n\n if args.distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\", init_method=\"env://\")\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n output_dir = os.path.join(cfg.OUTPUT_DIR, args.netname, args.date + \"/\")\n cfg.OUTPUT_DIR = output_dir\n cfg.freeze()\n\n if output_dir:\n mkdir(output_dir)\n\n logger = setup_logger(\"fcos_core\", output_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(args)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, \"r\") as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n output_config_path = os.path.join(output_dir, 'config.yml')\n logger.info(\"Saving config into: {}\".format(output_config_path))\n # save overloaded model config in the output directory\n save_config(cfg, output_config_path)\n\n model = train(cfg, args.local_rank, args.distributed)\n\n if not args.skip_test:\n run_test(cfg, model, args.distributed)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.cuda.empty_cache", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.distributed.init_process_group", "torch.nn.parallel.DistributedDataParallel", "torch.device", "torch.cuda.set_device" ] ]
nschloe/pynosh
[ "331454b29246e6c009878589aad2dccb9fda6c30" ]
[ "pynosh/magnetic_vector_potentials.py" ]
[ "\"\"\"Module that provides magnetic vector potentials.\"\"\"\nimport numpy\n\n\ndef constant_field(X, B):\n \"\"\"Converts a spatially constant magnetic field B at X\n into a corresponding potential.\n \"\"\"\n # This is one particular choice that works.\n return 0.5 * numpy.cross(B, X)\n\n\ndef magnetic_dipole(x, x0, m):\n \"\"\"Magnetic vector potential for the static dipole at x0 with orientation\n m.\n \"\"\"\n r = x - x0\n # npsum(...) = ||r||^3 row-wise;\n # numpy.cross acts on rows by default;\n # The \".T\" magic makes sure that each row of numpy.cross(m, r)\n # gets divided by the corresponding entry in ||r||^3.\n return (numpy.cross(m, r).T / numpy.sum(numpy.abs(r) ** 2, axis=-1) ** (3. / 2)).T\n\n\ndef magnetic_dot(X, radius, heights):\n \"\"\"Magnetic vector potential corresponding to the field that is induced by\n a cylindrical magnetic dot, centered at (0,0,0.5*(height0+height1)), with\n the radius `radius` for objects in the x-y-plane. The potential is derived\n by interpreting the dot as an infinitesimal collection of magnetic dipoles,\n hence\n\n A(x) = \\int_{dot} A_{dipole}(x-r) dr.\n\n Support for input valued (x,y,z), z!=0, is pending.\n \"\"\"\n # Span a cartesian grid over the sample, and integrate over it.\n # For symmetry, choose a number that is divided by 4.\n n_phi = 100\n # Choose such that the quads at radius/2 are approximately squares.\n n_radius = int(round(n_phi / numpy.pi))\n\n dr = radius / n_radius\n\n A = numpy.zeros((len(X), 3))\n\n # What we want to have is the value of\n #\n # I(X) := \\int_{dot} \\|X-XX\\|^{-3/2} (m\\times(X-XX)) dXX\n #\n # with\n #\n # X := (x, y, z)^T,\n # XX := (xx, yy, zz)^T\n #\n # The integral in zz-direction (height) can be calculated analytically,\n # such that\n #\n # I = \\int_{disk}\n # [ - (z-zz) / (r2D*sqrt(r3D)) ]_{zz=h_0}^{h_1}\n # ( -(y-yy), x-xx, 0)^T dxx dyy.\n #\n # The integral over the disk is then approximated numerically by\n # the summation over little disk segments.\n # An alternative is to use cylindrical coordinates.\n #\n X_dist = numpy.empty((X.shape[0], 2))\n for i_phi in range(n_phi):\n beta = 2.0 * numpy.pi / n_phi * i_phi\n sin_beta = numpy.sin(beta)\n cos_beta = numpy.cos(beta)\n for i_radius in range(n_radius):\n rad = radius / n_radius * (i_radius + 0.5)\n # r = squared distance between grid point X to the\n # point (x,y) on the magnetic dot\n X_dist[:, 0] = X[:, 0] - rad * cos_beta\n X_dist[:, 1] = X[:, 1] - rad * sin_beta\n\n # r = x_dist * x_dist + y_dist * y_dist\n # Note that X_dist indeed only has two components.\n R = numpy.sum(X_dist ** 2, axis=1)\n ind = numpy.nonzero(R > 1.0e-15)\n\n # 3D distance to point on lower edge (xi,yi,height0)\n # and upper edge ( (xi,yi,height1), respectively\n R_3D = [\n numpy.sqrt(R[ind] + heights[0] ** 2),\n numpy.sqrt(R[ind] + heights[1] ** 2),\n ]\n # Volume of circle segment = pi*angular_width * r^2,\n # so the volume of a building brick of the discretization is\n # V = pi/n_phi * [(r+dr/2)^2 - (r-dr/2)^2]\n # = pi/n_phi * 2 * r * dr.\n Alpha = (\n (heights[1] / R_3D[1] - heights[0] / R_3D[0])\n / R[ind]\n * numpy.pi\n / n_phi\n * (2.0 * rad * dr)\n ) # volume\n # ax += y_dist * alpha\n # ay -= x_dist * alpha\n A[ind, 0] += X_dist[ind, 1] * Alpha\n A[ind, 1] -= X_dist[ind, 0] * Alpha\n return A\n" ]
[ [ "numpy.sum", "numpy.empty", "numpy.cross", "numpy.cos", "numpy.abs", "numpy.sqrt", "numpy.sin", "numpy.nonzero" ] ]
naganandy/G-MPNN-R
[ "04564c059e6e8cfc08edb27403dfe6bb89ba8bab" ]
[ "MPNNR/model/utils.py" ]
[ "import torch, numpy as np, scipy.sparse as sp\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\n\n\n\ndef adjacency(H):\n \"\"\"\n construct adjacency for recursive hypergraph\n arguments:\n H: recursive hypergraph\n \"\"\"\n A = np.eye(H['n'])\n E = H['D0']\n \n for k in tqdm(E):\n e = list(E[k])\n for u in e:\n A[k][u], A[u][k] = 1, 1\n for v in e:\n if u != v: A[u][v], A[v][u] = 1, 1\n\n E = H['D1']\n for k in tqdm(E):\n e = list(E[k])\n for u in e:\n for v in e:\n if u != v: A[u][v], A[v][u] = 1, 1\n\n \n return ssm2tst(symnormalise(sp.csr_matrix(A)))\n\n\n\ndef symnormalise(M):\n \"\"\"\n symmetrically normalise sparse matrix\n arguments:\n M: scipy sparse matrix\n returns:\n D^{-1/2} M D^{-1/2} \n where D is the diagonal node-degree matrix\n \"\"\"\n \n d = np.array(M.sum(1))\n \n dhi = np.power(d, -1/2).flatten()\n dhi[np.isinf(dhi)] = 0.\n DHI = sp.diags(dhi) # D half inverse i.e. D^{-1/2}\n \n return (DHI.dot(M)).dot(DHI) \n\n\n\ndef ssm2tst(M):\n \"\"\"\n converts a scipy sparse matrix (ssm) to a torch sparse tensor (tst)\n arguments:\n M: scipy sparse matrix\n returns:\n a torch sparse tensor of M\n \"\"\"\n\n M = M.tocoo().astype(np.float32)\n \n indices = torch.from_numpy(np.vstack((M.row, M.col))).long()\n values = torch.from_numpy(M.data)\n shape = torch.Size(M.shape)\n \n return torch.sparse.FloatTensor(indices, values, shape)\n\n\n\ndef normalise(M):\n \"\"\"\n row-normalise sparse matrix\n arguments:\n M: scipy sparse matrix\n returns:\n D^{-1} M \n where D is the diagonal node-degree matrix \n \"\"\"\n \n d = np.array(M.sum(1))\n \n di = np.power(d, -1).flatten()\n di[np.isinf(di)] = 0.\n DI = sp.diags(di) # D inverse i.e. D^{-1}\n \n return DI.dot(M)" ]
[ [ "numpy.vstack", "numpy.eye", "torch.Size", "numpy.isinf", "scipy.sparse.csr_matrix", "scipy.sparse.diags", "torch.from_numpy", "torch.sparse.FloatTensor", "numpy.power" ] ]
quantumiracle/robolite
[ "b3166a1c51a1118706177f4a4e7401e7c2c6c404" ]
[ "robosuite/models/grippers/panda_gripper.py" ]
[ "\"\"\"\nGripper for Franka's Panda (has two fingers).\n\"\"\"\nimport numpy as np\nfrom robosuite.utils.mjcf_utils import xml_path_completion\nfrom robosuite.models.grippers.gripper import Gripper\n\n\nclass PandaGripperBase(Gripper):\n \"\"\"\n Gripper for Franka's Panda (has two fingers).\n \"\"\"\n\n def __init__(self, path=None):\n if path:\n super().__init__(path)\n else:\n super().__init__(xml_path_completion(\"grippers/panda_gripper.xml\"))\n\n def format_action(self, action):\n return action\n\n @property\n def init_qpos(self):\n return np.array([0.020833, -0.020833])\n\n @property\n def joints(self):\n return [\"finger_joint1\", \"finger_joint2\"]\n\n @property\n def dof(self):\n return 2\n\n @property\n def visualization_sites(self):\n return [\"grip_site\", \"grip_site_cylinder\"]\n\n def contact_geoms(self):\n return [\n \"hand_collision\",\n \"finger1_collision\",\n \"finger2_collision\",\n \"finger1_tip_collision\",\n \"finger2_tip_collision\",\n ]\n\n @property\n def left_finger_geoms(self):\n return [\n \"finger1_tip_collision\",\n ]\n\n @property\n def right_finger_geoms(self):\n return [\n \"finger2_tip_collision\",\n ]\n\n\nclass PandaGripper(PandaGripperBase):\n \"\"\"\n Modifies PandaGripperBase to only take one action.\n \"\"\"\n\n def format_action(self, action):\n \"\"\"\n 1 => closed, -1 => open\n \"\"\"\n assert len(action) == 1\n return np.array([-1 * action[0], 1 * action[0]])\n\n @property\n def dof(self):\n return 1\n" ]
[ [ "numpy.array" ] ]
kalyanramu/WeedDetection
[ "ab8c94391a6faddccfa8760837745f9dcb20a8b8" ]
[ "train.py" ]
[ "#! /usr/bin/env python\n\nimport argparse\nimport os\nimport numpy as np\nimport json\nfrom voc import parse_voc_annotation\nfrom yolo import create_yolov3_model, dummy_loss\nfrom generator import BatchGenerator\nfrom utils.utils import normalize, evaluate, makedirs\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom keras.optimizers import Adam\nfrom callbacks import CustomModelCheckpoint, CustomTensorBoard\nfrom utils.multi_gpu_model import multi_gpu_model\nimport tensorflow as tf\nimport keras\nfrom keras.models import load_model\n\ndef create_training_instances(\n train_annot_folder,\n train_image_folder,\n train_cache,\n valid_annot_folder,\n valid_image_folder,\n valid_cache,\n labels,\n):\n # parse annotations of the training set\n train_ints, train_labels = parse_voc_annotation(train_annot_folder, train_image_folder, train_cache, labels)\n print(\"Train labels found: \", train_labels)\n # parse annotations of the validation set, if any, otherwise split the training set\n if os.path.exists(valid_annot_folder):\n valid_ints, valid_labels = parse_voc_annotation(valid_annot_folder, valid_image_folder, valid_cache, labels)\n else:\n print(\"valid_annot_folder not exists. Spliting the trainining set.\")\n\n train_valid_split = int(0.8*len(train_ints))\n np.random.seed(0)\n np.random.shuffle(train_ints)\n np.random.seed()\n\n valid_ints = train_ints[train_valid_split:]\n train_ints = train_ints[:train_valid_split]\n\n # compare the seen labels with the given labels in config.json\n if len(labels) > 0:\n overlap_labels = set(labels).intersection(set(train_labels.keys()))\n\n print('Seen labels: \\t' + str(train_labels) + '\\n')\n print('Given labels: \\t' + str(labels))\n\n # return None, None, None if some given label is not in the dataset\n if len(overlap_labels) < len(labels):\n print('Some labels have no annotations! Please revise the list of labels in the config.json.')\n return None, None, None\n else:\n print('No labels are provided. 
Train on all seen labels.')\n print(train_labels)\n labels = train_labels.keys()\n\n max_box_per_image = max([len(inst['object']) for inst in (train_ints + valid_ints)])\n\n return train_ints, valid_ints, sorted(labels), max_box_per_image\n\ndef create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):\n makedirs(tensorboard_logs)\n \n early_stop = EarlyStopping(\n monitor = 'loss', \n min_delta = 0.01, \n patience = 5, \n mode = 'min', \n verbose = 1\n )\n checkpoint = CustomModelCheckpoint(\n model_to_save = model_to_save,\n filepath = saved_weights_name,# + '{epoch:02d}.h5', \n monitor = 'loss', \n verbose = 1, \n save_best_only = True, \n mode = 'min', \n period = 1\n )\n reduce_on_plateau = ReduceLROnPlateau(\n monitor = 'loss',\n factor = 0.1,\n patience = 2,\n verbose = 1,\n mode = 'min',\n epsilon = 0.01,\n cooldown = 0,\n min_lr = 0\n )\n tensorboard = CustomTensorBoard(\n log_dir = tensorboard_logs,\n write_graph = True,\n write_images = True,\n ) \n return [early_stop, checkpoint, reduce_on_plateau, tensorboard]\n\ndef create_model(\n nb_class, \n anchors, \n max_box_per_image, \n max_grid, batch_size, \n warmup_batches, \n ignore_thresh, \n multi_gpu, \n saved_weights_name, \n lr,\n grid_scales,\n obj_scale,\n noobj_scale,\n xywh_scale,\n class_scale \n):\n if multi_gpu > 1:\n with tf.device('/cpu:0'):\n template_model, infer_model = create_yolov3_model(\n nb_class = nb_class, \n anchors = anchors, \n max_box_per_image = max_box_per_image, \n max_grid = max_grid, \n batch_size = batch_size//multi_gpu, \n warmup_batches = warmup_batches,\n ignore_thresh = ignore_thresh,\n grid_scales = grid_scales,\n obj_scale = obj_scale,\n noobj_scale = noobj_scale,\n xywh_scale = xywh_scale,\n class_scale = class_scale\n )\n else:\n template_model, infer_model = create_yolov3_model(\n nb_class = nb_class, \n anchors = anchors, \n max_box_per_image = max_box_per_image, \n max_grid = max_grid, \n batch_size = batch_size, \n warmup_batches = warmup_batches,\n ignore_thresh = ignore_thresh,\n grid_scales = grid_scales,\n obj_scale = obj_scale,\n noobj_scale = noobj_scale,\n xywh_scale = xywh_scale,\n class_scale = class_scale\n ) \n\n # load the pretrained weight if exists, otherwise load the backend weight only\n if os.path.exists(saved_weights_name): \n print(\"\\nLoading pretrained weights.\\n\")\n template_model.load_weights(saved_weights_name)\n else:\n template_model.load_weights(\"backend.h5\", by_name=True) \n\n if multi_gpu > 1:\n train_model = multi_gpu_model(template_model, gpus=multi_gpu)\n else:\n train_model = template_model \n\n optimizer = Adam(lr=lr, clipnorm=0.001)\n train_model.compile(loss=dummy_loss, optimizer=optimizer) \n\n return train_model, infer_model\n\ndef _main_(args):\n config_path = args.conf\n\n with open(config_path) as config_buffer: \n config = json.loads(config_buffer.read())\n\n ###############################\n # Parse the annotations \n ###############################\n train_ints, valid_ints, labels, max_box_per_image = create_training_instances(\n config['train']['train_annot_folder'],\n config['train']['train_image_folder'],\n config['train']['cache_name'],\n config['valid']['valid_annot_folder'],\n config['valid']['valid_image_folder'],\n config['valid']['cache_name'],\n config['model']['labels']\n )\n print('\\nTraining on: \\t' + str(labels) + '\\n')\n\n ###############################\n # Create the generators \n ############################### \n train_generator = BatchGenerator(\n instances = train_ints, \n anchors = 
config['model']['anchors'], \n labels = labels, \n downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3\n max_box_per_image = max_box_per_image,\n batch_size = config['train']['batch_size'],\n min_net_size = config['model']['min_input_size'],\n max_net_size = config['model']['max_input_size'], \n shuffle = True, \n jitter = 0.1, #0.3 default\n norm = normalize\n )\n \n valid_generator = BatchGenerator(\n instances = valid_ints, \n anchors = config['model']['anchors'], \n labels = labels, \n downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3\n max_box_per_image = max_box_per_image,\n batch_size = config['train']['batch_size'],\n min_net_size = config['model']['min_input_size'],\n max_net_size = config['model']['max_input_size'], \n shuffle = True, \n jitter = 0.0, \n norm = normalize\n )\n\n ###############################\n # Create the model \n ###############################\n if os.path.exists(config['train']['saved_weights_name']): \n config['train']['warmup_epochs'] = 0\n warmup_batches = config['train']['warmup_epochs'] * (config['train']['train_times']*len(train_generator)) \n\n os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']\n multi_gpu = len(config['train']['gpus'].split(','))\n\n train_model, infer_model = create_model(\n nb_class = len(labels), \n anchors = config['model']['anchors'], \n max_box_per_image = max_box_per_image, \n max_grid = [config['model']['max_input_size'], config['model']['max_input_size']], \n batch_size = config['train']['batch_size'], \n warmup_batches = warmup_batches,\n ignore_thresh = config['train']['ignore_thresh'],\n multi_gpu = multi_gpu,\n saved_weights_name = config['train']['saved_weights_name'],\n lr = config['train']['learning_rate'],\n grid_scales = config['train']['grid_scales'],\n obj_scale = config['train']['obj_scale'],\n noobj_scale = config['train']['noobj_scale'],\n xywh_scale = config['train']['xywh_scale'],\n class_scale = config['train']['class_scale'],\n )\n\n ###############################\n # Kick off the training\n ###############################\n callbacks = create_callbacks(config['train']['saved_weights_name'], config['train']['tensorboard_dir'], infer_model)\n\n train_model.fit_generator(\n generator = train_generator, \n steps_per_epoch = len(train_generator) * config['train']['train_times'], \n epochs = config['train']['nb_epochs'] + config['train']['warmup_epochs'], \n verbose = 2 if config['train']['debug'] else 1,\n callbacks = callbacks, \n workers = 4,\n max_queue_size = 4\n )\n\n # make a GPU version of infer_model for evaluation\n if multi_gpu > 1:\n infer_model = load_model(config['train']['saved_weights_name'])\n\n ###############################\n # Run the evaluation\n ############################### \n # compute mAP for all the classes\n average_precisions = evaluate(infer_model, valid_generator)\n\n # print the score\n for label, average_precision in average_precisions.items():\n print(labels[label] + ': {:.4f}'.format(average_precision))\n print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions))) \n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(description='train and evaluate YOLO_v3 model on any dataset')\n argparser.add_argument('-c', '--conf', help='path to configuration file') \n\n args = argparser.parse_args()\n _main_(args)\n" ]
[ [ "tensorflow.device", "numpy.random.shuffle", "numpy.random.seed" ] ]
plarr2020-team1/mannequinchallenge
[ "4aff68aedad8619a2ec557f9162cc9692181318c" ]
[ "mannequinchallenge/infer.py" ]
[ "import torch\nimport numpy as np\nfrom mannequinchallenge.options.train_options import TrainOptions\nfrom mannequinchallenge.loaders import aligned_data_loader\nfrom mannequinchallenge.models import pix2pix_model\n\nmodel = None\n\nclass DictX(dict):\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError as k:\n raise AttributeError(k)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __delattr__(self, key):\n try:\n del self[key]\n except KeyError as k:\n raise AttributeError(k)\n\n def __repr__(self):\n return '<DictX ' + dict.__repr__(self) + '>'\n\nopt = DictX({\n 'input': 'single_view',\n \"simple_keypoints\": 0,\n \"mode\": \"Ours_Bilinear\",\n \"human_data_term\": 0,\n 'batchSize': 8,\n 'loadSize': 286,\n 'fineSize': 256,\n 'output_nc': 3,\n 'ngf': 64,\n 'ndf': 64,\n 'which_model_netG': 'unet_256',\n 'gpu_ids': '0,1,2,3',\n 'name': 'test_local',\n 'model': 'pix2pix',\n 'nThreads': 2,\n 'checkpoints_dir': './monoculardepth/mannequinchallenge/checkpoints/',\n 'norm': 'instance',\n 'display_winsize': 256,\n 'display_id': 1, \n 'identity': 0,\n 'max_dataset_size': float(\"inf\"),\n 'display_freq': 100,\n 'print_freq': 100,\n 'save_latest_freq': 5000,\n 'save_epoch_freq': 5,\n 'phase': 'train',\n 'which_epoch': 'latest',\n 'niter': 100,\n 'niter_decay': 100,\n 'lr_decay_epoch': 8,\n 'lr_policy': 'step',\n 'beta1': 0.5,\n 'lr': 0.0004,\n 'lambda_A': 10.0,\n 'lambda_B': 10.0,\n 'pool_size': 50,\n 'isTrain': False\n})\n\ndef infer_depth(img):\n global model\n BATCH_SIZE = 1\n\n # opt = TrainOptions().parse() # set CUDA_VISIBLE_DEVICES before import torch\n\n video_data_loader = aligned_data_loader.PLARRDataLoader(img, BATCH_SIZE)\n video_dataset = video_data_loader.load_data()\n\n if model == None:\n model = pix2pix_model.Pix2PixModel(opt)\n model.switch_to_eval()\n\n if torch.cuda.is_available():\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n\n for i, data in enumerate(video_dataset):\n stacked_img = data[0]\n disp_img = model.run_PLARR(stacked_img)\n disp_img = disp_img.resize(img.size)\n disp_array = np.array(disp_img)\n return disp_array, disp_img" ]
[ [ "numpy.array", "torch.cuda.is_available" ] ]
KISMED-TUDa/ECG_Classification
[ "7df7b6d28287f592536cdbf01b6aec73e7b045ef" ]
[ "scripts/spectrogram_example.py" ]
[ "from scipy.signal import spectrogram\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torchaudio\n\nfrom wettbewerb import load_references\n\nif __name__ == '__main__':\n ecg_leads = load_references(\"../data/training/\")[0]\n for ecg_lead_ in ecg_leads:\n if ecg_lead_.shape[0] == 18000:\n ecg_lead = torch.from_numpy(ecg_lead_).float()\n break\n print(ecg_lead.shape)\n s = torchaudio.transforms.Spectrogram(n_fft=64, win_length=64, hop_length=32, power=2, normalized=False)(ecg_lead)\n s = torch.log(s.clamp(min=1e-08))\n print(s.shape)\n plt.imshow(s, aspect=\"auto\")\n plt.show()\n print(s.shape)\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "torch.from_numpy" ] ]
nstfk/SentEval
[ "2bd42ae700fcfc4fb11b0ad55988ac95742d5334" ]
[ "examples/infersent.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n\"\"\"\nInferSent models. See https://github.com/facebookresearch/InferSent.\n\"\"\"\n\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport sys\nimport os\nimport torch\nimport logging\nimport sys\n\nprint(sys.argv[1:])\n# get models.py from InferSent repo\nfrom models import InferSent\n\n# Set up logger\n\nlogging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)\nlogging.info(\"\\n\\n\\nPATH_TO_DATA: \" + str(sys.argv[1]) + \"\\nPATH_TO_W2V: \" + str(sys.argv[2]) + \"\\nMODEL_PATH: \" + str(\n sys.argv[3]) + \"\\n\\n\")\n\n# Set PATHs\nPATH_SENTEVAL = '../'\nPATH_TO_DATA = sys.argv[1] # '../data'\nPATH_TO_W2V = sys.argv[2] # 'fasttext/crawl-300d-2M.vec'# 'glove/glove.840B.300d.txt' # or crawl-300d-2M.vec for V2\nMODEL_PATH = sys.argv[3] # 'infersent2.pkl'\nV = int(sys.argv[4]) # 2 # version of InferSent\n\nassert os.path.isfile(MODEL_PATH) and os.path.isfile(PATH_TO_W2V), \\\n 'Set MODEL and GloVe PATHs'\n\n# define senteval params\nparams_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}\n\nif (len(sys.argv)>5):\n nhid = int(sys.argv[5])\nelse:\n nhid=0\n\n#params_senteval['classifier'] = {'nhid':nhid , 'optim': 'rmsprop', 'batch_size': 128,'tenacity': 3, 'epoch_size': 2}\nparams_senteval['classifier'] ={'nhid': 0, 'optim': 'adam','batch_size': 64, 'tenacity': 5,'epoch_size': 4}\n\n\n# import senteval\nsys.path.insert(0, PATH_SENTEVAL)\nimport senteval\n\n\ndef prepare(params, samples):\n params.infersent.build_vocab([' '.join(s) for s in samples], tokenize=False)\n\n\ndef batcher(params, batch):\n sentences = [' '.join(s) for s in batch]\n embeddings = params.infersent.encode(sentences, bsize=params.batch_size, tokenize=False)\n return embeddings\n\n\n\"\"\"\nEvaluation of trained model on Transfer Tasks (SentEval)\n\"\"\"\n \nif __name__ == \"__main__\":\n # Load InferSent model\n params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,\n 'pool_type': 'max', 'dpout_model': 0.0, 'version': V}\n model = InferSent(params_model)\n model.load_state_dict(torch.load(MODEL_PATH))\n model.set_w2v_path(PATH_TO_W2V)\n\n params_senteval['infersent'] = model.cuda()\n\n se = senteval.engine.SE(params_senteval, batcher, prepare)\n transfer_tasks = ['MEDNLI','ClinicalSTS','BIOSSES','ClinicalSTS2']\n results = se.eval(transfer_tasks)\n print(results)\n" ]
[ [ "torch.load" ] ]
oscarkey/safe-exploration
[ "32f0582a7b54ab7d4c1d415afbcf5e9554e8bcec" ]
[ "safe_exploration/episode_runner.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 29 11:11:23 2017\n\n@author: tkoller\n\"\"\"\nimport time\nimport warnings\n\nimport numpy as np\nfrom scipy.spatial.qhull import ConvexHull\n\nfrom . import utils_ellipsoid\nfrom .safempc_cem import MpcResult\nfrom .sampling_models import MonteCarloSafetyVerification\nfrom .utils import generate_initial_samples, unavailable\nfrom .utils_config import create_solver, create_env\nfrom .utils_sacred import SacredAggregatedMetrics\n\ntry:\n import matplotlib.pyplot as plt\n _has_matplotlib = True\nexcept:\n _has_matplotlib = False\n\n\ndef run_episodic(conf, metrics: SacredAggregatedMetrics, visualize=False):\n \"\"\" Run episode setting \"\"\"\n\n warnings.warn(\"Need to check relative dynamics\")\n\n X_all = []\n y_all = []\n cc_all = []\n exit_codes_all = []\n safety_failure_all = []\n for k in range(conf.n_scenarios):\n\n env = create_env(conf, conf.env_name, conf.env_options)\n solver, safe_policy = create_solver(conf, env)\n\n solver.init_solver(conf.cost)\n if conf.init_mode is None:\n X = None\n y = None\n have_initial_samples = False\n else:\n X, y = generate_initial_samples(env, conf, conf.relative_dynamics, solver,\n safe_policy)\n if conf.plot_initial_samples:\n axes = plt.axes()\n plotted = env.plot_states(axes, [X[:, :env.n_s]], includes_initial_samples=True)\n if plotted:\n plt.show()\n solver.update_model(X, y, opt_hyp=True, reinitialize_solver=True, replace_old=False)\n metrics.save_array(X, f'initial_samples_{k}')\n have_initial_samples = True\n\n X_list = [X]\n y_list = [y]\n exit_codes_k = []\n safety_failure_k = []\n cc_k = []\n\n episode = 0\n total_steps = 0\n while total_steps < conf.total_steps:\n print(f'Starting episode {episode+1} in scenario {k+1}/{conf.n_scenarios} '\n f'after {total_steps}/{conf.total_steps} steps')\n\n # If we have nearly reached the maximum number of desired steps, restrict the episode length.\n max_episode_steps = min(conf.n_steps, conf.total_steps - total_steps)\n\n xx, yy, cc, exit_codes_i, safety_failure = do_rollout(\n env, max_episode_steps, scenario_id=k, episode_id=episode, metrics=metrics,\n cost=conf.rl_immediate_cost,\n solver=solver,\n plot_ellipsoids=conf.plot_ellipsoids,\n plot_trajectory=conf.plot_trajectory,\n save_plots_to_sacred=conf.save_plots_to_sacred,\n plot_episode_trajectory=conf.plot_episode_trajectory,\n render=conf.render,\n obs_frequency=conf.obs_frequency)\n\n if X is None:\n X = xx\n y = yy\n else:\n X = np.vstack((X, xx))\n y = np.vstack((y, yy))\n\n X_list += [xx]\n y_list += [yy]\n cc_k += [cc]\n exit_codes_k += [exit_codes_i]\n safety_failure_k += [safety_failure]\n\n metrics.save_array(xx, f'states_actions_{k}_{episode}')\n\n if have_initial_samples:\n states_excl_initial_samples = np.vstack(X_list[1:])[:, :env.n_s]\n else:\n states_excl_initial_samples = np.vstack(X_list)[:, :env.n_s]\n metrics.log_scalar('sample_variance', states_excl_initial_samples.var(), episode)\n if states_excl_initial_samples.shape[0] >= 3:\n sample_volume = ConvexHull(states_excl_initial_samples).volume\n else:\n sample_volume = 0.\n metrics.log_scalar('sample_volume', sample_volume, episode)\n\n if conf.plot_states:\n axes = plt.axes()\n states = [x[:, :env.n_s] for x in X_list]\n plotted = env.plot_states(axes, states, have_initial_samples)\n if plotted:\n if conf.save_plots_to_sacred:\n metrics.save_figure(plt.gcf(), f'training_points_{k}_{episode}')\n plt.clf()\n else:\n plt.show()\n\n training_start_time = time.time()\n solver.update_model(X, y, opt_hyp=conf.opt_hyp, 
reinitialize_solver=True)\n training_end_time = time.time()\n\n metrics.log_scalar('training_time', training_end_time - training_start_time, episode)\n metrics.log_scalar('num_samples', X.shape[0], episode)\n\n # Returned states does not include initial state (why?).\n total_steps += xx.shape[0] + 1\n episode += 1\n\n exit_codes_all += [exit_codes_k]\n safety_failure_all += [safety_failure_k]\n cc_all += [cc_k]\n X_all += [X_list]\n y_all += [y_list]\n\n metrics.flush()\n\n if not conf.data_savepath is None:\n savepath_data = \"{}/{}\".format(conf.save_path, conf.data_savepath)\n a, b = solver.lin_model\n np.savez(savepath_data, X=X, y=y, a=a, b=b, init_mode=conf.init_mode)\n\n if conf.save_results:\n save_name_results = conf.save_name_results\n if save_name_results is None:\n save_name_results = \"results_episode\"\n\n savepath_results = conf.save_path + \"/\" + save_name_results\n\n results_dict = dict()\n results_dict[\"cc_all\"] = cc_all\n results_dict[\"X_all\"] = X_all\n results_dict[\"y_all\"] = y_all\n results_dict[\"exit_codes\"] = exit_codes_all\n results_dict[\"safety_failure_all\"] = safety_failure_all\n\n np.save(savepath_results, results_dict)\n\n # TO-DO: may wanna do this aswell\n # gp_dict = gp.to_dict()\n # save_data_gp_path = \"{}/res_gp\".format(save_path)\n # np.save(save_data_gp_path,gp_dict)\n\n\n@unavailable(not _has_matplotlib, \"matplotlib\", conditionals=[\"plot_ellipsoids,plot_trajectory\"])\ndef do_rollout(env, n_steps, scenario_id: int, episode_id: int, metrics: SacredAggregatedMetrics, solver=None,\n cost=None,\n plot_trajectory=True, save_plots_to_sacred=False,\n verbosity=1, sampling_verification=False,\n plot_ellipsoids=False, plot_episode_trajectory=False, render=False,\n check_system_safety=False, savedir_trajectory_plots=None, mean=None,\n std=None, obs_frequency=1):\n \"\"\" Perform a rollout on the system\n\n \"\"\"\n\n state = env.reset(mean, std)\n\n xx = np.zeros((1, env.n_s + env.n_u))\n yy = np.zeros((1, env.n_s))\n exit_codes = np.zeros((1, 1))\n obs = state\n\n cc = []\n n_successful = 0\n mpc_results = []\n total_time_in_solver = 0\n env_result = -1\n safety_failure = False\n if plot_trajectory:\n fig, ax = env.plot_safety_bounds()\n\n ell = None\n\n if sampling_verification:\n gp = solver.gp\n sampler = MonteCarloSafetyVerification(gp)\n\n if check_system_safety:\n n_inside = 0\n n_test_safety = 0\n\n for i in range(n_steps):\n p_traj = None\n q_traj = None\n k_fb = None\n k_ff = None\n\n if solver is None:\n action = env.random_action()\n exit_code = 5\n else:\n t_start_solver = time.time()\n action, mpc_result = solver.get_action(state) # ,lqr_only = True)\n t_end_solver = time.time()\n\n t_solver = t_end_solver - t_start_solver\n total_time_in_solver += t_solver\n\n exit_code = 1 if mpc_result in (MpcResult.FOUND_SOLUTION, MpcResult.PREVIOUS_SOLUTION) else 0\n mpc_results.append(mpc_result)\n\n if verbosity > 0:\n print((\"total time solver in ms: {}\".format(t_solver)))\n\n action, next_state, observation, done, env_result = env.step(action)\n if not cost is None:\n c = [cost(next_state)]\n cc += c\n if verbosity > 0:\n print((\"Immediate cost for current step: {}\".format(c)))\n if verbosity > 0:\n print((\"\\n==== Applied normalized action at time step {} ====\".format(i)))\n print(action)\n print(\"\\n==== Next state (normalized) ====\")\n print(next_state)\n print(\"==========================\\n\")\n if render:\n env.render()\n\n # Plot the trajectory planned by the MPC solver\n if plot_trajectory:\n if not solver is None and 
plot_ellipsoids and solver.has_openloop:\n p_traj, q_traj, k_fb, k_ff = solver.get_trajectory_openloop(\n state, get_controls=True)\n\n if not ell is None:\n for j in range(len(ell)):\n ell[j].remove()\n ax, ell = env.plot_ellipsoid_trajectory(p_traj, q_traj, ax=ax,\n color=\"r\")\n fig.canvas.draw()\n # plt.draw()\n\n plt.show(block=False)\n plt.pause(0.5)\n ax = env.plot_state(ax)\n fig.canvas.draw()\n plt.show(block=False)\n plt.pause(0.2)\n if not savedir_trajectory_plots is None:\n save_name = \"img_step_{}.png\".format(i)\n save_path = \"{}/{}\".format(savedir_trajectory_plots, save_name)\n plt.savefig(save_path)\n\n # Verify whether the GP distribution is inside the ellipsoid over multiple\n # steps via sampling\n if sampling_verification:\n if p_traj is None:\n p_traj, q_traj, k_fb, k_ff = solver.get_trajectory_openloop(\n state,\n get_controls=True)\n\n _, s_all = sampler.sample_n_step(state[:, None], k_fb, k_ff, p_traj,\n n_samples=300)\n safety_ratio, _ = sampler.inside_ellipsoid_ratio(s_all, q_traj, p_traj)\n if verbosity > 0:\n print((\"\\n==== GP samples inside Safety Ellipsoids (time step {}) \"\n \"====\".format(i)))\n print(safety_ratio)\n print(\"==========================\\n\")\n\n # check if the true system is inside the one-step ellipsoid by checking if the\n # next state is inside p,q ellipsoid\n if not solver is None:\n if check_system_safety:\n if p_traj is None:\n p_traj, q_traj, k_fb, k_ff = solver.get_trajectory_openloop(\n state,\n get_controls=True)\n bool_inside = utils_ellipsoid.sample_inside_ellipsoid(\n next_state, p_traj[0, :, None], q_traj[0])\n\n n_test_safety += 1\n if bool_inside:\n n_inside += 1\n if verbosity > 0:\n print((\n \"\\n==== Next state inside uncertainty ellipsoid:{}\"\n \" ====\\n\".format(bool_inside)))\n\n state_action = np.hstack((state, action))\n xx = np.vstack((xx, state_action))\n yy = np.vstack((yy, observation))\n\n exit_codes = np.vstack((exit_codes, exit_code))\n n_successful += 1\n state = next_state\n if done:\n safety_failure = True\n break\n\n metrics.log_scalar('episode_length', n_successful, episode_id)\n metrics.log_scalar('mpc_found_solution_count', mpc_results.count(MpcResult.FOUND_SOLUTION), episode_id)\n metrics.log_scalar('mpc_previous_solution_count', mpc_results.count(MpcResult.PREVIOUS_SOLUTION), episode_id)\n metrics.log_scalar('safe_controller_fallback_count', mpc_results.count(MpcResult.SAFE_CONTROLLER), episode_id)\n metrics.log_scalar('mean_time_in_solver', float(total_time_in_solver) / n_successful, episode_id)\n metrics.log_scalar('env_result', env_result, episode_id)\n metrics.log_non_scalars(env.collect_metrics(), episode_id)\n if solver is not None:\n metrics.log_non_scalars(solver.collect_metrics(), episode_id)\n\n if plot_episode_trajectory:\n axes = plt.axes()\n plotted = env.plot_current_trajectory(axes)\n if plotted:\n if save_plots_to_sacred:\n metrics.save_figure(plt.gcf(), f'trajectories_{scenario_id}_{episode_id}')\n plt.clf()\n else:\n plt.show()\n\n if n_successful == 0:\n warnings.warn(\"Agent survived 0 steps, cannot collect data\")\n xx = []\n yy = []\n exit_codes = []\n cc = []\n else:\n xx = xx[1:-1:obs_frequency, :]\n yy = yy[1:-1:obs_frequency, :]\n exit_codes = exit_codes[1:, :]\n\n print((\"Agent survived {} steps\".format(n_successful)))\n if verbosity > 0:\n print(\"========== State/Action Trajectory ===========\")\n print(xx)\n if check_system_safety and n_test_safety > 0:\n print(\"\\n======= percentage system steps inside safety bounds =======\")\n print((float(n_inside) / 
n_test_safety))\n return xx, yy, cc, exit_codes, safety_failure\n" ]
[ [ "numpy.vstack", "numpy.save", "matplotlib.pyplot.pause", "scipy.spatial.qhull.ConvexHull", "numpy.zeros", "matplotlib.pyplot.savefig", "numpy.savez", "matplotlib.pyplot.gcf", "matplotlib.pyplot.clf", "matplotlib.pyplot.axes", "numpy.hstack", "matplotlib.pyplot.show" ] ]
norton-chris/MARS-Net
[ "6f671837d0629422680c78adf9b643894debae70" ]
[ "models/debug_utils.py" ]
[ "'''\r\nAuthor Junbong Jang\r\nDate 9/2/2020\r\n\r\nContains debugging functions useful for deep learning research\r\n\r\n'''\r\nimport sys\r\nsys.path.append('..')\r\nsys.path.append('../data_handle')\r\nfrom UserParams import UserParams\r\nfrom data_processor import get_std_mean_from_images\r\n\r\nimport math\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport h5py\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import backend as K\r\nfrom functools import wraps\r\n\r\n\r\ndef log_function_call(a_func):\r\n \"\"\"decorator to log which function was called\"\"\"\r\n @wraps(a_func)\r\n def wrapTheFunction(*args, **kwargs):\r\n print(a_func.__name__, \"is called @@@@@@@@@@@@@@@@@@\")\r\n return a_func(*args, **kwargs)\r\n\r\n return wrapTheFunction\r\n\r\n\r\ndef check_loaded_weights(weights_path):\r\n loaded_weights = h5py.File(weights_path)\r\n allKeys = list(loaded_weights.keys())\r\n\r\n print('check_loaded_weights')\r\n print(loaded_weights[allKeys[0]].name)\r\n for hi in loaded_weights[allKeys[0]]:\r\n print(hi)\r\n\r\n print()\r\n for hi in loaded_weights[allKeys[0]]['functional_1']:\r\n print(hi)\r\n print()\r\n # print(loaded_weights[allKeys[0]]['conv1_1_W'])\r\n # print(loaded_weights[allKeys[0]]['conv1_1_W'][:])\r\n\r\n\r\ndef get_flops():\r\n # https://stackoverflow.com/questions/49525776/how-to-calculate-a-mobilenet-flops-in-keras\r\n # https://github.com/tensorflow/tensorflow/issues/32809\r\n session = tf.compat.v1.Session()\r\n graph = tf.compat.v1.get_default_graph()\r\n\r\n with graph.as_default():\r\n with session.as_default():\r\n run_meta = tf.compat.v1.RunMetadata()\r\n opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()\r\n\r\n # We use the Keras session graph in the call to the profiler.\r\n flops = tf.compat.v1.profiler.profile(graph=graph, run_meta=run_meta, cmd='op', options=opts)\r\n\r\n return flops.total_float_ops # Prints the \"flops\" of the model.\r\n\r\n\r\n# For training set, check if all the files have raw image and labeling\r\ndef check_namespace(img_path, msk_path, img_format):\r\n valid_list = []\r\n for file in self.namespace:\r\n if os.path.isfile(img_path + file) and os.path.isfile(msk_path + file) and file.endswith(img_format):\r\n valid_list.append(file)\r\n\r\n return valid_list\r\n\r\n\r\ndef calc_receptive_field(n_in, jump_in, r_in, center_in, orig_img_size, padding, kernel_size, stride):\r\n # refered to https://medium.com/mlreview/a-guide-to-receptive-field-arithmetic-for-convolutional-neural-networks-e0f514068807\r\n n_out = math.floor((n_in + 2*padding - kernel_size)/stride) + 1 # number of features in one dimension\r\n jump_out = jump_in * stride # distance between two consecutive feature\r\n r_out = r_in + (kernel_size - 1) * jump_in # receptive field\r\n\r\n # center coordinate of the first or last feature\r\n if center_in > orig_img_size / 2:\r\n center_out = center_in - ((kernel_size-1)/2.0 - padding) * jump_in\r\n elif center_in < orig_img_size / 2:\r\n center_out = center_in + ((kernel_size-1)/2.0 - padding) * jump_in\r\n else:\r\n center_out = center_in\r\n return n_out, jump_out, r_out, center_out\r\n\r\n\r\ndef calc_receptive_field_demo():\r\n r_in = 1\r\n jump_in = 1\r\n n_in = 128\r\n orig_img_size = n_in\r\n center_in = 0.5\r\n for layer_type in ['conv', 'conv', 'maxpool',\r\n 'conv', 'conv', 'maxpool',\r\n 'conv', 'conv', 'conv', 'conv', 'maxpool',\r\n 'conv', 'conv', 'conv', 'conv', 'maxpool',\r\n 'conv', 'conv', 'conv', 'conv']:\r\n if layer_type == 'conv':\r\n kernel_size = 3\r\n stride = 1\r\n 
padding = 1\r\n elif layer_type == 'maxpool':\r\n kernel_size = 2\r\n stride = 2\r\n padding = 0\r\n\r\n n_in, jump_in, r_in, center_in = calc_receptive_field(n_in, jump_in, r_in, center_in, orig_img_size, padding,\r\n kernel_size, stride)\r\n print(layer_type, 'n:', n_in, ' jump:', jump_in, ' r:', r_in, ' center_in:', center_in)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # test gpu for tensorflow\r\n # from tensorflow.python.client import device_lib\r\n # print(device_lib.list_local_devices())\r\n\r\n # --------------------------------------------------\r\n # test numpy\r\n first_array = np.asarray([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]])\r\n second_array = np.asarray([[1, 5, 5], [6, 6, 2]])\r\n\r\n print(first_array, first_array.shape)\r\n first_array[1, second_array > 4] = 5\r\n print(first_array)\r\n # new_first_array = np.moveaxis(first_array, 0, -1)\r\n # print(new_first_array, new_first_array.shape)\r\n\r\n # --------------------------------------------------\r\n # save_path = 'results/debugger/'\r\n # constants = UserParams('predict')\r\n # frame = constants.frame_list[0]\r\n # dataset_name = constants.dataset[0]\r\n #\r\n # temp_data = np.load(constants.get_crop_path() + dataset_name + '_' + str(frame) + '_split0_train_mask.npz')\r\n # temp_img = temp_data['arr_0']\r\n # temp_mask = temp_data['arr_1']\r\n #\r\n # print(temp_img.shape, temp_mask.shape)\r\n # show_cropped_image(temp_img, temp_mask, dataset_name, save_path)\r\n\r\n\r\n" ]
[ [ "tensorflow.compat.v1.profiler.profile", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation", "numpy.asarray", "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.RunMetadata" ] ]
grasswolfs/Paddle
[ "0c2fff447c7d5b0bbad473a1590872c5343e1e56" ]
[ "python/paddle/fluid/tests/unittests/dist_fleet_ctr.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDistribute CTR model for test fleet api\n\"\"\"\n\nfrom __future__ import print_function\n\nimport shutil\nimport tempfile\nimport time\n\nimport paddle\nimport paddle.fluid as fluid\nimport os\nimport numpy as np\n\nimport ctr_dataset_reader\nfrom test_dist_fleet_base import runtime_main, FleetDistRunnerBase\n\n# Fix seed for test\nfluid.default_startup_program().random_seed = 1\nfluid.default_main_program().random_seed = 1\n\n\nclass TestDistCTR2x2(FleetDistRunnerBase):\n \"\"\"\n For test CTR model, using Fleet api\n \"\"\"\n\n def net(self, batch_size=4, lr=0.01):\n \"\"\"\n network definition\n\n Args:\n batch_size(int): the size of mini-batch for training\n lr(float): learning rate of training\n Returns:\n avg_cost: LoDTensor of cost.\n \"\"\"\n dnn_input_dim, lr_input_dim, train_file_path = ctr_dataset_reader.prepare_data(\n )\n dnn_data = fluid.layers.data(\n name=\"dnn_data\",\n shape=[-1, 1],\n dtype=\"int64\",\n lod_level=1,\n append_batch_size=False)\n lr_data = fluid.layers.data(\n name=\"lr_data\",\n shape=[-1, 1],\n dtype=\"int64\",\n lod_level=1,\n append_batch_size=False)\n label = fluid.layers.data(\n name=\"click\",\n shape=[-1, 1],\n dtype=\"int64\",\n lod_level=0,\n append_batch_size=False)\n\n datas = [dnn_data, lr_data, label]\n\n # build dnn model\n dnn_layer_dims = [128, 128, 64, 32, 1]\n dnn_embedding = fluid.layers.embedding(\n is_distributed=False,\n input=dnn_data,\n size=[dnn_input_dim, dnn_layer_dims[0]],\n param_attr=fluid.ParamAttr(\n name=\"deep_embedding\",\n initializer=fluid.initializer.Constant(value=0.01)),\n is_sparse=True)\n dnn_pool = fluid.layers.sequence_pool(\n input=dnn_embedding, pool_type=\"sum\")\n dnn_out = dnn_pool\n for i, dim in enumerate(dnn_layer_dims[1:]):\n fc = fluid.layers.fc(\n input=dnn_out,\n size=dim,\n act=\"relu\",\n param_attr=fluid.ParamAttr(\n initializer=fluid.initializer.Constant(value=0.01)),\n name='dnn-fc-%d' % i)\n dnn_out = fc\n\n # build lr model\n lr_embbding = fluid.layers.embedding(\n is_distributed=False,\n input=lr_data,\n size=[lr_input_dim, 1],\n param_attr=fluid.ParamAttr(\n name=\"wide_embedding\",\n initializer=fluid.initializer.Constant(value=0.01)),\n is_sparse=True)\n lr_pool = fluid.layers.sequence_pool(input=lr_embbding, pool_type=\"sum\")\n\n merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)\n\n predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')\n acc = fluid.layers.accuracy(input=predict, label=label)\n auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,\n label=label)\n cost = fluid.layers.cross_entropy(input=predict, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n\n self.feeds = datas\n self.train_file_path = train_file_path\n self.avg_cost = avg_cost\n self.predict = predict\n\n return avg_cost\n\n def check_model_right(self, dirname):\n model_filename = os.path.join(dirname, \"__model__\")\n\n with 
open(model_filename, \"rb\") as f:\n program_desc_str = f.read()\n\n program = fluid.Program.parse_from_string(program_desc_str)\n with open(os.path.join(dirname, \"__model__.proto\"), \"w\") as wn:\n wn.write(str(program))\n\n def do_pyreader_training(self, fleet):\n \"\"\"\n do training using dataset, using fetch handler to catch variable\n Args:\n fleet(Fleet api): the fleet object of Parameter Server, define distribute training role\n \"\"\"\n dnn_input_dim, lr_input_dim, train_file_path = ctr_dataset_reader.prepare_data(\n )\n\n exe = fluid.Executor(fluid.CPUPlace())\n\n fleet.init_worker()\n exe.run(fleet.startup_program)\n\n thread_num = 2\n batch_size = 128\n filelist = []\n for _ in range(thread_num):\n filelist.append(train_file_path)\n\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n ctr_dataset_reader.CtrReader()._reader_creator(filelist),\n buf_size=batch_size * 100),\n batch_size=batch_size)\n self.reader.decorate_sample_list_generator(train_reader)\n\n compiled_prog = fluid.compiler.CompiledProgram(\n fleet.main_program).with_data_parallel(\n loss_name=self.avg_cost.name,\n build_strategy=self.strategy.get_build_strategy(),\n exec_strategy=self.strategy.get_execute_strategy())\n\n for epoch_id in range(1):\n self.reader.start()\n try:\n pass_start = time.time()\n while True:\n loss_val = exe.run(program=compiled_prog,\n fetch_list=[self.avg_cost.name])\n loss_val = np.mean(loss_val)\n print(\"TRAIN ---> pass: {} loss: {}\\n\".format(epoch_id,\n loss_val))\n pass_time = time.time() - pass_start\n except fluid.core.EOFException:\n self.reader.reset()\n\n model_dir = tempfile.mkdtemp()\n fleet.save_inference_model(\n exe, model_dir, [feed.name for feed in self.feeds], self.avg_cost)\n self.check_model_right(model_dir)\n shutil.rmtree(model_dir)\n fleet.stop_worker()\n\n def do_dataset_training(self, fleet):\n dnn_input_dim, lr_input_dim, train_file_path = ctr_dataset_reader.prepare_data(\n )\n\n exe = fluid.Executor(fluid.CPUPlace())\n\n fleet.init_worker()\n exe.run(fleet.startup_program)\n\n thread_num = 2\n batch_size = 128\n filelist = []\n for _ in range(thread_num):\n filelist.append(train_file_path)\n\n # config dataset\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_batch_size(batch_size)\n dataset.set_use_var(self.feeds)\n pipe_command = 'python ctr_dataset_reader.py'\n dataset.set_pipe_command(pipe_command)\n\n dataset.set_filelist(filelist)\n dataset.set_thread(thread_num)\n\n for epoch_id in range(1):\n pass_start = time.time()\n dataset.set_filelist(filelist)\n exe.train_from_dataset(\n program=fleet.main_program,\n dataset=dataset,\n fetch_list=[self.avg_cost],\n fetch_info=[\"cost\"],\n print_period=2,\n debug=False)\n pass_time = time.time() - pass_start\n\n res_dict = dict()\n res_dict['loss'] = self.avg_cost\n\n class FH(fluid.executor.FetchHandler):\n def handle(self, res_dict):\n for key in res_dict:\n v = res_dict[key]\n print(\"{}: \\n {}\\n\".format(key, v))\n\n for epoch_id in range(1):\n pass_start = time.time()\n dataset.set_filelist(filelist)\n exe.train_from_dataset(\n program=fleet.main_program,\n dataset=dataset,\n fetch_handler=FH(var_dict=res_dict, period_secs=2),\n debug=False)\n pass_time = time.time() - pass_start\n\n model_dir = tempfile.mkdtemp()\n fleet.save_inference_model(\n exe, model_dir, [feed.name for feed in self.feeds], self.avg_cost)\n self.check_model_right(model_dir)\n shutil.rmtree(model_dir)\n fleet.stop_worker()\n\n\nif __name__ == \"__main__\":\n runtime_main(TestDistCTR2x2)\n" ]
[ [ "numpy.mean" ] ]
launis/areadata
[ "8cf0e30ec489ce9655fcd9829284d1ec70e7360d" ]
[ "create_neuro_prediction.py" ]
[ "def plot_history(hist):\r\n import matplotlib.pyplot as plt\r\n\r\n plt.figure()\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Mean Squared Error')\r\n plt.plot(hist['epoch'], hist['mean_squared_error'],\r\n label='Train Error')\r\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\r\n label = 'Val Error')\r\n plt.legend()\r\n plt.show()\r\n \r\n \r\ndef create_neuro_prediction(train, test, target, mainpath, numeric_features=[], categorical_features=[], scaled=True, test_size = 0.2, Skfold=False):\r\n \r\n from sklearn.metrics import mean_squared_error\r\n from sklearn.model_selection import train_test_split \r\n import numpy as np\r\n import pandas as pd\r\n\r\n import tensorflow as tf \r\n \r\n from prepare_and_scale_data import prepare_and_scale_data\r\n from get_compiled_model import get_compiled_model\r\n from create_tensorpad_path import create_tensorpad_path\r\n \r\n #split the initial train dataframe to test/train dataframes\r\n \r\n data, train_scaled, train_non_scaled, test_scaled, test_non_scaled = prepare_and_scale_data(train, test, numeric_features, categorical_features)\r\n y_train = data[target]\r\n \r\n if scaled:\r\n X_train, X_test, y_train, y_test = train_test_split(train_scaled, y_train, test_size=test_size)\r\n test = test_scaled\r\n else:\r\n X_train, X_test, y_train, y_test = train_test_split(train_non_scaled, y_train, test_size=test_size)\r\n test = test_non_scaled\r\n\r\n \r\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=test_size)\r\n\r\n # Prepare the training dataset\r\n train_dataset = tf.data.Dataset.from_tensor_slices((X_train.values, y_train.values))\r\n train_dataset = train_dataset.shuffle(buffer_size=1024).batch(32)\r\n\r\n # Prepare the test dataset\r\n test_dataset = tf.data.Dataset.from_tensor_slices((X_test.values, y_test.values))\r\n test_dataset = test_dataset.batch(32)\r\n\r\n # Prepare the validation dataset\r\n val_dataset = tf.data.Dataset.from_tensor_slices((X_val.values, y_val.values))\r\n val_dataset = val_dataset.batch(32)\r\n\r\n log_path, log_dir = create_tensorpad_path(mainpath)\r\n model, callbacks = get_compiled_model(X_train, target, log_dir)\r\n\r\n history = model.fit(train_dataset, epochs=50, validation_data=val_dataset, callbacks=callbacks)\r\n\r\n result = model.evaluate(test_dataset)\r\n print(dict(zip(model.metrics_names, result)))\r\n\r\n\r\n pred_train = model.predict(X_train)\r\n print(np.sqrt(mean_squared_error(y_train,pred_train)))\r\n\r\n pred = model.predict(X_test)\r\n print(np.sqrt(mean_squared_error(y_test,pred)))\r\n\r\n hist = pd.DataFrame(history.history)\r\n hist['epoch'] = history.epoch\r\n\r\n if scaled:\r\n pred_all = model.predict(test_scaled)\r\n else:\r\n pred_all = model.predict(test_non_scaled)\r\n\r\n pred_df = pd.DataFrame(pred_all, columns = target)\r\n\r\n for t in target:\r\n data.loc[:, \"Ennustettu \" + t] = pred_df[t]\r\n return(data, test, model, hist, log_path)" ]
[ [ "matplotlib.pyplot.legend", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.figure", "pandas.DataFrame", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.plot", "tensorflow.data.Dataset.from_tensor_slices", "matplotlib.pyplot.xlabel" ] ]
gregordecristoforo/3ppy
[ "6a86152746d4ac8a707273cc100239b9fe004a54" ]
[ "model/point_model.py" ]
[ "from typing import Callable, Tuple, Union\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom model.forcing import (\n Forcing,\n StandardForcingGenerator,\n ForcingGenerator,\n PulseParameters,\n)\nfrom model.pulse_shape import (\n ShortPulseGenerator,\n ExponentialShortPulseGenerator,\n PulseGenerator,\n)\nfrom scipy.signal import fftconvolve\n\n\n__COMMON_DISTRIBUTIONS__ = [\"exp\", \"deg\"]\n\n\ndef _get_common_distribution(\n distribution_name: str, average: float\n) -> Callable[[int], np.ndarray]:\n if distribution_name == \"exp\":\n return lambda k: np.random.default_rng().exponential(scale=average, size=k)\n elif distribution_name == \"deg\":\n return lambda k: average * np.ones(k)\n else:\n raise NotImplementedError\n\n\nclass PointModel:\n \"\"\"PointModel is a container for all model parameters and is responsible of\n generating a realization of the process through make_realization.\n\n Uses a ForcingGenerator to generate the forcing, this is by default\n a StandardForcingGenerator.\n \"\"\"\n\n def __init__(self, gamma: float, total_duration: float, dt: float):\n self.gamma = gamma\n self.T = total_duration\n self.dt = dt\n self._times: np.ndarray = np.arange(0, total_duration, dt)\n self._forcing_generator: ForcingGenerator = StandardForcingGenerator()\n self._pulse_generator: ShortPulseGenerator = ExponentialShortPulseGenerator()\n self._last_used_forcing: Forcing = None\n self._noise = None\n\n def make_realization(self) -> Tuple[np.ndarray, np.ndarray]:\n result = np.zeros(len(self._times))\n forcing = self._forcing_generator.get_forcing(self._times, gamma=self.gamma)\n\n for k in tqdm(range(forcing.total_pulses), position=0, leave=True):\n pulse_parameters = forcing.get_pulse_parameters(k)\n self._add_pulse_to_signal(result, pulse_parameters)\n\n if self._noise is not None:\n result += self._discretize_noise(forcing)\n\n self._last_used_forcing = forcing\n\n return self._times, result\n\n def get_last_used_forcing(self) -> Forcing:\n \"\"\"\n Returns the latest used forcing. 
If several realizations of the process are run only the latest forcing will be\n        available.\n        -------\n        \"\"\"\n        return self._last_used_forcing\n\n    def set_custom_forcing_generator(self, forcing_generator: ForcingGenerator):\n        self._forcing_generator = forcing_generator\n\n    def set_amplitude_distribution(\n        self, amplitude_distribution: str, average_amplitude: float = 1.0\n    ):\n        \"\"\"Sets the amplitude distribution to be used by the forcing.\n\n        Args:\n            amplitude_distribution: str\n                'exp': exponential with scale parameter average_amplitude\n                'deg': degenerate with location average_amplitude\n            average_amplitude: float, defaults to 1.\n        \"\"\"\n        if amplitude_distribution in __COMMON_DISTRIBUTIONS__:\n            self._forcing_generator.set_amplitude_distribution(\n                _get_common_distribution(amplitude_distribution, average_amplitude)\n            )\n        else:\n            raise NotImplementedError\n\n    def set_duration_distribution(\n        self, duration_distribution: str, average_duration: float = 1.0\n    ):\n        \"\"\"Sets the duration distribution to be used by the forcing.\n\n        Args:\n            duration_distribution: str\n                'exp': exponential with scale parameter average_duration\n                'deg': degenerate with location average_duration\n            average_duration: float, defaults to 1.\n        \"\"\"\n        if duration_distribution in __COMMON_DISTRIBUTIONS__:\n            self._forcing_generator.set_duration_distribution(\n                _get_common_distribution(duration_distribution, average_duration)\n            )\n        else:\n            raise NotImplementedError\n\n    def set_pulse_shape(\n        self, pulse_generator: Union[PulseGenerator, ShortPulseGenerator]\n    ):\n        \"\"\"\n        Parameters\n        ----------\n        pulse_generator Instance of PulseGenerator or ShortPulseGenerator, get_pulse will be called for each pulse when making a realization.\n        \"\"\"\n        self._pulse_generator = pulse_generator\n\n    def add_noise(\n        self,\n        noise_to_signal_ratio: float,\n        seed: Union[None, int] = None,\n        noise_type: str = \"additive\",\n    ) -> None:\n        \"\"\"\n        Specifies noise for realization.\n        Parameters\n        ----------\n        noise_to_signal_ratio: float, defined as X_rms/S_rms where X is noise and S is signal.\n        seed: None or int, seed for the noise generator\n        noise_type: str\n            \"additive\": additive noise\n            \"dynamic\": dynamic noise (only applicable for constant duration times)\n            \"both\": both additive and dynamic noise\n        \"\"\"\n        assert noise_type in {\"additive\", \"dynamic\", \"both\"}\n        assert seed is None or isinstance(seed, int)\n        assert noise_to_signal_ratio >= 0\n\n        self._noise_type = noise_type\n        self._noise_random_number_generator = np.random.RandomState(seed=seed)\n        mean_amplitude = self._forcing_generator.get_forcing(\n            self._times, gamma=self.gamma\n        ).amplitudes.mean()\n        self._sigma = np.sqrt(noise_to_signal_ratio * self.gamma) * mean_amplitude\n\n        self._noise = np.zeros(len(self._times))\n\n    def _discretize_noise(self, forcing: Forcing) -> np.ndarray:\n        \"\"\"Discretizes noise for the realization\"\"\"\n\n        if self._noise_type in {\"additive\", \"both\"}:\n            self._noise += self._sigma * self._noise_random_number_generator.normal(\n                size=len(self._times)\n            )\n\n        if self._noise_type in {\"dynamic\", \"both\"}:\n            durations = forcing.durations\n            pulse_duration_constant = np.all(durations == durations[0])\n            assert (\n                pulse_duration_constant\n            ), \"Dynamic noise is only applicable for constant duration times.\"\n\n            kern = self._pulse_generator.get_pulse(\n                np.arange(-self._times[-1] / 2, self._times[-1] / 2, self.dt),\n                durations[0],\n            )\n            dW = self._noise_random_number_generator.normal(\n                scale=np.sqrt(2 * self.dt), size=len(self._times)\n            )\n            self._noise += self._sigma * 
fftconvolve(dW, kern, \"same\")\n\n return self._noise\n\n def _add_pulse_to_signal(\n self, signal: np.ndarray, pulse_parameters: PulseParameters\n ):\n \"\"\"\n Adds a pulse to the provided signal array. Uses self._pulse_generator to generate the pulse shape, this can\n either be a ps.PulseGenerator or a ps.ShortPulseGenerator.\n Parameters\n ----------\n signal Signal array under construction\n pulse_parameters Parameters of the current pulse\n\n \"\"\"\n if isinstance(self._pulse_generator, PulseGenerator):\n signal += pulse_parameters.amplitude * self._pulse_generator.get_pulse(\n self._times - pulse_parameters.arrival_time,\n pulse_parameters.duration,\n )\n return\n\n if isinstance(self._pulse_generator, ShortPulseGenerator):\n cutoff = self._pulse_generator.get_cutoff(pulse_parameters.duration)\n from_index = max(int((pulse_parameters.arrival_time - cutoff) / self.dt), 0)\n to_index = min(\n int((pulse_parameters.arrival_time + cutoff) / self.dt),\n len(self._times),\n )\n\n pulse = pulse_parameters.amplitude * self._pulse_generator.get_pulse(\n self._times[from_index:to_index] - pulse_parameters.arrival_time,\n pulse_parameters.duration,\n )\n signal[from_index:to_index] += pulse\n return\n\n raise NotImplementedError(\n \"Pulse shape has to inherit from PulseShape or ShortPulseShape\"\n )\n" ]
[ [ "scipy.signal.fftconvolve", "numpy.ones", "numpy.random.default_rng", "numpy.arange", "numpy.random.RandomState", "numpy.all", "numpy.sqrt" ] ]
sondisonda/camera_calibration
[ "92cc1c97c2c2960f1e265342884c3dac8d063708" ]
[ "projections/lidar_camera_projection/lidar_camera_project.py" ]
[ "import os\n\nimport matplotlib.pyplot as plt\nimport open3d\n\nfrom utils import *\n\n\ndef render_image_with_boxes(img, objects, calib):\n \"\"\"\n Show image with 3D boxes\n \"\"\"\n # projection matrix\n P_rect2cam2 = calib['P2'].reshape((3, 4))\n\n img1 = np.copy(img)\n for obj in objects:\n if obj.type == 'DontCare':\n continue\n box3d_pixelcoord = map_box_to_image(obj, P_rect2cam2)\n img1 = draw_projected_box3d(img1, box3d_pixelcoord)\n\n plt.imshow(img1)\n plt.yticks([])\n plt.xticks([])\n plt.show()\n\n\ndef render_lidar_with_boxes(pc_velo, objects, calib, img_width, img_height):\n # projection matrix (project from velo2cam2)\n proj_velo2cam2 = project_velo_to_cam2(calib)\n\n # apply projection\n pts_2d = project_to_image(pc_velo.transpose(), proj_velo2cam2)\n\n # Filter lidar points to be within image FOV\n inds = np.where((pts_2d[0, :] < img_width) & (pts_2d[0, :] >= 0) &\n (pts_2d[1, :] < img_height) & (pts_2d[1, :] >= 0) &\n (pc_velo[:, 0] > 0)\n )[0]\n imgfov_pc_velo = pc_velo[inds, :]\n\n # create open3d point cloud and axis\n mesh_frame = open3d.geometry.TriangleMesh.create_coordinate_frame(size=2, origin=[0, 0, 0])\n pcd = open3d.geometry.PointCloud()\n pcd.points = open3d.utility.Vector3dVector(imgfov_pc_velo)\n entities_to_draw = [pcd, mesh_frame]\n\n # Projection matrix\n proj_cam2_2_velo = project_cam2_to_velo(calib)\n\n # Draw objects on lidar\n for obj in objects:\n if obj.type == 'DontCare':\n continue\n\n # Project boxes from camera to lidar coordinate\n boxes3d_pts = project_camera_to_lidar(obj.in_camera_coordinate(), proj_cam2_2_velo)\n\n # Open3d boxes\n boxes3d_pts = open3d.utility.Vector3dVector(boxes3d_pts.T)\n box = open3d.geometry.OrientedBoundingBox.create_from_points(boxes3d_pts)\n box.color = [1, 0, 0]\n entities_to_draw.append(box)\n\n # Draw\n open3d.visualization.draw_geometries([*entities_to_draw],\n front=[-0.9945, 0.03873, 0.0970],\n lookat=[38.4120, 0.6139, 0.48500],\n up=[0.095457, -0.0421, 0.99453],\n zoom=0.33799\n )\n\n\ndef render_lidar_on_image(pts_velo, img, calib, img_width, img_height):\n # projection matrix (project from velo2cam2)\n proj_velo2cam2 = project_velo_to_cam2(calib)\n print(\"Uwaga\")\n print(proj_velo2cam2)\n # apply projection\n pts_2d = project_to_image(pts_velo.transpose(), proj_velo2cam2)\n\n # Filter lidar points to be within image FOV\n inds = np.where((pts_2d[0, :] < img_width) & (pts_2d[0, :] >= 0) &\n (pts_2d[1, :] < img_height) & (pts_2d[1, :] >= 0) &\n (pc_velo[:, 0] > 0)\n )[0]\n\n # Filter out pixels points\n imgfov_pc_pixel = pts_2d[:, inds]\n\n # Retrieve depth from lidar\n imgfov_pc_velo = pts_velo[inds, :]\n imgfov_pc_velo = np.hstack((imgfov_pc_velo, np.ones((imgfov_pc_velo.shape[0], 1))))\n imgfov_pc_cam2 = proj_velo2cam2 @ imgfov_pc_velo.transpose()\n\n cmap = plt.cm.get_cmap('hsv', 256)\n cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255\n\n for i in range(imgfov_pc_pixel.shape[1]):\n depth = imgfov_pc_cam2[2, i]\n color = cmap[int(640.0 / depth), :]\n cv2.circle(img, (int(np.round(imgfov_pc_pixel[0, i])),\n int(np.round(imgfov_pc_pixel[1, i]))),\n 2, color=tuple(color), thickness=-1)\n plt.imshow(img)\n plt.yticks([])\n plt.xticks([])\n plt.show()\n return img\n\n\nif __name__ == '__main__':\n # Load image, calibration file, label bbox\n rgb = cv2.cvtColor(cv2.imread(os.path.join('data/000114_image.png')), cv2.COLOR_BGR2RGB)\n img_height, img_width, img_channel = rgb.shape\n\n # Load calibration\n calib = read_calib_file('data/000114_calib.txt')\n\n # Load labels\n labels = 
load_label('data/000114_label.txt')\n\n # Load Lidar PC\n pc_velo = load_velo_scan('data/000114.bin')[:, :3]\n\n #render_image_with_boxes(rgb, labels, calib)\n #render_lidar_with_boxes(pc_velo, labels, calib, img_width=img_width, img_height=img_height)\n render_lidar_on_image(pc_velo, rgb, calib, img_width, img_height)\n" ]
[ [ "matplotlib.pyplot.xticks", "matplotlib.pyplot.imshow", "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.show", "matplotlib.pyplot.yticks" ] ]
naviocean/SimpleCVReproduction
[ "9939f8340c54dbd69b0017cecad875dccf428f26", "9939f8340c54dbd69b0017cecad875dccf428f26" ]
[ "NAS/AngleNAS/DARTS/shrinking/shrinking.py", "NAS/AngleNAS/NAS-Bench-201/exps/algos/SPOS.py" ]
[ "import os\nimport time\nimport numpy as np\nimport pickle\nimport torch\nimport torch.nn as nn\nfrom super_model import Network_ImageNet\nfrom torch.autograd import Variable\nfrom config import config\nimport sys\nsys.setrecursionlimit(10000)\nimport functools\nimport copy\nprint=functools.partial(print,flush=True)\nfrom angle import get_angle\n\nsys.path.append(\"../..\")\nfrom utils import *\n\n# Make sure each child model is sampled only once\ndef legal(cand, vis_dict):\n if len(cand) == 0:\n return False \n assert isinstance(cand,tuple)\n if cand not in vis_dict:\n vis_dict[cand]={}\n info=vis_dict[cand]\n if 'visited' in info:\n return False\n info['visited']=True\n vis_dict[cand]=info\n return True\n\n# Randomly sample finite number of child models containing current operator\ndef get_random_extend(num, extend_operator, vis_dict, operations):\n def get_random_cand_(extend_operator, operations):\n edge, extend_op = extend_operator\n rng, cell_rng = [], []\n\n for op in operations:\n k = np.random.randint(len(op))\n select_op = op[k]\n cell_rng.append(select_op)\n\n for _ in range(config.layers):\n rng.append(copy.deepcopy(cell_rng))\n rng = check_cand(rng, operations, config.edges)\n \n if extend_op is not None:\n for i in range(config.layers):\n rng[i][edge] = extend_op\n rng = np.reshape(rng, -1)\n return tuple(rng)\n\n max_iters = num*100\n candidates = []\n i = 0\n while i<num and max_iters>0: \n max_iters-=1\n cand = get_random_cand_(extend_operator, operations)\n if not legal(cand, vis_dict):\n continue\n candidates.append(cand)\n i+=1\n print('random {}/{}'.format(len(candidates),num))\n now = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\n print('{} random_num = {}'.format(now, len(candidates)))\n return candidates\n\ndef compute_scores(base_model, model, operations, extend_operators, vis_dict_slice, vis_dict):\n candidates = []\n # 1000 child models are collected for each operator\n for idx, extend_operator in enumerate(extend_operators):\n info = vis_dict_slice[extend_operator]\n if config.sample_num-len(info['cand_pool']) > 0:\n random_n = config.sample_num - len(info['cand_pool'])\n cands = get_random_extend(random_n, extend_operator, vis_dict, operations)\n for cand in cands:\n cand_ = np.reshape(cand, [config.layers, -1])\n for j, c in enumerate(cand_[0]): \n extend_operator_ = (j, c)\n if extend_operator_ in vis_dict_slice:\n info = vis_dict_slice[extend_operator_]\n if cand not in info['cand_pool']:\n info['cand_pool'].append(cand)\n if cand not in candidates:\n candidates.append(cand)\n\n # Compute angles of all candidate architecures\n for i, cand in enumerate(candidates):\n info=vis_dict[cand]\n info['angle'] = get_angle(base_model, model, cand)\n print('idx: {}, angle: {}'.format(i, info['angle']))\n\n # Caculate sum of angles for each operator\n for cand in candidates:\n cand_info = vis_dict[cand]\n cand_ = np.reshape(cand, [config.layers, -1])\n for j, c in enumerate(cand_[0]): \n extend_operator_ = (j, c)\n if extend_operator_ in vis_dict_slice:\n slice_info = vis_dict_slice[extend_operator_]\n if cand in slice_info['cand_pool'] and slice_info['count'] < config.sample_num:\n slice_info['angle'] += cand_info['angle']\n slice_info['count'] += 1\n\n # The score of each operator is acquired by averaging the angle of child models containing it\n for extend_operator in extend_operators:\n if vis_dict_slice[extend_operator]['count'] > 0:\n vis_dict_slice[extend_operator]['angle'] = vis_dict_slice[extend_operator]['angle'] * 1. 
/ vis_dict_slice[extend_operator]['count'] \n\ndef drop_operators(extend_operators, vis_dict_slice, operations, drop_iter):\n # Each operator is ranked according to its score\n extend_operators.sort(key=lambda x:vis_dict_slice[x]['angle'], reverse=False)\n for idx, cand in enumerate(extend_operators):\n info = vis_dict_slice[cand]\n print('Iter={}, shrinking: top {} cand={}, angle={}, count={}'.format(drop_iter+1, idx+1, cand, info['angle'], info['count']))\n \n # ABS removes one operator for each edge each time\n num, drop_ops = 0, []\n for j in range(len(operations)):\n for idx, cand in enumerate(extend_operators):\n edge, op = cand\n if edge == j:\n print('no.{} drop_op={}'.format(num+1, cand))\n drop_ops.append(cand)\n operations[edge].remove(op)\n extend_operators.remove(cand)\n num += 1\n break\n return operations, drop_ops\n\n# Algorithm 2\ndef ABS(base_model, model, operations, iters):\n vis_dict_slice, vis_dict = {}, {}\n print('|=> Iters={}, shrinking: operations={}'.format(iters, operations))\n # At least one operator is preserved in each edge\n # Each operator is identified by its edge and type\n extend_operators = []\n for edge, op in enumerate(operations):\n if len(op) > 1:\n for op_ in op:\n cand = tuple([edge, op_])\n vis_dict_slice[cand]={}\n info=vis_dict_slice[cand]\n info['angle'] = 0.\n info['count'] = 0.\n info['cand_pool'] = []\n extend_operators.append(cand)\n\n compute_scores(base_model, model, operations, extend_operators, vis_dict_slice, vis_dict)\n operations, drop_ops = drop_operators(extend_operators, vis_dict_slice, operations, iters)\n print('Iter={}, shrinking: drop_ops={}, operations={}'.format(iters, drop_ops, operations))\n return operations\n", "##################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #\n######################################################################################\n# One-Shot Neural Architecture Search via Self-Evaluated Template Network, ICCV 2019 #\n######################################################################################\nimport os, sys, time, glob, random, argparse\nimport numpy as np\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nlib_dir = (Path(__file__).parent / '..' / '..' 
/ 'lib').resolve()\nif str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))\nfrom config_utils import load_config, dict2config, configure2str\nfrom datasets import get_datasets, get_nas_search_loaders\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler\nfrom utils import get_model_infos, obtain_accuracy\nfrom log_utils import AverageMeter, time_string, convert_secs2time\nfrom models import get_cell_based_tiny_net, get_search_spaces, get_sub_search_spaces\nfrom nas_102_api import NASBench102API as API\nimport random\n\ndef search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):\n data_time, batch_time = AverageMeter(), AverageMeter()\n base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n end = time.time()\n network.train()\n for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):\n scheduler.update(None, 1.0 * step / len(xloader))\n base_targets = base_targets.cuda(non_blocking=True)\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n \n # update the weights\n network.module.set_cal_mode( 'urs' )\n network.zero_grad()\n _, logits = network(base_inputs)\n base_loss = criterion(logits, base_targets)\n base_loss.backward()\n w_optimizer.step()\n # record\n base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))\n base_losses.update(base_loss.item(), base_inputs.size(0))\n base_top1.update (base_prec1.item(), base_inputs.size(0))\n base_top5.update (base_prec5.item(), base_inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if step % print_freq == 0 or step + 1 == len(xloader):\n Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))\n Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)\n Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)\n Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)\n logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)\n #print (nn.functional.softmax(network.module.arch_parameters, dim=-1))\n #print (network.module.arch_parameters)\n return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg\n\n\ndef get_best_arch(xloader, network, n_samples):\n with torch.no_grad():\n network.eval()\n archs, valid_accs = network.module.get_all_archs(), []\n random.shuffle(archs)\n archs = archs[:len(archs)//10]\n loader_iter = iter(xloader)\n for i, sampled_arch in enumerate(archs):\n print(i, sampled_arch)\n network.module.set_cal_mode('dynamic', sampled_arch)\n try:\n inputs, targets = next(loader_iter)\n except:\n loader_iter = iter(xloader)\n inputs, targets = next(loader_iter)\n\n _, logits = network(inputs)\n val_top1, val_top5 = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))\n\n valid_accs.append( val_top1.item() )\n #print ('--- {:}/{:} : {:} : {:}'.format(i, len(archs), sampled_arch, 
val_top1))\n\n best_idx = np.argmax(valid_accs)\n best_arch, best_valid_acc = archs[best_idx], valid_accs[best_idx]\n return best_arch, best_valid_acc\n\n\ndef valid_func(xloader, network, criterion):\n data_time, batch_time = AverageMeter(), AverageMeter()\n arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()\n end = time.time()\n with torch.no_grad():\n network.eval()\n for step, (arch_inputs, arch_targets) in enumerate(xloader):\n arch_targets = arch_targets.cuda(non_blocking=True)\n # measure data loading time\n data_time.update(time.time() - end)\n # prediction\n _, logits = network(arch_inputs)\n arch_loss = criterion(logits, arch_targets)\n # record\n arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))\n arch_losses.update(arch_loss.item(), arch_inputs.size(0))\n arch_top1.update (arch_prec1.item(), arch_inputs.size(0))\n arch_top5.update (arch_prec5.item(), arch_inputs.size(0))\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n return arch_losses.avg, arch_top1.avg, arch_top5.avg\n\n\ndef main(xargs):\n assert torch.cuda.is_available(), 'CUDA is not available.'\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.set_num_threads( xargs.workers )\n prepare_seed(xargs.rand_seed)\n logger = prepare_logger(args)\n\n train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)\n config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)\n search_loader, _, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', \\\n (config.batch_size, config.test_batch_size), xargs.workers)\n logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))\n logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))\n\n search_space = get_sub_search_spaces('cell', xargs.search_space_name)\n logger.log('search_space={}'.format(search_space))\n model_config = dict2config({'name': 'SETN', 'C': xargs.channel, 'N': xargs.num_cells,\n 'max_nodes': xargs.max_nodes, 'num_classes': class_num,\n 'space' : search_space,\n 'affine' : False, 'track_running_stats': bool(xargs.track_running_stats)}, None)\n logger.log('search space : {:}'.format(search_space))\n search_model = get_cell_based_tiny_net(model_config)\n \n w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)\n a_optimizer = torch.optim.Adam(search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay)\n logger.log('w-optimizer : {:}'.format(w_optimizer))\n logger.log('a-optimizer : {:}'.format(a_optimizer))\n logger.log('w-scheduler : {:}'.format(w_scheduler))\n logger.log('criterion : {:}'.format(criterion))\n flop, param = get_model_infos(search_model, xshape)\n #logger.log('{:}'.format(search_model))\n logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))\n logger.log('search-space : {:}'.format(search_space))\n if xargs.arch_nas_dataset is None:\n api = None\n else:\n api = API(xargs.arch_nas_dataset)\n logger.log('{:} create API = {:} done'.format(time_string(), api))\n\n last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')\n network, criterion = 
torch.nn.DataParallel(search_model).cuda(), criterion.cuda()\n\n if last_info.exists(): # automatically resume from previous checkpoint\n logger.log(\"=> loading checkpoint of the last-info '{:}' start\".format(last_info))\n last_info = torch.load(last_info)\n start_epoch = last_info['epoch']\n checkpoint = torch.load(last_info['last_checkpoint'])\n genotypes = checkpoint['genotypes']\n valid_accuracies = checkpoint['valid_accuracies']\n search_model.load_state_dict( checkpoint['search_model'] )\n w_scheduler.load_state_dict ( checkpoint['w_scheduler'] )\n w_optimizer.load_state_dict ( checkpoint['w_optimizer'] )\n a_optimizer.load_state_dict ( checkpoint['a_optimizer'] )\n logger.log(\"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.\".format(last_info, start_epoch))\n else:\n logger.log(\"=> do not find the last-info file : {:}\".format(last_info))\n init_genotype, _ = get_best_arch(valid_loader, network, xargs.select_num)\n start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: init_genotype}\n\n # start training\n start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup\n for epoch in range(start_epoch, total_epoch):\n w_scheduler.update(epoch, 0.0)\n need_time = 'Time Left: {:}'.format( convert_secs2time(epoch_time.val * (total_epoch-epoch), True) )\n epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)\n logger.log('\\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))\n\n search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 \\\n = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)\n search_time.update(time.time() - start_time)\n logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))\n logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))\n\n genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)\n network.module.set_cal_mode('dynamic', genotype)\n valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))\n\n # check the best accuracy\n valid_accuracies[epoch] = valid_a_top1\n\n genotypes[epoch] = genotype\n logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))\n # save checkpoint\n save_path = save_checkpoint({'epoch' : epoch + 1,\n 'args' : deepcopy(xargs),\n 'search_model': search_model.state_dict(),\n 'w_optimizer' : w_optimizer.state_dict(),\n 'a_optimizer' : a_optimizer.state_dict(),\n 'w_scheduler' : w_scheduler.state_dict(),\n 'genotypes' : genotypes,\n 'valid_accuracies' : valid_accuracies},\n model_base_path, logger)\n last_info = save_checkpoint({\n 'epoch': epoch + 1,\n 'args' : deepcopy(args),\n 'last_checkpoint': save_path,\n }, logger.path('info'), logger)\n with torch.no_grad():\n logger.log('arch-parameters :\\n{:}'.format( nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu() ))\n if api is not None: logger.log('{:}'.format(api.query_by_arch( genotypes[epoch] )))\n # measure elapsed time\n epoch_time.update(time.time() - start_time)\n 
start_time = time.time()\n\n # the final post procedure : count the time\n start_time = time.time()\n genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)\n search_time.update(time.time() - start_time)\n network.module.set_cal_mode('dynamic', genotype)\n valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)\n logger.log('Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.'.format(genotype, valid_a_top1))\n\n logger.log('\\n' + '-'*100)\n # check the performance from the architecture dataset\n logger.log('SPOS : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(total_epoch, search_time.sum, genotype))\n if api is not None: logger.log('{:}'.format( api.query_by_arch(genotype) ))\n logger.close()\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"SPOS\")\n parser.add_argument('--data_path', type=str, help='Path to dataset')\n parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')\n # channels and number-of-cells\n parser.add_argument('--search_space_name', type=str, help='The search space name.')\n parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')\n parser.add_argument('--channel', type=int, help='The number of channels.')\n parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')\n parser.add_argument('--select_num', type=int, help='The number of selected architectures to evaluate.')\n parser.add_argument('--track_running_stats',type=int, choices=[0,1],help='Whether to use track_running_stats in the BN layer.')\n parser.add_argument('--config_path', type=str, help='The path of the configuration.')\n # architecture learning rate\n parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\n parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\n # log\n parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')\n parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset (tiny-nas-benchmark).')\n parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')\n parser.add_argument('--rand_seed', type=int, help='manual seed')\n args = parser.parse_args()\n if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)\n main(args)" ]
[ [ "numpy.reshape" ], [ "torch.load", "torch.nn.functional.softmax", "torch.no_grad", "numpy.argmax", "torch.set_num_threads", "torch.cuda.is_available", "torch.nn.DataParallel" ] ]
bnelo12/wavenet_vocoder
[ "68de8b8abf37fb3eec41817704f06c859925f7a5" ]
[ "train.py" ]
[ "\"\"\"Trainining script for WaveNet vocoder\n\nusage: train.py [options]\n\noptions:\n --dump-root=<dir> Directory contains preprocessed features.\n --checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints].\n --hparams=<parmas> Hyper parameters [default: ].\n --preset=<json> Path of preset parameters (json).\n --checkpoint=<path> Restore model from checkpoint path if given.\n --restore-parts=<path> Restore part of the model.\n --log-event-path=<name> Log event path.\n --reset-optimizer Reset optimizer.\n --speaker-id=<N> Use specific speaker of data in case for multi-speaker datasets.\n -h, --help Show this help message and exit\n\"\"\"\nfrom docopt import docopt\n\nimport sys\n\nimport os\nfrom os.path import dirname, join, expanduser, exists\nfrom tqdm import tqdm\nfrom datetime import datetime\nimport random\nimport json\nfrom glob import glob\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport lrschedule\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch import optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils import data as data_utils\nfrom torch.utils.data.sampler import Sampler\n\nfrom nnmnkwii import preprocessing as P\nfrom nnmnkwii.datasets import FileSourceDataset, FileDataSource\n\nimport librosa.display\n\nfrom tensorboardX import SummaryWriter\nfrom matplotlib import cm\nfrom warnings import warn\n\nfrom wavenet_vocoder import WaveNet\nfrom wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_raw, is_scalar_input\nfrom wavenet_vocoder.mixture import discretized_mix_logistic_loss\nfrom wavenet_vocoder.mixture import sample_from_discretized_mix_logistic\nfrom wavenet_vocoder.mixture import mix_gaussian_loss\nfrom wavenet_vocoder.mixture import sample_from_mix_gaussian\n\nimport audio\nfrom hparams import hparams, hparams_debug_string\n\nglobal_step = 0\nglobal_test_step = 0\nglobal_epoch = 0\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n cudnn.benchmark = True\n\n\ndef sanity_check(model, c, g):\n if model.has_speaker_embedding():\n if g is None:\n raise RuntimeError(\n \"WaveNet expects speaker embedding, but speaker-id is not provided\")\n else:\n if g is not None:\n raise RuntimeError(\n \"WaveNet expects no speaker embedding, but speaker-id is provided\")\n\n if model.local_conditioning_enabled():\n if c is None:\n raise RuntimeError(\"WaveNet expects conditional features, but not given\")\n else:\n if c is not None:\n raise RuntimeError(\"WaveNet expects no conditional features, but given\")\n\n\ndef maybe_set_epochs_based_on_max_steps(hp, steps_per_epoch):\n nepochs = hp.nepochs\n max_train_steps = hp.max_train_steps\n if max_train_steps is not None:\n epochs = int(np.ceil(max_train_steps / steps_per_epoch))\n hp.nepochs = epochs\n print(\"info; Number of epochs is set based on max_train_steps: {}\".format(epochs))\n\n\ndef _pad(seq, max_len, constant_values=0):\n return np.pad(seq, (0, max_len - len(seq)),\n mode='constant', constant_values=constant_values)\n\n\ndef _pad_2d(x, max_len, b_pad=0, constant_values=0):\n x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],\n mode=\"constant\", constant_values=constant_values)\n return x\n\n# from: https://github.com/keras-team/keras/blob/master/keras/utils/np_utils.py\n# to avoid keras dependency\n\n\ndef to_categorical(y, num_classes=None, dtype='float32'):\n \"\"\"Converts a class vector (integers) to binary class matrix.\n E.g. 
for use with categorical_crossentropy.\n # Arguments\n y: class vector to be converted into a matrix\n (integers from 0 to num_classes).\n num_classes: total number of classes.\n dtype: The data type expected by the input, as a string\n (`float32`, `float64`, `int32`...)\n # Returns\n A binary matrix representation of the input. The classes axis\n is placed last.\n # Example\n ```python\n # Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:\n > labels\n array([0, 2, 1, 2, 0])\n # `to_categorical` converts this into a matrix with as many\n # columns as there are classes. The number of rows\n # stays the same.\n > to_categorical(labels)\n array([[ 1., 0., 0.],\n [ 0., 0., 1.],\n [ 0., 1., 0.],\n [ 0., 0., 1.],\n [ 1., 0., 0.]], dtype=float32)\n ```\n \"\"\"\n\n y = np.array(y, dtype='int')\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=dtype)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical\n\n\n# TODO: I know this is too ugly...\nclass _NPYDataSource(FileDataSource):\n def __init__(self, dump_root, col, typ=\"\", speaker_id=None, max_steps=8000,\n cin_pad=0, hop_size=256):\n self.dump_root = dump_root\n self.col = col\n self.lengths = []\n self.speaker_id = speaker_id\n self.multi_speaker = False\n self.speaker_ids = None\n self.max_steps = max_steps\n self.cin_pad = cin_pad\n self.hop_size = hop_size\n self.typ = typ\n\n def collect_files(self):\n meta = join(self.dump_root, \"train.txt\")\n if not exists(meta):\n paths = sorted(glob(join(self.dump_root, \"*-{}.npy\".format(self.typ))))\n return paths\n\n with open(meta, \"rb\") as f:\n lines = f.readlines()\n l = lines[0].decode(\"utf-8\").split(\"|\")\n assert len(l) == 4 or len(l) == 5\n self.multi_speaker = len(l) == 5\n self.lengths = list(\n map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines))\n\n paths_relative = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines))\n paths = list(map(lambda f: join(self.dump_root, f), paths_relative))\n\n # Exclude small files (assuming lenghts are in frame unit)\n # TODO: consider this for multi-speaker\n if self.max_steps is not None:\n idx = np.array(self.lengths) * self.hop_size > self.max_steps + 2 * self.cin_pad * self.hop_size\n if idx.sum() != len(self.lengths):\n print(\"{} short samples are omitted for training.\".format(len(self.lengths) - idx.sum()))\n self.lengths = list(np.array(self.lengths)[idx])\n paths = list(np.array(paths)[idx])\n\n if self.multi_speaker:\n speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines))\n self.speaker_ids = speaker_ids\n if self.speaker_id is not None:\n # Filter by speaker_id\n # using multi-speaker dataset as a single speaker dataset\n indices = np.array(speaker_ids) == self.speaker_id\n paths = list(np.array(paths)[indices])\n self.lengths = list(np.array(self.lengths)[indices])\n # aha, need to cast numpy.int64 to int\n self.lengths = list(map(int, self.lengths))\n self.multi_speaker = False\n\n if self.multi_speaker:\n speaker_ids_np = list(np.array(self.speaker_ids)[indices])\n self.speaker_ids = list(map(int, speaker_ids_np))\n assert len(paths) == len(self.speaker_ids)\n\n return paths\n\n def collect_features(self, path):\n return 
np.load(path)\n\n\nclass RawAudioDataSource(_NPYDataSource):\n def __init__(self, dump_root, **kwargs):\n super(RawAudioDataSource, self).__init__(dump_root, 0, \"wave\", **kwargs)\n\n\nclass MelSpecDataSource(_NPYDataSource):\n def __init__(self, dump_root, **kwargs):\n super(MelSpecDataSource, self).__init__(dump_root, 1, \"feats\", **kwargs)\n\n\nclass PartialyRandomizedSimilarTimeLengthSampler(Sampler):\n \"\"\"Partially randomized sampler\n\n 1. Sort by lengths\n 2. Pick a small patch and randomize it\n 3. Permutate mini-batches\n \"\"\"\n\n def __init__(self, lengths, batch_size=8, batch_group_size=None):\n self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))\n\n self.batch_size = batch_size\n if batch_group_size is None:\n batch_group_size = min(batch_size * 8, len(self.lengths))\n if batch_group_size % batch_size != 0:\n batch_group_size -= batch_group_size % batch_size\n\n self.batch_group_size = batch_group_size\n assert batch_group_size % batch_size == 0\n\n def __iter__(self):\n indices = self.sorted_indices.numpy()\n batch_group_size = self.batch_group_size\n s, e = 0, 0\n bins = []\n for i in range(len(indices) // batch_group_size):\n s = i * batch_group_size\n e = s + batch_group_size\n group = indices[s:e]\n random.shuffle(group)\n bins += [group]\n\n # Permutate batches\n random.shuffle(bins)\n binned_idx = np.stack(bins).reshape(-1)\n\n # Handle last elements\n s += batch_group_size\n if s < len(indices):\n last_bin = indices[len(binned_idx):]\n random.shuffle(last_bin)\n binned_idx = np.concatenate([binned_idx, last_bin])\n\n return iter(torch.tensor(binned_idx).long())\n\n def __len__(self):\n return len(self.sorted_indices)\n\n\nclass PyTorchDataset(object):\n def __init__(self, X, Mel):\n self.X = X\n self.Mel = Mel\n # alias\n self.multi_speaker = X.file_data_source.multi_speaker\n\n def __getitem__(self, idx):\n if self.Mel is None:\n mel = None\n else:\n mel = self.Mel[idx]\n\n raw_audio = self.X[idx]\n if self.multi_speaker:\n speaker_id = self.X.file_data_source.speaker_ids[idx]\n else:\n speaker_id = None\n\n # (x,c,g)\n return raw_audio, mel, speaker_id\n\n def __len__(self):\n return len(self.X)\n\n\ndef sequence_mask(sequence_length, max_len=None):\n if max_len is None:\n max_len = sequence_length.data.max()\n batch_size = sequence_length.size(0)\n seq_range = torch.arange(0, max_len).long()\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n if sequence_length.is_cuda:\n seq_range_expand = seq_range_expand.cuda()\n seq_length_expand = sequence_length.unsqueeze(1) \\\n .expand_as(seq_range_expand)\n return (seq_range_expand < seq_length_expand).float()\n\n\n# https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4\n# https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\nclass ExponentialMovingAverage(object):\n def __init__(self, decay):\n self.decay = decay\n self.shadow = {}\n\n def register(self, name, val):\n self.shadow[name] = val.clone()\n\n def update(self, name, x):\n assert name in self.shadow\n update_delta = self.shadow[name] - x\n self.shadow[name] -= (1.0 - self.decay) * update_delta\n\n\ndef clone_as_averaged_model(device, model, ema):\n assert ema is not None\n averaged_model = build_model().to(device)\n averaged_model.load_state_dict(model.state_dict())\n for name, param in averaged_model.named_parameters():\n if name in ema.shadow:\n param.data = ema.shadow[name].clone()\n return averaged_model\n\n\nclass 
MaskedCrossEntropyLoss(nn.Module):\n def __init__(self):\n super(MaskedCrossEntropyLoss, self).__init__()\n self.criterion = nn.CrossEntropyLoss(reduction='none')\n\n def forward(self, input, target, lengths=None, mask=None, max_len=None):\n if lengths is None and mask is None:\n raise RuntimeError(\"Should provide either lengths or mask\")\n\n # (B, T, 1)\n if mask is None:\n mask = sequence_mask(lengths, max_len).unsqueeze(-1)\n\n # (B, T, D)\n mask_ = mask.expand_as(target)\n losses = self.criterion(input, target)\n return ((losses * mask_).sum()) / mask_.sum()\n\n\nclass DiscretizedMixturelogisticLoss(nn.Module):\n def __init__(self):\n super(DiscretizedMixturelogisticLoss, self).__init__()\n\n def forward(self, input, target, lengths=None, mask=None, max_len=None):\n if lengths is None and mask is None:\n raise RuntimeError(\"Should provide either lengths or mask\")\n\n # (B, T, 1)\n if mask is None:\n mask = sequence_mask(lengths, max_len).unsqueeze(-1)\n\n # (B, T, 1)\n mask_ = mask.expand_as(target)\n\n losses = discretized_mix_logistic_loss(\n input, target, num_classes=hparams.quantize_channels,\n log_scale_min=hparams.log_scale_min, reduce=False)\n assert losses.size() == target.size()\n return ((losses * mask_).sum()) / mask_.sum()\n\n\nclass MixtureGaussianLoss(nn.Module):\n def __init__(self):\n super(MixtureGaussianLoss, self).__init__()\n\n def forward(self, input, target, lengths=None, mask=None, max_len=None):\n if lengths is None and mask is None:\n raise RuntimeError(\"Should provide either lengths or mask\")\n\n # (B, T, 1)\n if mask is None:\n mask = sequence_mask(lengths, max_len).unsqueeze(-1)\n\n # (B, T, 1)\n mask_ = mask.expand_as(target)\n\n losses = mix_gaussian_loss(\n input, target, log_scale_min=hparams.log_scale_min, reduce=False)\n assert losses.size() == target.size()\n return ((losses * mask_).sum()) / mask_.sum()\n\n\ndef ensure_divisible(length, divisible_by=256, lower=True):\n if length % divisible_by == 0:\n return length\n if lower:\n return length - length % divisible_by\n else:\n return length + (divisible_by - length % divisible_by)\n\n\ndef assert_ready_for_upsampling(x, c, cin_pad):\n assert len(x) == (len(c) - 2 * cin_pad) * audio.get_hop_size()\n\n\ndef collate_fn(batch):\n \"\"\"Create batch\n\n Args:\n batch(tuple): List of tuples\n - x[0] (ndarray,int) : list of (T,)\n - x[1] (ndarray,int) : list of (T, D)\n - x[2] (ndarray,int) : list of (1,), speaker id\n Returns:\n tuple: Tuple of batch\n - x (FloatTensor) : Network inputs (B, C, T)\n - y (LongTensor) : Network targets (B, T, 1)\n \"\"\"\n\n local_conditioning = len(batch[0]) >= 2 and hparams.cin_channels > 0\n global_conditioning = len(batch[0]) >= 3 and hparams.gin_channels > 0\n\n if hparams.max_time_sec is not None:\n max_time_steps = int(hparams.max_time_sec * hparams.sample_rate)\n elif hparams.max_time_steps is not None:\n max_time_steps = hparams.max_time_steps\n else:\n max_time_steps = None\n\n # Time resolution adjustment\n cin_pad = hparams.cin_pad\n if local_conditioning:\n new_batch = []\n for idx in range(len(batch)):\n x, c, g = batch[idx]\n if hparams.upsample_conditional_features:\n assert_ready_for_upsampling(x, c, cin_pad=0)\n if max_time_steps is not None:\n max_steps = ensure_divisible(max_time_steps, audio.get_hop_size(), True)\n if len(x) > max_steps:\n max_time_frames = max_steps // audio.get_hop_size()\n s = np.random.randint(cin_pad, len(c) - max_time_frames - cin_pad)\n ts = s * audio.get_hop_size()\n x = x[ts:ts + audio.get_hop_size() * max_time_frames]\n c 
= c[s - cin_pad:s + max_time_frames + cin_pad, :]\n assert_ready_for_upsampling(x, c, cin_pad=cin_pad)\n else:\n x, c = audio.adjust_time_resolution(x, c)\n if max_time_steps is not None and len(x) > max_time_steps:\n s = np.random.randint(cin_pad, len(x) - max_time_steps - cin_pad)\n x = x[s:s + max_time_steps]\n c = c[s - cin_pad:s + max_time_steps + cin_pad, :]\n assert len(x) == len(c)\n new_batch.append((x, c, g))\n batch = new_batch\n else:\n new_batch = []\n for idx in range(len(batch)):\n x, c, g = batch[idx]\n x = audio.trim(x)\n if max_time_steps is not None and len(x) > max_time_steps:\n s = np.random.randint(0, len(x) - max_time_steps)\n if local_conditioning:\n x, c = x[s:s + max_time_steps], c[s:s + max_time_steps, :]\n else:\n x = x[s:s + max_time_steps]\n new_batch.append((x, c, g))\n batch = new_batch\n\n # Lengths\n input_lengths = [len(x[0]) for x in batch]\n max_input_len = max(input_lengths)\n\n # (B, T, C)\n # pad for time-axis\n if is_mulaw_quantize(hparams.input_type):\n padding_value = P.mulaw_quantize(0, mu=hparams.quantize_channels - 1)\n x_batch = np.array([_pad_2d(to_categorical(\n x[0], num_classes=hparams.quantize_channels),\n max_input_len, 0, padding_value) for x in batch], dtype=np.float32)\n else:\n x_batch = np.array([_pad_2d(x[0].reshape(-1, 1), max_input_len)\n for x in batch], dtype=np.float32)\n assert len(x_batch.shape) == 3\n\n # (B, T)\n if is_mulaw_quantize(hparams.input_type):\n padding_value = P.mulaw_quantize(0, mu=hparams.quantize_channels - 1)\n y_batch = np.array([_pad(x[0], max_input_len, constant_values=padding_value)\n for x in batch], dtype=np.int)\n else:\n y_batch = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.float32)\n assert len(y_batch.shape) == 2\n\n # (B, T, D)\n if local_conditioning:\n max_len = max([len(x[1]) for x in batch])\n c_batch = np.array([_pad_2d(x[1], max_len) for x in batch], dtype=np.float32)\n assert len(c_batch.shape) == 3\n # (B x C x T)\n c_batch = torch.FloatTensor(c_batch).transpose(1, 2).contiguous()\n else:\n c_batch = None\n\n if global_conditioning:\n g_batch = torch.LongTensor([x[2] for x in batch])\n else:\n g_batch = None\n\n # Convert to channel first i.e., (B, C, T)\n x_batch = torch.FloatTensor(x_batch).transpose(1, 2).contiguous()\n # Add extra axis\n if is_mulaw_quantize(hparams.input_type):\n y_batch = torch.LongTensor(y_batch).unsqueeze(-1).contiguous()\n else:\n y_batch = torch.FloatTensor(y_batch).unsqueeze(-1).contiguous()\n\n input_lengths = torch.LongTensor(input_lengths)\n\n return x_batch, y_batch, c_batch, g_batch, input_lengths\n\n\ndef time_string():\n return datetime.now().strftime('%Y-%m-%d %H:%M')\n\n\ndef save_waveplot(path, y_hat, y_target):\n sr = hparams.sample_rate\n\n plt.figure(figsize=(16, 6))\n plt.subplot(2, 1, 1)\n librosa.display.waveplot(y_target, sr=sr)\n plt.subplot(2, 1, 2)\n librosa.display.waveplot(y_hat, sr=sr)\n plt.tight_layout()\n plt.savefig(path, format=\"png\")\n plt.close()\n\n\ndef eval_model(global_step, writer, device, model, y, c, g, input_lengths, eval_dir, ema=None):\n if ema is not None:\n print(\"Using averaged model for evaluation\")\n model = clone_as_averaged_model(device, model, ema)\n model.make_generation_fast_()\n\n model.eval()\n idx = np.random.randint(0, len(y))\n length = input_lengths[idx].data.cpu().item()\n\n # (T,)\n y_target = y[idx].view(-1).data.cpu().numpy()[:length]\n\n if c is not None:\n if hparams.upsample_conditional_features:\n c = c[idx, :, :length // audio.get_hop_size() + hparams.cin_pad * 
2].unsqueeze(0)\n else:\n c = c[idx, :, :length].unsqueeze(0)\n assert c.dim() == 3\n print(\"Shape of local conditioning features: {}\".format(c.size()))\n if g is not None:\n # TODO: test\n g = g[idx]\n print(\"Shape of global conditioning features: {}\".format(g.size()))\n\n # Dummy silence\n if is_mulaw_quantize(hparams.input_type):\n initial_value = P.mulaw_quantize(0, hparams.quantize_channels - 1)\n elif is_mulaw(hparams.input_type):\n initial_value = P.mulaw(0.0, hparams.quantize_channels)\n else:\n initial_value = 0.0\n\n # (C,)\n if is_mulaw_quantize(hparams.input_type):\n initial_input = to_categorical(\n initial_value, num_classes=hparams.quantize_channels).astype(np.float32)\n initial_input = torch.from_numpy(initial_input).view(\n 1, 1, hparams.quantize_channels)\n else:\n initial_input = torch.zeros(1, 1, 1).fill_(initial_value)\n initial_input = initial_input.to(device)\n\n # Run the model in fast eval mode\n with torch.no_grad():\n y_hat = model.incremental_forward(\n initial_input, c=c, g=g, T=length, softmax=True, quantize=True, tqdm=tqdm,\n log_scale_min=hparams.log_scale_min)\n\n if is_mulaw_quantize(hparams.input_type):\n y_hat = y_hat.max(1)[1].view(-1).long().cpu().data.numpy()\n y_hat = P.inv_mulaw_quantize(y_hat, hparams.quantize_channels - 1)\n y_target = P.inv_mulaw_quantize(y_target, hparams.quantize_channels - 1)\n elif is_mulaw(hparams.input_type):\n y_hat = P.inv_mulaw(y_hat.view(-1).cpu().data.numpy(), hparams.quantize_channels)\n y_target = P.inv_mulaw(y_target, hparams.quantize_channels)\n else:\n y_hat = y_hat.view(-1).cpu().data.numpy()\n\n # Save audio\n os.makedirs(eval_dir, exist_ok=True)\n path = join(eval_dir, \"step{:09d}_predicted.wav\".format(global_step))\n librosa.output.write_wav(path, y_hat, sr=hparams.sample_rate)\n path = join(eval_dir, \"step{:09d}_target.wav\".format(global_step))\n librosa.output.write_wav(path, y_target, sr=hparams.sample_rate)\n\n # save figure\n path = join(eval_dir, \"step{:09d}_waveplots.png\".format(global_step))\n save_waveplot(path, y_hat, y_target)\n\n\ndef save_states(global_step, writer, y_hat, y, input_lengths, checkpoint_dir=None):\n print(\"Save intermediate states at step {}\".format(global_step))\n idx = np.random.randint(0, len(y_hat))\n length = input_lengths[idx].data.cpu().item()\n\n # (B, C, T)\n if y_hat.dim() == 4:\n y_hat = y_hat.squeeze(-1)\n\n if is_mulaw_quantize(hparams.input_type):\n # (B, T)\n y_hat = F.softmax(y_hat, dim=1).max(1)[1]\n\n # (T,)\n y_hat = y_hat[idx].data.cpu().long().numpy()\n y = y[idx].view(-1).data.cpu().long().numpy()\n\n y_hat = P.inv_mulaw_quantize(y_hat, hparams.quantize_channels - 1)\n y = P.inv_mulaw_quantize(y, hparams.quantize_channels - 1)\n else:\n # (B, T)\n if hparams.output_distribution == \"Logistic\":\n y_hat = sample_from_discretized_mix_logistic(\n y_hat, log_scale_min=hparams.log_scale_min)\n elif hparams.output_distribution == \"Normal\":\n y_hat = sample_from_mix_gaussian(\n y_hat, log_scale_min=hparams.log_scale_min)\n else:\n assert False\n\n # (T,)\n y_hat = y_hat[idx].view(-1).data.cpu().numpy()\n y = y[idx].view(-1).data.cpu().numpy()\n\n if is_mulaw(hparams.input_type):\n y_hat = P.inv_mulaw(y_hat, hparams.quantize_channels)\n y = P.inv_mulaw(y, hparams.quantize_channels)\n\n # Mask by length\n y_hat[length:] = 0\n y[length:] = 0\n\n # Save audio\n audio_dir = join(checkpoint_dir, \"intermediate\", \"audio\")\n os.makedirs(audio_dir, exist_ok=True)\n path = join(audio_dir, \"step{:09d}_predicted.wav\".format(global_step))\n 
librosa.output.write_wav(path, y_hat, sr=hparams.sample_rate)\n path = join(audio_dir, \"step{:09d}_target.wav\".format(global_step))\n librosa.output.write_wav(path, y, sr=hparams.sample_rate)\n\n# workaround for https://github.com/pytorch/pytorch/issues/15716\n# the idea is to return outputs and replicas explicitly, so that pytorch does\n# not release the nodes (this is a pytorch bug though)\n\n\ndef data_parallel_workaround(model, input):\n device_ids = list(range(torch.cuda.device_count()))\n output_device = device_ids[0]\n replicas = torch.nn.parallel.replicate(model, device_ids)\n inputs = torch.nn.parallel.scatter(input, device_ids)\n replicas = replicas[:len(inputs)]\n outputs = torch.nn.parallel.parallel_apply(replicas, inputs)\n y_hat = torch.nn.parallel.gather(outputs, output_device)\n return y_hat, outputs, replicas\n\n\ndef __train_step(device, phase, epoch, global_step, global_test_step,\n model, optimizer, writer, criterion,\n x, y, c, g, input_lengths,\n checkpoint_dir, eval_dir=None, do_eval=False, ema=None):\n sanity_check(model, c, g)\n\n # x : (B, C, T)\n # y : (B, T, 1)\n # c : (B, C, T)\n # g : (B,)\n train = (phase == \"train_no_dev\")\n clip_thresh = hparams.clip_thresh\n if train:\n model.train()\n step = global_step\n else:\n model.eval()\n step = global_test_step\n\n # Learning rate schedule\n current_lr = hparams.optimizer_params[\"lr\"]\n if train and hparams.lr_schedule is not None:\n lr_schedule_f = getattr(lrschedule, hparams.lr_schedule)\n current_lr = lr_schedule_f(\n hparams.optimizer_params[\"lr\"], step, **hparams.lr_schedule_kwargs)\n for param_group in optimizer.param_groups:\n param_group['lr'] = current_lr\n optimizer.zero_grad()\n\n # Prepare data\n x, y = x.to(device), y.to(device)\n input_lengths = input_lengths.to(device)\n c = c.to(device) if c is not None else None\n g = g.to(device) if g is not None else None\n\n # (B, T, 1)\n mask = sequence_mask(input_lengths, max_len=x.size(-1)).unsqueeze(-1)\n mask = mask[:, 1:, :]\n\n # Apply model: Run the model in regular eval mode\n # NOTE: softmax is handled in F.cross_entropy\n # y_hat: (B x C x T)\n\n if use_cuda:\n # multi gpu support\n # you must make sure that batch size % num gpu == 0\n y_hat, _outputs, _replicas = data_parallel_workaround(model, (x, c, g, False))\n else:\n y_hat = model(x, c, g, False)\n\n if is_mulaw_quantize(hparams.input_type):\n # we need 4d inputs for spatial cross entropy loss\n # (B, C, T, 1)\n y_hat = y_hat.unsqueeze(-1)\n loss = criterion(y_hat[:, :, :-1, :], y[:, 1:, :], mask=mask)\n else:\n loss = criterion(y_hat[:, :, :-1], y[:, 1:, :], mask=mask)\n\n if train and step > 0 and step % hparams.checkpoint_interval == 0:\n save_states(step, writer, y_hat, y, input_lengths, checkpoint_dir)\n save_checkpoint(device, model, optimizer, step, checkpoint_dir, epoch, ema)\n\n if do_eval:\n # NOTE: use train step (i.e., global_step) for filename\n eval_model(global_step, writer, device, model, y, c, g, input_lengths, eval_dir, ema)\n\n # Update\n if train:\n loss.backward()\n if clip_thresh > 0:\n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_thresh)\n optimizer.step()\n # update moving average\n if ema is not None:\n for name, param in model.named_parameters():\n if name in ema.shadow:\n ema.update(name, param.data)\n\n # Logs\n writer.add_scalar(\"{} loss\".format(phase), float(loss.item()), step)\n if train:\n if clip_thresh > 0:\n writer.add_scalar(\"gradient norm\", grad_norm, step)\n writer.add_scalar(\"learning rate\", current_lr, 
step)\n\n return loss.item()\n\n\ndef train_loop(device, model, data_loaders, optimizer, writer, checkpoint_dir=None):\n if is_mulaw_quantize(hparams.input_type):\n criterion = MaskedCrossEntropyLoss()\n else:\n if hparams.output_distribution == \"Logistic\":\n criterion = DiscretizedMixturelogisticLoss()\n elif hparams.output_distribution == \"Normal\":\n criterion = MixtureGaussianLoss()\n else:\n raise RuntimeError(\n \"Not supported output distribution type: {}\".format(\n hparams.output_distribution))\n\n if hparams.exponential_moving_average:\n ema = ExponentialMovingAverage(hparams.ema_decay)\n for name, param in model.named_parameters():\n if param.requires_grad:\n ema.register(name, param.data)\n else:\n ema = None\n\n global global_step, global_epoch, global_test_step\n while global_epoch < hparams.nepochs:\n for phase, data_loader in data_loaders.items():\n train = (phase == \"train_no_dev\")\n running_loss = 0.\n test_evaluated = False\n for step, (x, y, c, g, input_lengths) in tqdm(enumerate(data_loader)):\n # Whether to save eval (i.e., online decoding) result\n do_eval = False\n eval_dir = join(checkpoint_dir, \"intermediate\", \"{}_eval\".format(phase))\n # Do eval per eval_interval for train\n if train and global_step > 0 \\\n and global_step % hparams.train_eval_interval == 0:\n do_eval = True\n # Do eval for test\n # NOTE: Decoding WaveNet is quite time consuming, so\n # do only once in a single epoch for testset\n if not train and not test_evaluated \\\n and global_epoch % hparams.test_eval_epoch_interval == 0:\n do_eval = True\n test_evaluated = True\n if do_eval:\n print(\"[{}] Eval at train step {}\".format(phase, global_step))\n\n # Do step\n running_loss += __train_step(device,\n phase, global_epoch, global_step, global_test_step, model,\n optimizer, writer, criterion, x, y, c, g, input_lengths,\n checkpoint_dir, eval_dir, do_eval, ema)\n\n # update global state\n if train:\n global_step += 1\n else:\n global_test_step += 1\n\n if global_step >= hparams.max_train_steps:\n print(\"Training reached max train steps ({}). 
will exit\".format(hparams.max_train_steps))\n return ema\n\n # log per epoch\n averaged_loss = running_loss / len(data_loader)\n writer.add_scalar(\"{} loss (per epoch)\".format(phase),\n averaged_loss, global_epoch)\n print(\"Step {} [{}] Loss: {}\".format(\n global_step, phase, running_loss / len(data_loader)))\n\n global_epoch += 1\n return ema\n\n\ndef save_checkpoint(device, model, optimizer, step, checkpoint_dir, epoch, ema=None):\n checkpoint_path = join(\n checkpoint_dir, \"checkpoint_step{:09d}.pth\".format(global_step))\n optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None\n global global_test_step\n torch.save({\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer_state,\n \"global_step\": step,\n \"global_epoch\": epoch,\n \"global_test_step\": global_test_step,\n }, checkpoint_path)\n print(\"Saved checkpoint:\", checkpoint_path)\n\n import shutil\n latest_pth = join(checkpoint_dir, \"checkpoint_latest.pth\")\n shutil.copyfile(checkpoint_path, latest_pth)\n\n if ema is not None:\n averaged_model = clone_as_averaged_model(device, model, ema)\n checkpoint_path = join(\n checkpoint_dir, \"checkpoint_step{:09d}_ema.pth\".format(global_step))\n torch.save({\n \"state_dict\": averaged_model.state_dict(),\n \"optimizer\": optimizer_state,\n \"global_step\": step,\n \"global_epoch\": epoch,\n \"global_test_step\": global_test_step,\n }, checkpoint_path)\n print(\"Saved averaged checkpoint:\", checkpoint_path)\n\n latest_pth = join(checkpoint_dir, \"checkpoint_latest_ema.pth\")\n shutil.copyfile(checkpoint_path, latest_pth)\n\n\ndef build_model():\n if is_mulaw_quantize(hparams.input_type):\n if hparams.out_channels != hparams.quantize_channels:\n raise RuntimeError(\n \"out_channels must equal to quantize_chennels if input_type is 'mulaw-quantize'\")\n if hparams.upsample_conditional_features and hparams.cin_channels < 0:\n s = \"Upsample conv layers were specified while local conditioning disabled. 
\"\n s += \"Notice that upsample conv layers will never be used.\"\n warn(s)\n\n upsample_params = hparams.upsample_params\n upsample_params[\"cin_channels\"] = hparams.cin_channels\n upsample_params[\"cin_pad\"] = hparams.cin_pad\n model = WaveNet(\n out_channels=hparams.out_channels,\n layers=hparams.layers,\n stacks=hparams.stacks,\n residual_channels=hparams.residual_channels,\n gate_channels=hparams.gate_channels,\n skip_out_channels=hparams.skip_out_channels,\n cin_channels=hparams.cin_channels,\n gin_channels=hparams.gin_channels,\n n_speakers=hparams.n_speakers,\n dropout=hparams.dropout,\n kernel_size=hparams.kernel_size,\n cin_pad=hparams.cin_pad,\n upsample_conditional_features=hparams.upsample_conditional_features,\n upsample_params=upsample_params,\n scalar_input=is_scalar_input(hparams.input_type),\n output_distribution=hparams.output_distribution,\n )\n return model\n\n\ndef _load(checkpoint_path):\n if use_cuda:\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n\n\ndef load_checkpoint(path, model, optimizer, reset_optimizer):\n global global_step\n global global_epoch\n global global_test_step\n\n print(\"Load checkpoint from: {}\".format(path))\n checkpoint = _load(path)\n model.load_state_dict(checkpoint[\"state_dict\"])\n if not reset_optimizer:\n optimizer_state = checkpoint[\"optimizer\"]\n if optimizer_state is not None:\n print(\"Load optimizer state from {}\".format(path))\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n global_step = checkpoint[\"global_step\"]\n global_epoch = checkpoint[\"global_epoch\"]\n global_test_step = checkpoint.get(\"global_test_step\", 0)\n\n return model\n\n\n# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3\ndef restore_parts(path, model):\n print(\"Restore part of the model from: {}\".format(path))\n state = _load(path)[\"state_dict\"]\n model_dict = model.state_dict()\n valid_state_dict = {k: v for k, v in state.items() if k in model_dict}\n\n try:\n model_dict.update(valid_state_dict)\n model.load_state_dict(model_dict)\n except RuntimeError as e:\n # there should be invalid size of weight(s), so load them per parameter\n print(str(e))\n model_dict = model.state_dict()\n for k, v in valid_state_dict.items():\n model_dict[k] = v\n try:\n model.load_state_dict(model_dict)\n except RuntimeError as e:\n print(str(e))\n warn(\"{}: may contain invalid size of weight. skipping...\".format(k))\n\n\ndef get_data_loaders(dump_root, speaker_id, test_shuffle=True):\n data_loaders = {}\n local_conditioning = hparams.cin_channels > 0\n\n if hparams.max_time_steps is not None:\n max_steps = ensure_divisible(hparams.max_time_steps, audio.get_hop_size(), True)\n else:\n max_steps = None\n\n for phase in [\"train_no_dev\", \"dev\"]:\n train = phase == \"train_no_dev\"\n X = FileSourceDataset(\n RawAudioDataSource(join(dump_root, phase), speaker_id=speaker_id,\n max_steps=max_steps, cin_pad=hparams.cin_pad,\n hop_size=audio.get_hop_size()))\n if local_conditioning:\n Mel = FileSourceDataset(\n MelSpecDataSource(join(dump_root, phase), speaker_id=speaker_id,\n max_steps=max_steps, cin_pad=hparams.cin_pad,\n hop_size=audio.get_hop_size()))\n assert len(X) == len(Mel)\n print(\"Local conditioning enabled. 
Shape of a sample: {}.\".format(\n Mel[0].shape))\n else:\n Mel = None\n print(\"[{}]: length of the dataset is {}\".format(phase, len(X)))\n\n if train:\n lengths = np.array(X.file_data_source.lengths)\n # Prepare sampler\n sampler = PartialyRandomizedSimilarTimeLengthSampler(\n lengths, batch_size=hparams.batch_size)\n shuffle = False\n # make sure that there's no sorting bugs for https://github.com/r9y9/wavenet_vocoder/issues/130\n sampler_idx = np.asarray(sorted(list(map(lambda s: int(s), sampler))))\n assert (sampler_idx == np.arange(len(sampler_idx), dtype=np.int)).all()\n else:\n sampler = None\n shuffle = test_shuffle\n\n dataset = PyTorchDataset(X, Mel)\n data_loader = data_utils.DataLoader(\n dataset, batch_size=hparams.batch_size, drop_last=True,\n num_workers=hparams.num_workers, sampler=sampler, shuffle=shuffle,\n collate_fn=collate_fn, pin_memory=hparams.pin_memory)\n\n speaker_ids = {}\n if X.file_data_source.multi_speaker:\n for idx, (x, c, g) in enumerate(dataset):\n if g is not None:\n try:\n speaker_ids[g] += 1\n except KeyError:\n speaker_ids[g] = 1\n if len(speaker_ids) > 0:\n print(\"Speaker stats:\", speaker_ids)\n\n data_loaders[phase] = data_loader\n\n return data_loaders\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n print(\"Command line args:\\n\", args)\n checkpoint_dir = args[\"--checkpoint-dir\"]\n checkpoint_path = args[\"--checkpoint\"]\n checkpoint_restore_parts = args[\"--restore-parts\"]\n speaker_id = args[\"--speaker-id\"]\n speaker_id = int(speaker_id) if speaker_id is not None else None\n preset = args[\"--preset\"]\n\n dump_root = args[\"--dump-root\"]\n if dump_root is None:\n dump_root = join(dirname(__file__), \"data\", \"ljspeech\")\n\n log_event_path = args[\"--log-event-path\"]\n reset_optimizer = args[\"--reset-optimizer\"]\n\n # Load preset if specified\n if preset is not None:\n with open(preset) as f:\n hparams.parse_json(f.read())\n # Override hyper parameters\n hparams.parse(args[\"--hparams\"])\n assert hparams.name == \"wavenet_vocoder\"\n print(hparams_debug_string())\n\n fs = hparams.sample_rate\n\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n output_json_path = join(checkpoint_dir, \"hparams.json\")\n with open(output_json_path, \"w\") as f:\n json.dump(hparams.values(), f, indent=2)\n\n # Dataloader setup\n print(dump_root)\n data_loaders = get_data_loaders(dump_root, speaker_id, test_shuffle=True)\n\n maybe_set_epochs_based_on_max_steps(hparams, len(data_loaders[\"train_no_dev\"]))\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n # Model\n model = build_model().to(device)\n\n receptive_field = model.receptive_field\n print(\"Receptive field (samples / ms): {} / {}\".format(\n receptive_field, receptive_field / fs * 1000))\n\n from torch import optim\n Optimizer = getattr(optim, hparams.optimizer)\n optimizer = Optimizer(model.parameters(), **hparams.optimizer_params)\n\n if checkpoint_restore_parts is not None:\n restore_parts(checkpoint_restore_parts, model)\n\n # Load checkpoints\n if checkpoint_path is not None:\n load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)\n\n # Setup summary writer for tensorboard\n if log_event_path is None:\n log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\")\n print(\"TensorBoard event log path: {}\".format(log_event_path))\n writer = SummaryWriter(log_dir=log_event_path)\n\n # Train!\n ema = None\n try:\n ema = train_loop(device, model, data_loaders, optimizer, writer,\n checkpoint_dir=checkpoint_dir)\n except 
KeyboardInterrupt:\n print(\"Interrupted!\")\n pass\n finally:\n save_checkpoint(\n device, model, optimizer, global_step, checkpoint_dir, global_epoch, ema)\n\n print(\"Finished\")\n\n sys.exit(0)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.parallel.parallel_apply", "torch.nn.functional.softmax", "matplotlib.pyplot.tight_layout", "torch.no_grad", "torch.cuda.is_available", "numpy.stack", "torch.nn.parallel.scatter", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.reshape", "torch.nn.parallel.gather", "torch.cuda.device_count", "torch.arange", "torch.from_numpy", "matplotlib.use", "torch.device", "numpy.load", "torch.load", "numpy.zeros", "numpy.ceil", "torch.zeros", "torch.tensor", "numpy.arange", "numpy.max", "matplotlib.pyplot.close", "torch.nn.parallel.replicate", "torch.FloatTensor", "torch.nn.CrossEntropyLoss", "matplotlib.pyplot.subplot", "numpy.array", "numpy.concatenate", "torch.LongTensor" ] ]
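The training script above threads an `ema` object through `train_loop`, `save_checkpoint`, and `clone_as_averaged_model`, but its definition falls outside this excerpt. A minimal sketch of the usual exponential-moving-average bookkeeping such a loop relies on (an illustration of the technique, not this repository's actual class):

import torch

class ExponentialMovingAverage:
    # Keeps shadow copies of float parameters: shadow = decay * shadow + (1 - decay) * param.
    def __init__(self, model, decay=0.9999):
        self.decay = decay
        self.shadow = {k: v.detach().clone()
                       for k, v in model.state_dict().items()
                       if v.dtype.is_floating_point}

    @torch.no_grad()
    def update(self, model):
        for k, v in model.state_dict().items():
            if k in self.shadow:
                self.shadow[k].mul_(self.decay).add_(v, alpha=1.0 - self.decay)

After each optimizer step the loop would call update(model); cloning an averaged model then amounts to loading the shadow dict into a fresh copy of the network.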
echaussidon/LSS
[ "205ce48a288acacbd41358e6d0215f4aff355049" ]
[ "scripts/SV1/gatherSV_zinfo_alltiles_denali_inpar.py" ]
[ "'''\ngather redshift info across all observations for a given target type\n'''\n\n#standard python\nimport sys\nimport os\nimport shutil\nimport unittest\nfrom datetime import datetime\nimport json\nimport numpy as np\nimport fitsio\nimport glob\nimport argparse\nfrom astropy.table import Table,join,unique,vstack\nfrom matplotlib import pyplot as plt\n\n#sys.path.append('../py')\n\n#from this package\nimport LSS.zcomp.zinfo as zi\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--type\", help=\"tracer type to be selected\")\nparser.add_argument(\"--release\", help=\"what spectro release to use, e.g. blanc or daily\",default='denali') #eventually remove this and just gather everything\nparser.add_argument(\"--basedir\", help=\"base directory for output, default is CSCRATCH\",default=os.environ['CSCRATCH'])\nparser.add_argument(\"--version\", help=\"catalog version; use 'test' unless you know what you are doing!\",default='test')\nparser.add_argument(\"--runmd\", help=\"prod/test; test is for debugging result on first tile\",default='prod')\nparser.add_argument(\"--doh5\", help=\"whether or not to do the N best redshift fits, which adds bit of time\",default='n')\nargs = parser.parse_args()\nprint(args)\n\ntype = args.type\nbasedir = args.basedir\nrelease = args.release\nversion = args.version\n\n\n\nfrom desitarget.sv1 import sv1_targetmask\n\ntarbit = -1\nif type != 'All':\n tarbit = int(np.log2(sv1_targetmask.desi_mask[type]))\n\nprint('gathering all tile data for type '+type +' in '+release)\n\ntp = 'SV1_DESI_TARGET'\nprint('targeting bit, target program type; CHECK THEY ARE CORRECT!')\nprint(tarbit,tp)\n\n\n\n#outputs\n#basedir for official catalogs'/global/cfs/cdirs/desi/survey/catalogs\nsvdir = basedir+'/SV1/'\n\ndirout = svdir+'redshift_comps/'+release+'/'+version+'/'+type\n\nif not os.path.exists(svdir):\n os.mkdir(svdir)\n print('made '+svdir+' directory')\n\nif not os.path.exists(svdir+'redshift_comps'):\n os.mkdir(svdir+'redshift_comps')\n print('made '+svdir+'redshift_comps directory')\n\nif not os.path.exists(svdir+'redshift_comps/'+release):\n os.mkdir(svdir+'redshift_comps/'+release)\n print('made '+svdir+'redshift_comps/'+release+' directory')\n\nif not os.path.exists(svdir+'redshift_comps/'+release+'/'+version):\n os.mkdir(svdir+'redshift_comps/'+release+'/'+version)\n print('made '+svdir+'redshift_comps/'+release+'/'+version+' directory')\n\nif not os.path.exists(dirout):\n os.mkdir(dirout)\n print('made '+dirout)\n\nif not os.path.exists(svdir+'/redshift_comps/logs'):\n os.mkdir(svdir+'/redshift_comps/logs')\n print('made '+svdir+'/redshift_comps/logs')\n\n#set up log file\nlogfn = svdir+'/redshift_comps/logs/log'+datetime.now().isoformat()+'.txt'\nlogf = open(logfn,'w')\nprint('a log of what was run is going to '+logfn)\n\nlogf.write('running gatherSV_zinfo_alltiles_inpar.py from '+os.getcwd()+'\\n\\n')\nlogf.write('arguments were:\\n')\nlogf.write(str(args)+'\\n')\n\n \nexpf = '/global/cfs/cdirs/desi/survey/observations/SV1/sv1-exposures.fits' \nexposures = fitsio.read(expf) #this will be used in depth calculations \ngt = ['BGS+MWS', 'ELG', 'QSO+ELG', 'QSO+LRG','BACKUP','SSV','SCND']\n#location of inputs\ntiledir = '/global/cfs/cdirs/desi/spectro/redux/'+release+'/tiles/'\n\ntiles = np.unique(exposures['TILEID'])\nprint('looking for data in these tiles:')\nprint(tiles)\n\nmfn = svdir+'/redshift_comps/logs/missingexposures.txt'\nfo = open(svdir+'/redshift_comps/logs/missingexposures.txt','w')\nfo.close()\n\ntilew = []\n#for tile in tiles:\n\ndef 
get_tilezinfo(tile):\n tt = np.unique(exposures['TARGETS'][exposures['TILEID']==tile])[0]\n if np.isin(tt,gt): #that tile used cmx target bits\n tile = str(tile)\n print('going through tile '+tile)\n outf = dirout +'/'+tile+'_'+type+'zinfo.fits'\n if os.path.isfile(outf): \n print(outf+' exists already')\n #tilew.append(tile)\n a = True\n\n else:\n a = zi.comb_subset_vert_denali(tarbit,tp,tile,exposures,outf,tt,mfn=mfn)\n logf.write('compiled data for tile '+str(tile)+' written to '+outf+'\\n')\n\n if a:\n if args.doh5 == 'y':\n print('adding info from hd5 files')\n outfall = dirout +'/'+tile+'_'+type+'zinfo_wh5.fits'\n if os.path.isfile(outfall): \n print(outfall+' exists already') \n else:\n dt = Table.read(outf)\n cols = ['z','zwarn','chi2','deltachi2','spectype','subtype']\n for i in range(1,5):\n\n dt['z_'+str(i)]=np.zeros(len(dt))\n dt['zwarn_'+str(i)]=np.zeros(len(dt))\n dt['chi2_'+str(i)]=np.zeros(len(dt))\n dt['deltachi2_'+str(i)]=np.zeros(len(dt))\n dt['spectype_'+str(i)] = 'GALAXY'\n dt['subtype_'+str(i)] = 'GALAXY'\n for ii in range(0,len(dt)):\n ln = dt[ii]\n \n #if ln['RZR'] != 'N':\n # zfitdir = '/global/cfs/cdirs/desi/users/rongpu/redux/cascades/'+ln['RZR']+'/'+str(ln['TILEID'])\n #else:\n #zfitdir = tiledir+str(ln['TILEID'])+'/'+ln['subset']+'/' \n zfitdir = tiledir+ln['coadd_type']+'/'+str(ln['TILEID'])+'/'+ln['subset'][-8:]+'/' \n \n fl = zfitdir+'/redrock-'+str(ln['PETAL_LOC'])+'-'+str(ln['TILEID'])+'-'+ln['subset']+'.h5'\n \n zfits = zi.get_zfits(fl,ln['TARGETID'])\n for jj in range(1,5):\n for col in cols:\n dt[col+'_'+str(jj)][ii] = zfits[jj][col]\n \n dt.write(outfall,format='fits', overwrite=True) \n print('wrote to '+outfall)\n return a\n\n\n else:\n print('did not find data in '+release +' for tile '+tile) \n return False \n \nif __name__ == '__main__':\n from multiprocessing import Pool\n import sys\n #N = int(sys.argv[2])\n N = 32\n p = Pool(N)\n\n expf = '/global/cfs/cdirs/desi/survey/observations/SV1/sv1-exposures.fits' \n exps = fitsio.read(expf)\n\n tiles = np.unique(exps['TILEID'])\n print('going through '+str(len(tiles))+' tiles')\n\n if args.runmd == 'prod':\n ntile = len(tiles)\n if args.runmd == 'test':\n ntile = 1 \n for j in range(0,ntile,N):\n #get_tilezinfo(tiles[j])\n inds = []\n for i in range(j,j+N):\n if i == len(tiles):\n break\n inds.append(tiles[i])\n p.map(get_tilezinfo,inds)\n\n\n #combine all the tiles\n\n fapp = 'zinfo.fits'\n if args.doh5 == 'y':\n fapp = 'zinfo_wh5.fits'\n \n dt = Table.read(dirout +'/'+str(tiles[0])+'_'+type+fapp)\n dt['TILEID'] = int(tiles[0])\n for i in range(1,len(tiles)):\n tf = dirout +'/'+str(tiles[i])+'_'+type+fapp\n if os.path.isfile(tf): \n dtn = Table.read(dirout +'/'+str(tiles[i])+'_'+type+fapp)\n dtn['TILEID'] = int(tiles[i])\n dt = vstack([dt,dtn])\n else:\n print('did not find file for tile '+str(tiles[i])) \n\n dt.sort('TARGETID')\n col2remove = ['NUMEXP','NUMTILE','LAMBDA_REF','OBJTYPE','NUMTARGET','FIBERFLUX_IVAR_G','FIBERFLUX_IVAR_R','FIBERFLUX_IVAR_Z','DESI_TARGET','BGS_TARGET','MWS_TARGET','HPXPIXEL','NUM_TILEID','NUM_FIBER']\n for col in col2remove:\n try:\n dt.remove_columns([col])\n except:\n print('didnt find column to remove '+col)\n outfall = dirout +'/alltiles_'+type+fapp\n dt.write(outfall,format='fits', overwrite=True) \n print('wrote to '+outfall)\n logf.write('combined all tiles, written to '+outfall)\n " ]
[ [ "numpy.isin", "numpy.log2", "numpy.unique" ] ]
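The gather script above derives its target bit as int(np.log2(sv1_targetmask.desi_mask[type])), which only recovers a bit index when the mask value has exactly one bit set. A tiny check with a made-up mask value (the real values come from desitarget):

import numpy as np

mask_value = 2 ** 5                  # hypothetical single-bit target mask
tarbit = int(np.log2(mask_value))
assert mask_value == 1 << tarbit     # holds only for powers of two
print(tarbit)                        # 5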
danielzlarson/WishBuilder
[ "201bd1ebdb6299fde36e762bb112a8546f15dc00" ]
[ "GDSC_Expression/parse.py" ]
[ "import pandas as pd\nimport sys, re, math, gzip \nimport numpy as np\n\ncellLine = sys.argv[1]\ndoseResponse = sys.argv[2]\nscreenedComponents = sys.argv[3]\nRACS = sys.argv[4]\nvariants = sys.argv[5]\nexpressionIn = sys.argv[6]\nclinicalOut = sys.argv[7]\ntmpExpression = sys.argv[8]\nfinalExpression = sys.argv[9]\n\n\ndef readSheetToMultipleDfs(xl,sheetname, indecisBetweenTables) :\n line = []\n previous = -1 \n tmpDfsList = []\n for i in indecisBetweenTables :\n if i - previous > 2 :\n nrows = xl.book.sheet_by_name(sheetname).nrows\n df = xl.parse(sheetname, skiprows=previous+1, skip_footer = nrows-i-1).dropna(axis=1, how='all')\n tmpDfsList.append(df)\n previous = i\n return tmpDfsList\n\ndef readXlToListDf(file) :\n xl = pd.ExcelFile(file)\n dfsList = []\n for sheetname in xl.sheet_names :\n df = xl.parse(sheetname)\n if df.empty == True :\n continue\n elif any(math.isnan(s) for s in list(df[df.columns[0]]) if type(s) is float) :\n indecisBetweenTables = list(s for s in range(len(list(df[df.columns[0]]))) if type(list(df[df.columns[0]])[s]) is float) \n indecisBetweenTables.append(len(list(df[df.columns[0]])))\n tmpDfsList = readSheetToMultipleDfs(xl,sheetname,indecisBetweenTables)\n for df in tmpDfsList :\n dfsList.append(df)\n else :\n dfsList.append(df)\n return dfsList\n\ndef checkIfNan(value) :\n if type(value) == float:\n if math.isnan(value) == True:\n return True\n return False\n\nprint(\"making dataframes from excel\")\ncellLineDfs = readXlToListDf(cellLine) # 0.Cell line details 1.COSMIC tissue classification 2.TCGA tissue classification 3.Microsatillite instability data 4.Growth media\ndoseResponseDfs = readXlToListDf(doseResponse) # 0.fitted_dose_response\nscreenedComponentsDfs = readXlToListDf(screenedComponents) # 0.Screened_Compounds\nRACSDfs = readXlToListDf(RACS) # 0.RACS\nvariantsDfs = readXlToListDf(variants) # 0.WES_variants 1.Legend\n\n\n##The first three dictionaries are used to decode the MSI, Cancer type, and Screen Medium in the Cell Line file\n##The fourth dictionary stores information from the DrugID in the screen compounds file to be used when making the dose response.\nprint(\"Making dictionaries\")\n\nTCGADict = {}\nfor i in range(len(cellLineDfs[2][cellLineDfs[2].columns[0]])) :\n TCGADict[cellLineDfs[2][cellLineDfs[2].columns[0]][i]] = cellLineDfs[2][cellLineDfs[2].columns[1]][i]\n\nMIDDict = {}\nfor i in range(len(cellLineDfs[3][cellLineDfs[3].columns[0]])) :\n MIDDict[cellLineDfs[3][cellLineDfs[3].columns[0]][i]] = cellLineDfs[3][cellLineDfs[3].columns[1]][i]\n\nGMDict = {}\nfor i in range(len(cellLineDfs[4][cellLineDfs[4].columns[0]])) :\n GMDict[cellLineDfs[4][cellLineDfs[4].columns[0]][i]] = cellLineDfs[4][cellLineDfs[4].columns[1]][i]\n\nDrugIDDict = {}\nfor i in range(len(screenedComponentsDfs[0][screenedComponentsDfs[0].columns[0]])) :\n drugList = []\n if (type(screenedComponentsDfs[0][screenedComponentsDfs[0].columns[2]][i]) != float) :\n if screenedComponentsDfs[0][screenedComponentsDfs[0].columns[2]][i] == \"-\":\n if (len(screenedComponentsDfs[0][screenedComponentsDfs[0].columns[2]][i].split(\", \")) > 1) :\n drugList = screenedComponentsDfs[0][screenedComponentsDfs[0].columns[2]][i].split(\", \")\n else :\n drugList = screenedComponentsDfs[0][screenedComponentsDfs[0].columns[2]][i].split(\", \")\n drugList.append(screenedComponentsDfs[0][screenedComponentsDfs[0].columns[1]][i])\n DrugIDDict[screenedComponentsDfs[0][screenedComponentsDfs[0].columns[0]][i]] = drugList\n\nEnsembliIDDict = {}\nnumberOfCellLineRows = 0\nfor i in 
range(len(cellLineDfs[1][cellLineDfs[1].columns[1]])) :\n numberOfCellLineRows = numberOfCellLineRows + 1\n EnsembliIDDict[str(cellLineDfs[1][cellLineDfs[1].columns[1]][i])] = cellLineDfs[1][cellLineDfs[1].columns[0]][i]\n\n\n\n##Write out Expression data to data.tsv.gz\nheadersNotConverted = []\nheaderValues = 0\nindecisOfInterest = []\nheadersList = []\nprint(\"Writing Expression data to data.tsv.gz\")\nwith gzip.open(tmpExpression, 'w') as oF :\n with gzip.open(expressionIn, 'r') as iF :\n headersList = iF.readline().strip('\\n').split('\\t')\n\n first = True\n for i in range(len(headersList)) :\n if first == True:\n first = False\n indecisOfInterest.append(i)\n else :\n headerValues = headerValues + 1\n try :\n headersList[i] = str(EnsembliIDDict[headersList[i]])\n indecisOfInterest.append(i)\n except KeyError :\n headersNotConverted.append(headersList[i])\n first = True\n firstOACp4C = True\n firstSKMEL28 = True\n firstKMH2 = True\n firstOCIAML5 = True\n for i in indecisOfInterest :\n if first == True :\n first = False\n oF.write(\"Sample\")\n elif headersList[i] == \"OACp4C\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstOACp4C == True :\n firstOACp4C = False\n continue\n else :\n oF.write(\"\\t\" + headersList[i])\n elif headersList[i] == \"SK-MEL-28\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstSKMEL28 == True :\n firstSKMEL28 = False\n continue\n else :\n oF.write(\"\\t\" + headersList[i])\n elif headersList[i] == \"KM-H2\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstKMH2 == True :\n firstKMH2 = False\n continue\n else :\n oF.write(\"\\t\" + headersList[i])\n elif headersList[i] == \"OCI-AML5\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstOCIAML5 == True :\n firstOCIAML5 = False\n continue\n else :\n oF.write(\"\\t\" + headersList[i])\n else :\n oF.write(\"\\t\" + headersList[i])\n oF.write((\"\\n\"))\n\n j = 0\n for line in iF :\n j = j + 1\n lineList = line.strip('\\n').split('\\t')\n first = True\n firstOACp4C = True\n firstValueOACp4C = 0\n firstSKMEL28 = True\n firstValueSKMEL28 = 0\n firstKMH2 = True\n firstValueKMH2 = 0\n firstOCIAML5 = True\n firstValueOCIAML5 = 0\n for i in indecisOfInterest :\n if first == True :\n first = False\n oF.write(lineList[i])\n elif headersList[i] == \"OACp4C\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstOACp4C == True :\n firstOACp4C = False\n firstValueOACp4C = float(lineList[i])\n continue\n else :\n averagedValue = (firstValueOACp4C + float(lineList[i])) / 2\n oF.write(\"\\t\" + str(averagedValue))\n elif headersList[i] == \"SK-MEL-28\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstSKMEL28 == True :\n firstSKMEL28 = False\n firstValueSKMEL28 = float(lineList[i])\n continue\n else :\n 
averagedValue = (firstValueSKMEL28 + float(lineList[i])) / 2\n oF.write(\"\\t\" + str(averagedValue))\n elif headersList[i] == \"KM-H2\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstKMH2 == True :\n firstKMH2 = False\n firstValueKMH2 = float(lineList[i])\n continue\n else :\n averagedValue = (firstValueKMH2 + float(lineList[i])) / 2\n oF.write(\"\\t\" + str(averagedValue))\n elif headersList[i] == \"OCI-AML5\" : #The original dataset has two variables titled OACp4C, this will label only the second in the header so we can average the values on each line and leave the value in this indeci\n if firstOCIAML5 == True :\n firstOCIAML5 = False\n firstValueOCIAML5 = float(lineList[i])\n continue\n else :\n averagedValue = (firstValueOCIAML5 + float(lineList[i])) / 2\n oF.write(\"\\t\" + str(averagedValue))\n else :\n oF.write(\"\\t\" + str(lineList[i]))\n oF.write(\"\\n\")\n\nprint(\"number of values in header: \" + str(headerValues))\nprint(\"number of cellLineRows: \" + str(numberOfCellLineRows))\nprint(\"Headers not converted: \" + str(headersNotConverted))\n\n\nnoDetailsList = []\nnoDoseResposeList = []\nnoRACSList = []\nnoVariants = []\nkeyErrorSet = set()\n\nprint(\"Writing Metadata\")\nwith gzip.open(clinicalOut, 'w') as oFile :\n oFile.write(\"Sample\\tVariable\\tValue\\n\")\n\n first = True\n for headerIndeci in indecisOfInterest :\n if first == True :\n first = False\n continue\n i = list(i for i in range(len(cellLineDfs[1][cellLineDfs[1].columns[0]])) if headersList[headerIndeci] == str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]))\n i = i[0]\n\n ##writing CellLineDetails sheet Cosmic tissue classification info\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[1].columns.values)[2]) + \"\\t\" + str(cellLineDfs[1][cellLineDfs[1].columns[2]][i]) + \"\\n\"))\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[1].columns.values)[3]) + \"\\t\" + str(cellLineDfs[1][cellLineDfs[1].columns[3]][i]) + \"\\n\"))\n \n ##writing CellLineDetails sheet Cell line details - 7 through 12 - map 9 - 11 on to tables in sheet Decode \n #Since they are in a different order, we need to find the index\n j = list(j for j in range(len(cellLineDfs[0][cellLineDfs[0].columns[1]]) - 1) if int(cellLineDfs[0][cellLineDfs[0].columns[1]][j]) == cellLineDfs[1][cellLineDfs[1].columns[1]][i])\n if len(j) > 0 :\n j = j[0]\n\n if(checkIfNan(cellLineDfs[0][cellLineDfs[0].columns[7]][j]) != True) :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[0].columns.values)[7]).replace(\"\\n\",\" \") + \"\\t\" + str(cellLineDfs[0][cellLineDfs[0].columns[7]][j]) + \"\\n\"))\n if(checkIfNan(cellLineDfs[0][cellLineDfs[0].columns[8]][j]) != True) :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[0].columns.values)[8]).replace(\"\\n\",\" \") + \"\\t\" + str(cellLineDfs[0][cellLineDfs[0].columns[8]][j]) + \"\\n\"))\n if(checkIfNan(cellLineDfs[0][cellLineDfs[0].columns[9]][j]) != True) :\n try :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[0].columns.values)[9]).replace(\"\\n\",\" \") + \"\\t\" + str(TCGADict[cellLineDfs[0][cellLineDfs[0].columns[9]][j]]) + \"\\n\"))\n except KeyError :\n if (cellLineDfs[0][cellLineDfs[0].columns[9]][j] == \"COAD/READ\") :\n 
oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[0].columns.values)[9]).replace(\"\\n\",\" \") + \"\\t\" + \"Colon adenocarcinoma and Rectum adenocarcinoma\" + \"\\n\"))\n else :\n keyErrorSet.add(cellLineDfs[0][cellLineDfs[0].columns[9]][j])\n if(checkIfNan(cellLineDfs[0][cellLineDfs[0].columns[10]][j]) != True) :\n tmpMIS = cellLineDfs[0][cellLineDfs[0].columns[10]][j].split('/')\n tmpMIS = list(MIDDict[i] for i in tmpMIS)\n tmpMIS = \"/\".join(tmpMIS)\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[0].columns.values)[10]).replace(\"\\n\",\" \") + \"\\t\" + str(tmpMIS) + \"\\n\"))\n if(checkIfNan(cellLineDfs[0][cellLineDfs[0].columns[11]][j]) != True) :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[0].columns.values)[11]).replace(\"\\n\",\" \") + \"\\t\" + str(GMDict[cellLineDfs[0][cellLineDfs[0].columns[11]][j]]) + \"\\n\"))\n if(checkIfNan(cellLineDfs[0][cellLineDfs[0].columns[12]][j]) != True) :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + str(list(cellLineDfs[0].columns.values)[12]).replace(\"\\n\",\" \") + \"\\t\" + str(cellLineDfs[0][cellLineDfs[0].columns[12]][j]) + \"\\n\"))\n else :\n noDetailsList.append(i)\n\n ##Writing dose response\n j = list(j for j in range(len(doseResponseDfs[0][doseResponseDfs[0].columns[2]])) if doseResponseDfs[0][doseResponseDfs[0].columns[2]][j] == cellLineDfs[1][cellLineDfs[1].columns[1]][i])\n\n if(len(j) > 0) : \n for k in j : \n drugList = DrugIDDict[doseResponseDfs[0][doseResponseDfs[0].columns[3]][k]]\n \n for drug in drugList :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + \"Drug_\" + str(drug) + \"_\" + str(list(doseResponseDfs[0].columns.values)[4]) + \"\\t\" + str(doseResponseDfs[0][doseResponseDfs[0].columns[4]][k]) + \"\\n\"))\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + \"Drug_\" + str(drug) + \"_\" + str(list(doseResponseDfs[0].columns.values)[5]) + \"\\t\" + str(doseResponseDfs[0][doseResponseDfs[0].columns[5]][k]) + \"\\n\"))\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + \"Drug_\" + str(drug) + \"_\" + str(list(doseResponseDfs[0].columns.values)[6]) + \"\\t\" + str(doseResponseDfs[0][doseResponseDfs[0].columns[6]][k]) + \"\\n\"))\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + \"Drug_\" + str(drug) + \"_\" + str(list(doseResponseDfs[0].columns.values)[7]) + \"\\t\" + str(doseResponseDfs[0][doseResponseDfs[0].columns[7]][k]) + \"\\n\"))\n else :\n noDoseResposeList.append(i)\n\n ##Writing Racs\n j = list(j for j in range(len(RACSDfs[0][RACSDfs[0].columns[1]])) if RACSDfs[0][RACSDfs[0].columns[1]][j] == cellLineDfs[1][cellLineDfs[1].columns[1]][i])\n\n if (len(j)) > 0 :\n for k in j :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + \"RACS_\" + str(RACSDfs[0][RACSDfs[0].columns[5]][k]) + \"\\t\" + str(RACSDfs[0][RACSDfs[0].columns[6]][k]) + \"\\n\"))\n else :\n noRACSList.append(i)\n\n ##Writing WES_Variants\n j = list(j for j in range(len(variantsDfs[0][variantsDfs[0].columns[1]])) if variantsDfs[0][variantsDfs[0].columns[1]][j] == cellLineDfs[1][cellLineDfs[1].columns[1]][i])\n \n if len(j) > 0 :\n for k in j :\n oFile.write((str(cellLineDfs[1][cellLineDfs[1].columns[0]][i]) + \"\\t\" + \"Variant_\" + str(variantsDfs[0][variantsDfs[0].columns[7]][k]) + \"\\t\" + 
str(variantsDfs[0][variantsDfs[0].columns[3]][k]) + \"\\n\"))\n else :\n noVariants.append(i)\n\n\nprint(\"KeyError result: \" + str(keyErrorSet))\nprint(\"Iterations no details: \" + str(noDetailsList))\nprint(\"Iterations no racs info: \" + str(noRACSList))\nprint(\"Iterations no variants info: \" + str(noVariants))\nprint(\"Iterations no Dose Response: \" + str(noDoseResposeList))\n\nprint(\"transpose the expressionFile\")\nwith gzip.open(tmpExpression, 'r') as f :\n with gzip.open(finalExpression, 'w') as oF : \n data = np.genfromtxt(f,delimiter='\\t',dtype=str)\n for line in data.T :\n stringLine = (\"\\t\").join([str(element) for element in line]) + \"\\n\"\n oF.write((stringLine))\n" ]
[ [ "pandas.ExcelFile", "numpy.genfromtxt" ] ]
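Much of the special-casing in parse.py above averages pairs of duplicated sample columns (OACp4C, SK-MEL-28, KM-H2, OCI-AML5) by hand while streaming the file. The same collapse can be expressed generically in pandas; a small sketch on toy data:

import pandas as pd

df = pd.DataFrame([[1.0, 3.0, 5.0]], columns=["OACp4C", "OACp4C", "KM-H2"])
# Transpose so duplicate column names become index labels, average each group, transpose back.
deduped = df.T.groupby(level=0).mean().T
print(float(deduped["OACp4C"].iloc[0]))  # 2.0, the mean of the two duplicate columns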
zqirui/MLinPractice
[ "70c054903a5238725e802fa887862c35a8253cc3" ]
[ "code/preprocessing/split_data.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSplits the preprocessed data into training, validation, and test set.\n\nCreated on Tue Sep 28 16:45:51 2021\n\n@author: lbechberger\n\"\"\"\n\nfrom code.util import COLUMN_LABEL\nimport os, argparse, csv\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# setting up CLI\nparser = argparse.ArgumentParser(description = \"Splitting the data set\")\nparser.add_argument(\"input_file\", help = \"path to the input csv file\")\nparser.add_argument(\"output_folder\", help = \"path to the output folder\")\nparser.add_argument(\"-s\", '--seed', type = int, help = \"seed for the random number generator\", default = None)\nparser.add_argument(\"-t\", '--test_size', type = float, help = \"relative size of the test set\", default = 0.2)\nparser.add_argument(\"-v\", '--validation_size', type = float, help = \"relative size of the validation set\", default = 0.2)\nargs = parser.parse_args()\n\n# load the data\ndf = pd.read_csv(args.input_file, quoting = csv.QUOTE_NONNUMERIC, lineterminator = \"\\n\")\n\n# split into (training & validation) and test set\nX, X_test = train_test_split(df, test_size = args.test_size, random_state = args.seed, shuffle = True, stratify = df[COLUMN_LABEL])\n\n# split remainder into training and validation\nrelative_validation_size = args.validation_size / (1 - args.test_size)\nX_train, X_val = train_test_split(X, test_size = relative_validation_size, random_state = args.seed, shuffle = True, stratify = X[COLUMN_LABEL])\n\n# store the three data sets separately\nX_train.to_csv(os.path.join(args.output_folder, \"training.csv\"), index = False, quoting = csv.QUOTE_NONNUMERIC, line_terminator = \"\\n\")\nX_val.to_csv(os.path.join(args.output_folder, \"validation.csv\"), index = False, quoting = csv.QUOTE_NONNUMERIC, line_terminator = \"\\n\")\nX_test.to_csv(os.path.join(args.output_folder, \"test.csv\"), index = False, quoting = csv.QUOTE_NONNUMERIC, line_terminator = \"\\n\")\n\nprint(\"Training: {0} examples, Validation: {1} examples, Test: {2} examples\".format(len(X_train), len(X_val), len(X_test)))" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split" ] ]
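The relative_validation_size arithmetic in split_data.py deserves a worked example: with the defaults test_size = 0.2 and validation_size = 0.2, the second split must take 0.2 / (1 - 0.2) = 0.25 of the remaining rows so that the validation set ends up at 20% of the original data. A quick check on dummy data (stratification omitted for brevity):

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(100).reshape(-1, 1)
X_rest, X_test = train_test_split(X, test_size=0.2, random_state=0, shuffle=True)
X_train, X_val = train_test_split(X_rest, test_size=0.2 / (1 - 0.2), random_state=0, shuffle=True)
print(len(X_train), len(X_val), len(X_test))  # 60 20 20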
hirowgit/2B0_python_optmization_course
[ "e1890a41d0daf9a44a4d1e0a6c5d775f8ab7691b" ]
[ "1_SVG_converter_Copper.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom svg.path import parse_path\nfrom svg.path.path import Line\nfrom xml.dom import minidom\n\ndef line_splitter(start, end):\n return (lambda t: (1-t)*start+t*end)\n\ndef cubic_bezier_converter(start, control1, control2, end):\n original_data = np.array([start, control1, control2, end])\n cubic_bezier_matrix = np.array([\n [-1, 3, -3, 1],\n [ 3, -6, 3, 0],\n [-3, 3, 0, 0],\n [ 1, 0, 0, 0]\n ])\n return_data = cubic_bezier_matrix.dot(original_data)\n\n return (lambda t: np.array([t**3, t**2, t, 1]).dot(return_data))\n\n# Learned from\n# https://stackoverflow.com/questions/36971363/how-to-interpolate-svg-path-into-a-pixel-coordinates-not-simply-raster-in-pyth\n\n\ndoc = minidom.parse('LaneMap2.svg')\npath_strings = [path.getAttribute('d') for path\n in doc.getElementsByTagName('path')]\ndoc.unlink()\n\npoints_np_all=[]\npoints_np_all=np.empty((len(path_strings)),dtype=object)\nprint(len(points_np_all))\n#points_np_all[k]=np.array([])\n\nfor k in range(len(path_strings)):\n#for path_string in path_strings:\n path = parse_path(path_strings[k])\n points_np_merge=np.empty((0,2), float)\n #points_np_merge=np.empty(points_np_merge)\n for dat in path:\n\n#path=parse_path(path_strings[block])\n\n#dat=path[key]\n\n if type(dat).__name__=='CubicBezier':\n start_np = np.array([dat.start.real, dat.start.imag])\n control1_np = np.array([dat.control1.real, dat.control1.imag])\n control2_np = np.array([dat.control2.real, dat.control2.imag])\n end_np = np.array([dat.end.real, dat.end.imag])\n converted_curve = cubic_bezier_converter(start_np, control1_np, control2_np, end_np)\n # sample roughly one point per pixel of chord length\n diff_np=start_np-end_np\n n_dots=max(int(np.round(np.linalg.norm(diff_np))), 2)\n # np.linspace needs an integer sample count\n points_np = np.array([converted_curve(t) for t in np.linspace(0, 1, n_dots)])\n elif type(dat).__name__=='Line':\n start_np = np.array([dat.start.real, dat.start.imag])\n end_np = np.array([dat.end.real, dat.end.imag])\n converted_line = line_splitter(start_np,end_np)\n # sample roughly one point per pixel of segment length\n diff_np=start_np-end_np\n n_dots=max(int(np.round(np.linalg.norm(diff_np))), 2)\n # np.linspace needs an integer sample count\n points_np=np.array([converted_line(t) for t in np.linspace(0, 1, n_dots)])\n elif type(dat).__name__=='Move':\n # a Move draws nothing; keep only its start and end points\n n_dots=1\n start_np = np.array([dat.start.real, dat.start.imag])\n end_np = np.array([dat.end.real, dat.end.imag])\n points_np = np.array([start_np,end_np])\n else:\n # keep the (n, 2) shape so np.append along axis 0 stays valid\n points_np=np.empty((0,2), float)\n #points_np_merge=np.concatenate(points_np_merge,points_np)\n points_np_merge=np.append(points_np_merge, points_np, axis=0)\n# if k==0:\n# points_np_merge=points_np\n# else:\n# points_np_merge=np.append(points_np_merge,points_np,axis=0)\n plt.plot(points_np[:, 0], points_np[:, 1], '.-')\n plt.show()\n print(len(points_np))\n print(len(points_np_merge))\n #points_np_all1=points_np_all1.append(points_np_merge)\n #points_np_all=points_np_merge\n points_np_all[k]= points_np_merge\n# points_np_all=points_np_all.append(points_np_merge)\n print(len(points_np_all))\n plt.plot(points_np_merge[:, 0], points_np_merge[:, 1], '.-')\n plt.show()\n\n\nlen(points_np_all)\n\n\n# In[8]:\n\n\nlen(points_np_all)\nfor k in range(len(points_np_all)):\n points_np=points_np_all[k]\n plt.plot(points_np[:, 0], points_np[:, 1], '.-')\nplt.show()\n \n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.append", "numpy.empty", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.plot", "numpy.linspace", "numpy.linalg.norm" ] ]
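The 4x4 array in cubic_bezier_converter above is the standard cubic Bezier basis matrix: [t^3, t^2, t, 1] @ M @ [P0, P1, P2, P3] reproduces the Bernstein form (1-t)^3 P0 + 3(1-t)^2 t P1 + 3(1-t) t^2 P2 + t^3 P3. A short numerical check of that equivalence:

import numpy as np

P = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 3.0], [4.0, 0.0]])  # start, control1, control2, end
M = np.array([[-1, 3, -3, 1],
              [3, -6, 3, 0],
              [-3, 3, 0, 0],
              [1, 0, 0, 0]])
t = 0.3
via_matrix = np.array([t**3, t**2, t, 1]) @ (M @ P)
via_bernstein = ((1 - t)**3 * P[0] + 3 * (1 - t)**2 * t * P[1]
                 + 3 * (1 - t) * t**2 * P[2] + t**3 * P[3])
assert np.allclose(via_matrix, via_bernstein)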
koukyo1994/atmaCup5
[ "69ee97a3ad6758af27279cc75fcd9f94325eb0e8" ]
[ "src/core/callbacks/feature_loading.py" ]
[ "import pandas as pd\n\nimport src.utils as utils\n\nfrom scipy.sparse import hstack, csr_matrix\n\nfrom src.core.callbacks import Callback, CallbackOrder\nfrom src.core.states import RunningState\n\n\nclass SortColumnsCallback(Callback):\n signature = \"feature_loading\"\n callback_order = CallbackOrder.MIDDLE\n\n def on_feature_loading_end(self, state: RunningState):\n features = state.features\n\n for key in features:\n if isinstance(features[key][\"train\"], pd.DataFrame):\n features[key][\"train\"] = features[key][\"train\"].sort_index(\n axis=1)\n features[key][\"test\"] = features[key][\"test\"].sort_index(\n axis=1)\n\n state.features = features\n\n\nclass ConcatenateFeatureCallback(Callback):\n signature = \"feature_loading\"\n callback_order = CallbackOrder.LOWER\n\n def on_feature_loading_end(self, state: RunningState):\n features = state.features\n\n as_sparse = False\n for feature in features.values():\n if isinstance(feature[\"train\"], dict):\n as_sparse = True\n break\n\n main_feature = {}\n with utils.timer(\"Concatenating `main` features\", state.logger):\n if as_sparse:\n for phase in [\"train\", \"test\"]:\n sparse_matrices = []\n for f in features.values():\n if isinstance(f[phase], pd.DataFrame):\n feature_values = csr_matrix(f[phase].values)\n sparse_matrices.append(feature_values)\n elif isinstance(f[phase], dict):\n sparse_dict = f[phase]\n for sp_mat in sparse_dict.values():\n sparse_matrices.append(sp_mat)\n main_feature[phase] = hstack(sparse_matrices).tocsr()\n else:\n for phase in [\"train\", \"test\"]:\n dfs = []\n for f in features.values():\n dfs.append(f[phase])\n\n main_feature[phase] = pd.concat(dfs, axis=1)\n state.features[\"main\"] = main_feature\n" ]
[ [ "scipy.sparse.hstack", "scipy.sparse.csr_matrix", "pandas.concat" ] ]
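ConcatenateFeatureCallback above falls back to scipy's sparse hstack as soon as any feature block is a dict of sparse matrices, wrapping plain DataFrames in csr_matrix so every block concatenates column-wise. A self-contained sketch of that path:

import numpy as np
from scipy.sparse import csr_matrix, hstack

dense_block = csr_matrix(np.array([[1.0, 0.0], [0.0, 2.0]]))  # e.g. a converted DataFrame
sparse_block = csr_matrix(np.array([[0.0], [3.0]]))           # e.g. one matrix from a sparse dict
combined = hstack([dense_block, sparse_block]).tocsr()
print(combined.shape)       # (2, 3)
print(combined.toarray())   # [[1. 0. 0.], [0. 2. 3.]]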
lmc00/tfg_en_desarrollo
[ "30e61f4bb3f060f7468b1bb94930fcbe0d0f92ae" ]
[ "scripts/distribucion.py" ]
[ "# Import everything we need, as in Ignacio's jupyter notebook 1.0\nimport os\n\nimport matplotlib.pylab as plt\nimport numpy as np\nfrom tqdm import tqdm\n\nimport imgclas\nfrom imgclas import paths, config\nfrom imgclas.data_utils import load_image, load_data_splits, augment, load_class_names\n\n# Start preparing all the data\n\nCONF = config.get_conf_dict() # The dictionary with the full configuration from the yaml\nsplits_dir = paths.get_splits_dir() #base+data+dataset_files\n# Load the training data\nX_train, y_train = load_data_splits(splits_dir=splits_dir,\n im_dir=CONF['general']['images_directory'],\n split_name='train')\n\n# Load the validation data\nif (CONF['training']['use_validation']) and ('val.txt' in os.listdir(splits_dir)):\n X_val, y_val = load_data_splits(splits_dir=splits_dir,\n im_dir=CONF['general']['images_directory'],\n split_name='val')\n# load_data_splits checks that the file it is given exists (train, val, etc.); then, with numpy.genfromtxt,\n# it builds an array whose first column holds the paths and whose second column holds the labels;\n# finally it returns a numpy array with the absolute paths to the images of train (or whichever split you passed)\n# and another one with the labels as int32 so we know which class each image belongs to\nelse:\n print('No validation data.')\n X_val, y_val = None, None\n CONF['training']['use_validation'] = False\n \n# Load the class names\nclass_names = load_class_names(splits_dir=splits_dir)\n\n# The basics are now ready, so let's compute the\n# class distribution\n\n# Some parameters you can modify if you intend to add the label names,\n# for example if you want each class name to appear under its bin. Note you will also have to change\n# the figsize, because 83 names will not fit in otherwise, packed that tightly.\nlog_scale = False\nshow_names = True\n\n# Plot the histograms\n # create the empty subplots; figsize = (width, height)\n\ndef plot_hist(y, set_name=''):\n fig, ax = plt.subplots(1, figsize=(16,8)) # Create the empty subplot\n # figsize = (width, height)\n n, bins, patches = ax.hist(y, bins=len(class_names), log=log_scale)\n mean, med = np.mean(n), np.median(n)\n ax.axhline(mean, linestyle= '--', color='#ce9b3b', label='mean')\n ax.axhline(med, linestyle= '--', color='#fc0c70', label='median')\n ax.set_title('{} set'.format(set_name))\n ax.legend()\n if show_names:\n ax.set_xticks(range(len(class_names)))\n ax.set_xticklabels(class_names, rotation='vertical')\n\n print('Max {}: {}'.format(set_name, np.amax(n)))\n print('Min {}: {}'.format(set_name, np.amin(n)))\n print('Mean {}: {}'.format(set_name, mean))\n print('Median {}: {}'.format(set_name, med))\n print('\\n')\n # Save the canvas plot under whatever name the user chooses\n nombre_hist = input(\"Name the histogram for \" + set_name + \" :\" )\n plt.savefig(nombre_hist,dpi = 100,format = \"png\")\nplot_hist(y_train, set_name='Training')\n\nif y_val is not None:\n plot_hist(y_val, set_name='Validation')\n\n" ]
[ [ "matplotlib.pylab.savefig", "numpy.median", "matplotlib.pylab.subplots", "numpy.amin", "numpy.amax", "numpy.mean" ] ]
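The bin heights n that plot_hist above feeds into np.mean and np.median are per-class counts, so the printed statistics can be reproduced without any plotting when the labels are the integers 0..K-1; a toy example:

import numpy as np

y = np.array([0, 0, 1, 2, 2, 2])         # toy label vector with K = 3 classes
counts = np.bincount(y, minlength=3)      # per-class frequencies: [2 1 3]
print(np.amax(counts), np.amin(counts), np.mean(counts), np.median(counts))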
sshojiro/malspy
[ "d28932053541f4d2097a1e6feb9fedfe051d5b24" ]
[ "malspy/matrix_factorization.py" ]
[ "\"\"\" Matrix Factorization for Spectrum Imaging Data Analysis\n\"\"\"\n# Author: Motoki Shiga, Gifu University <[email protected]>\n# License: MIT\n#\n\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nclass RandomMF(object):\n \"\"\"Random Matrix Factorization\n Factorize a data matrix into two low rank matrices C_ and S_ using random numbers.\n \n Parameters\n ----------\n n_components : int\n The number of components decomposed from a data matrix\n random_seed : int, optional (default = 0)\n Random number generator seed control\n\n Attributes\n ----------\n random_seed : int, default 0\n Random number generator seed control\n C_ : ndarray of shape = (# of spatial data points, n_components)\n Spatial intensity distributions of factorized components\n S_ : ndarray of shape = (# of spectrum channels, n_components)\n Factorized component spectra\n E_ : ndarray of shape = (# of spatial data points in the 1st axis, # of those in 2nd axis)\n Residual spatial image (spatial image of RMSE)\n \"\"\"\n\n # constructor\n def __init__(self, n_components, random_seed=0):\n self.n_components = n_components\n self.random_seed = random_seed\n\n def __repr__(self):\n class_name = self.__class__.__name__\n txt = 'n_components=' + str(self.n_components) +\\\n ', random_seed=' + str(self.random_seed)\n return '%s(%s)' % (class_name, txt,)\n\n def __str__(self):\n txt = self.__repr__()\n return txt\n\n def fit(self, X, channel_vals=None, unit_name=None):\n \"\"\"Generate two low-rank matrices using random numbers\n\n Parameters\n ----------\n X: ndarray of shape = (# of spatial data points in the 1st axis, # of those in 2nd axis, # of spectrum channels)\n Data matrix to be decomposed\n channel_vals: ndarray of shape = (# of spectrum channels), optional (default = None)\n The sequence of channel values\n unit_name: string, optional (default = None)\n The unit name of spectrum channel\n\n Returns\n -------\n self: instance of class RandomMF\n \"\"\"\n\n # initialize attributes from the given spectrum imaging data\n if X.ndim == 2:\n self.num_y = 1\n self.num_x, self.num_ch = X.shape\n self.num_xy = self.num_x * self.num_y\n else:\n self.num_x, self.num_y, self.num_ch = X.shape\n self.num_xy = self.num_x * self.num_y\n X = X.reshape(self.num_xy, self.num_ch) # transform from 3D-array to 2D-array (Data Matrix)\n\n # set channel information\n if channel_vals is None:\n self.channel_vals = np.arange(self.num_ch)\n else:\n self.channel_vals = channel_vals\n if unit_name is None:\n self.unit_name = 'Channel'\n else:\n self.unit_name = unit_name\n\n # randomly pick spectra from the data matrix as component spectra\n indices = np.random.randint(self.num_xy, size=self.n_components)\n self.S_ = X[indices, :].T\n\n # optimize matrix C by minimizing the reconstruction error (MSE: Mean Squared Error)\n self.C_ = X @ np.linalg.pinv(self.S_).T\n\n # residual spatial image (spatial image of RMSE)\n self.E_ = np.sqrt( np.mean((X - self.C_@self.S_.T)**2, axis=1) )\n self.E_ = self.E_.reshape(self.num_x, self.num_y)\n\n return self\n\n def plot_intensity(self, figsize=None, filename=None, xlabel=None):\n \"\"\"Plot component intensities\n\n Parameters\n ----------\n figsize: list of shape = (the size of horizontal axis, that of vertical axis), optional (default = None)\n Size of horizontal axis and vertical axis of figure\n filename: string, optional (default = None)\n The name of the output image file\n If None, the image is not saved to a file. 
\n xlabel: string, optional (default = None)\n The name of x-axis\n \"\"\"\n\n if figsize is None:\n plt.figure()\n else:\n plt.figure(figsize=figsize)\n \n if xlabel is None:\n xlabel = 'Measured data point'\n \n for k in range(self.C_.shape[1]):\n plt.plot(self.C_[:, k], label=str(k + 1))\n\n plt.xlim([0, self.C_.shape[0]])\n plt.xlabel(xlabel)\n plt.ylabel('Intensity')\n plt.legend()\n plt.grid()\n plt.tight_layout()\n if filename is None:\n plt.show()\n else:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0)\n plt.close()\n\n def plot_component(self, figsize=None, filename=None):\n \"\"\"Plot component intensities (data points vs intensities)\n\n Parameters\n ----------\n figsize: list of shape = (the size of horizontal axis, that of vertical axis), optional (default = None)\n Size of horizontal axis and vertical axis of figure\n filename: string, optional (default = None)\n The file name of an output image\n \"\"\"\n if figsize is None:\n plt.figure()\n else:\n plt.figure(figsize=figsize)\n for k in range(self.C_.shape[1]):\n plt.plot(self.C_[:, k], label=str(k + 1))\n plt.xlim([0, self.C_.shape[0]])\n plt.xlabel('Spatial data point')\n plt.ylabel('Intensity')\n plt.title('Components')\n plt.legend()\n if filename is not None:\n plt.savefig(filename)\n plt.show()\n\n def imshow_component(self, figsize=None, figshape=None, filename=None):\n \"\"\"Display component spatial intensity distributions\n\n Parameters\n ----------\n figsize: list of shape = (the size of horizontal axis, that of vertical axis), optional (default = None)\n Size of horizontal axis and vertical axis of figure\n figshape: list of shape = (# of rows, # columns), optional (default = None)\n The number of rows and columns of axes\n filename: string, optional (default = None)\n The name of the output image file\n If None, the image is not saved to a file. \n \"\"\"\n\n if self.num_y == 1:\n self.plot_component(figsize=figsize, filename=filename)\n else:\n if figsize is None:\n plt.figure()\n else:\n plt.figure(figsize=figsize)\n\n # adjust figure layout\n if figshape is None:\n if self.n_components<=3:\n ncols = self.n_components\n nrows = 1\n else:\n ncols = int(np.ceil(np.sqrt(self.n_components)))\n if ncols*(ncols-1) >= self.n_components:\n nrows = ncols-1\n elif ncols**2 >= self.n_components:\n nrows = ncols\n else:\n nrows = ncols+1\n else:\n nrows, ncols = figshape[0], figshape[1]\n if nrows*ncols < self.n_components:\n print('Error: nrows x ncols should be larger than n_components!')\n return -1\n\n # display figures\n for k in range(self.C_.shape[1]):\n plt.subplot(nrows, ncols, k + 1)\n im = np.reshape(self.C_[:, k], (self.num_x, self.num_y))\n plt.imshow(im)\n plt.title(str(k+1))\n plt.tight_layout()\n if filename is None:\n plt.show()\n else:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0)\n plt.close()\n\n def imshow_residual_image(self, figsize=None, filename=None):\n \"\"\"Display residual (RMSE or beta-divergence) image averaged over all channels\n\n Parameters\n ----------\n figsize: list of shape = (the size of horizontal axis, that of vertical axis), optional (default = None)\n Size of horizontal axis and vertical axis of figure\n filename: string, optional (default = None)\n The name of the output image file\n If None, the image is not saved to a file. 
\n \"\"\"\n\n if figsize is None:\n plt.figure()\n else:\n plt.figure(figsize=figsize)\n plt.imshow(self.E_)\n plt.tight_layout()\n if filename is None:\n plt.show()\n else:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0)\n plt.close()\n\n def plot_spectra(self, figsize=None, filename=None, normalize=True):\n \"\"\"Plot component spectra\n\n Parameters\n ----------\n figsize: list of shape = (the size of horizontal axis, that of vertical axis), optional (default = None)\n Size of horizontal axis and vertical axis of figure\n filename: string, optional (default = None)\n The file name of an output image\n normalize: bool, optional (default = True)\n If True, each spectrum is normalized\n \"\"\"\n\n if figsize is None:\n plt.figure()\n else:\n plt.figure(figsize=figsize)\n for k in range(self.S_.shape[1]):\n if normalize:\n Sk = self.S_[:, k] / (np.sqrt(np.sum(self.S_[:, k]**2)) + 1e-16)\n else:\n Sk = self.S_[:, k]\n plt.plot(self.channel_vals, Sk, label=str(k + 1))\n plt.xlabel(self.unit_name)\n plt.ylabel('Intensity')\n plt.xlim([self.channel_vals[0], self.channel_vals[-1]])\n plt.legend()\n plt.grid()\n plt.tight_layout()\n if filename is None:\n plt.show()\n else:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0)\n plt.close()\n\nclass SVD(RandomMF):\n \"\"\"Singular Value Decomposition (SVD)\n\n Matrix factorization of spectrum image data by SVD\n\n Parameters\n ----------\n n_components : int\n The number of components decomposed from a data matrix\n\n Attributes\n ----------\n C_ : ndarray of shape = (# of spatial data points, n_components)\n Spatial intensity distributions of factorized components\n S_ : ndarray of shape = (# of spectrum channels, n_components)\n Factorized component spectra\n E_ : ndarray of shape = (# of spatial data points in the 1st axis, # of those in 2nd axis)\n Residual spatial image (spatial image of RMSE)\n\n \"\"\"\n\n # constructor\n def __init__(self, n_components):\n super(SVD, self).__init__(n_components = n_components)\n\n def __repr__(self):\n class_name = self.__class__.__name__\n txt = 'n_components=' + str(self.n_components)\n return '%s(%s)' % (class_name, txt,)\n\n def __str__(self):\n txt = self.__repr__()\n return txt\n\n def fit(self, X, channel_vals=None, unit_name=None):\n \"\"\"Decompose a matrix into two low-rank matrices by SVD\n\n Two low rank matrices are generated based on SVD.\n\n Parameters\n ----------\n X: ndarray of shape = (# of spatial data points in the 1st axis, # of those in 2nd axis, # of spectrum channels)\n Data matrix to be decomposed\n channel_vals: ndarray of shape = (# of spectrum channels), optional (default = None)\n The sequence of channel values\n unit_name: string, optional (default = None)\n The unit name of spectrum channel\n\n Returns\n -------\n self: instance of class SVD\n \"\"\"\n\n # initialize attributes from the given spectrum imaging data\n if X.ndim == 2:\n self.num_y = 1\n self.num_x, self.num_ch = X.shape\n self.num_xy = self.num_x * self.num_y\n else:\n self.num_x, self.num_y, self.num_ch = X.shape\n self.num_xy = self.num_x * self.num_y\n X = X.reshape(self.num_xy, self.num_ch) # transform from 3D-array to 2D-array (Data Matrix)\n\n if channel_vals is None:\n self.channel_vals = np.arange(self.num_ch)\n else:\n self.channel_vals = channel_vals\n if unit_name is None:\n self.unit_name = 'Channel'\n else:\n self.unit_name = unit_name\n\n print('Training SVD...')\n\n # SVD and extract only components of the largest singular values\n d = self.n_components\n Ud, Sd, Vd = scipy.linalg.svd(X) # computes the d-projection matrix
\n Ud, Sd, Vd = Ud[:, :d], np.diag(Sd[:d]), Vd[:d, :] # choose d-dimension\n self.C_ = Ud\n self.S_ = (Sd@Vd).T\n\n # adjust the sign of spectra\n for k in range(self.n_components):\n i = np.argmax(np.abs(self.S_[:, k]))\n if self.S_[i, k] < 0:\n self.S_[:, k] = -self.S_[:, k]\n self.C_[:, k] = -self.C_[:, k]\n\n # residual spatial image (spatial image of RMSE)\n self.E_ = np.sqrt( np.mean((X - self.C_@self.S_.T)**2, axis=1) )\n self.E_ = self.E_.reshape(self.num_x, self.num_y)\n\n return self\n\nclass PCA(RandomMF):\n \"\"\"Principal Components Analysis (PCA)\n\n Parameters\n ----------\n n_components : int\n The number of components decomposed from a data matrix\n\n Attributes\n ----------\n C_ : ndarray of shape = (# of spatial data points, n_components)\n Spatial intensity distributions of factorized components\n S_ : ndarray of shape = (# of spectrum channels, n_components)\n Factorized component spectra\n E_ : ndarray of shape = (# of spatial data points in the 1st axis, # of those in 2nd axis)\n Residual spatial image (spatial image of RMSE)\n \"\"\"\n\n def __init__(self, n_components):\n super(PCA, self).__init__(n_components=n_components)\n\n def __repr__(self):\n class_name = self.__class__.__name__\n txt = 'n_components=' + str(self.n_components)\n return '%s(%s)' % (class_name, txt,)\n\n def __str__(self):\n txt = self.__repr__()\n return txt\n\n def fit(self, X, channel_vals=None, unit_name=None):\n \"\"\"\n Decompose into two low-rank matrices by PCA\n\n Parameters\n ----------\n X: ndarray of shape = (# of spatial data points in the 1st axis, # of those in 2nd axis, # of spectrum channels)\n Data matrix to be decomposed\n channel_vals: ndarray of shape = (# of spectrum channels), optional (default = None)\n The sequence of channel values\n unit_name: string, optional (default = None)\n The unit name of spectrum channel\n\n Returns\n -------\n self: instance of class PCA\n \"\"\"\n\n # initialize attributes from the given spectrum imaging data\n if X.ndim == 2:\n self.num_y = 1\n self.num_x, self.num_ch = X.shape\n self.num_xy = self.num_x * self.num_y\n else:\n self.num_x, self.num_y, self.num_ch = X.shape\n self.num_xy = self.num_x * self.num_y\n X = X.reshape(self.num_xy, self.num_ch) # transform from 3D-array to 2D-array (Data Matrix)\n\n if channel_vals is None:\n self.channel_vals = np.arange(self.num_ch)\n else:\n self.channel_vals = channel_vals\n if unit_name is None:\n self.unit_name = 'Channel'\n else:\n self.unit_name = unit_name\n\n self.X_mean = np.mean(X, 0, keepdims=True)\n X = X - self.X_mean\n\n print('Training PCA...')\n\n # Compute S and C via Variance-Covariance matrix\n d = self.n_components\n CovMat = X.T @ X / self.num_xy\n lam, self.S_ = scipy.linalg.eigh(CovMat,eigvals=(self.num_ch-d,self.num_ch-1))\n lam, self.S_ = lam[::-1], self.S_[:,::-1]\n for i in range(d):\n if np.max(self.S_[:,i]) < np.abs(np.min(self.S_[:,i])):\n self.S_[:, i] = -self.S_[:,i]\n self.C_ = X@self.S_\n\n # # SVD and extract only components of the largest singular values\n # d = self.n_components\n # Ud, Sd, Vd = scipy.linalg.svd(X) # computes the d-projection matrix\n # Ud, Sd, Vd = Ud[:, :d], np.diag(Sd[:d]), Vd[:d, :] # choose d-dimension\n # self.C_ = Ud\n # S = (Sd@Vd + self.X_mean).T\n #\n # # adjust the sign of spectra\n # for k in range(self.n_components):\n # i = np.argmax(np.abs(S[:, k]))\n # if S[i, k] < 0:\n # Sd[k, k] = -Sd[k, k]\n # self.C_[:, k] = -self.C_[:, k]\n # self.S_ = (Sd @ Vd).T\n\n # residual spatial image (spatial image of RMSE)
\n self.E_ = np.sqrt( np.mean((X - self.C_@self.S_.T)**2, axis=1) )\n self.E_ = self.E_.reshape(self.num_x, self.num_y)\n\n return self\n" ]
[ [ "numpy.sum", "numpy.diag", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.imshow", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.reshape", "numpy.abs", "scipy.linalg.eigh", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "numpy.mean", "numpy.arange", "numpy.max", "numpy.min", "matplotlib.pyplot.close", "numpy.linalg.pinv", "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.sqrt", "numpy.random.randint", "matplotlib.pyplot.xlabel", "scipy.linalg.svd" ] ]
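SVD.fit above keeps only the d largest singular triplets, so C_ @ S_.T is the best rank-d least-squares approximation of the data matrix, and E_ is the per-point RMSE of that approximation. A standalone check using full_matrices=False (which is equivalent, after slicing, to the full SVD the class computes):

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 30))     # 50 spatial points, 30 spectrum channels
d = 5
U, s, Vt = scipy.linalg.svd(X, full_matrices=False)
C = U[:, :d]                          # spatial intensities
S = (np.diag(s[:d]) @ Vt[:d]).T       # component spectra
E = np.sqrt(np.mean((X - C @ S.T) ** 2, axis=1))  # per-point RMSE, the residual image
print(C.shape, S.shape, E.shape)      # (50, 5) (30, 5) (50,)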
klo9klo9kloi/win_det_heatmaps
[ "fc427bcd593831d627698455b8917eb37add3f6e" ]
[ "common/utility/augment.py" ]
[ "import random\nimport numpy as np\nfrom easydict import EasyDict as edict\n\ndef get_default_augment_config():\n config = edict()\n config.do_aug = True\n\n config.scale_factor = 0.25\n config.rot_factor = 15\n config.center_factor = 0.10 # 10% relative to the patch size\n config.color_factor = 0.2\n config.do_flip_aug = True\n\n config.rot_aug_rate = 0.6 # probability of applying rotation augmentation\n config.flip_aug_rate = 0.5 # probability of applying flip augmentation\n\n config.use_color_normalize = True\n config.mean = np.array([0.485 * 255, 0.456 * 255, 0.406 * 255])\n config.std = np.array([0.229 * 255, 0.224 * 255, 0.225 * 255])\n\n config.multiplier = [0.5, 1., 1.5, 2, 2.5]\n return config\n\n\ndef do_augmentation(aug_config):\n scale = np.clip(np.random.randn(), -0.5, 1.0) * aug_config.scale_factor + 1.0\n rot = np.clip(np.random.randn(), -1.0, 1.0) * aug_config.rot_factor \\\n if random.random() <= aug_config.rot_aug_rate else 0\n center = np.abs(np.clip(np.random.randn(2), -1.0, 1.0)) * aug_config.center_factor\n\n do_flip = aug_config.do_flip_aug and random.random() <= aug_config.flip_aug_rate\n c_up = 1.0 + aug_config.color_factor\n c_low = 1.0 - aug_config.color_factor\n color_scale = [random.uniform(c_low, c_up), random.uniform(c_low, c_up), random.uniform(c_low, c_up)]\n\n return scale, rot, center, do_flip, color_scale\n\n\ndef get_multiplier(img_size, scale_search, patch_size):\n \"\"\"Computes the sizes of image at different scales\n :param img_size: int, the reference image size\n :param scale_search: list of float, the scale multipliers to try\n :param patch_size: int, the network input patch size\n :returns : list of float. The computed scales\n \"\"\"\n return [x * patch_size / float(img_size) for x in scale_search]" ]
[ [ "numpy.array", "numpy.random.randn" ] ]
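Assuming augment.py above is importable (e.g. the repository root is on sys.path), drawing one augmentation sample and the multi-scale sizes would look like this; the printed numbers depend on the seeds:

import random
import numpy as np
from common.utility.augment import get_default_augment_config, do_augmentation, get_multiplier

random.seed(0)
np.random.seed(0)
cfg = get_default_augment_config()
scale, rot, center, do_flip, color_scale = do_augmentation(cfg)
print(scale, rot, do_flip)     # one sampled (scale, rotation, flip) draw
print(get_multiplier(img_size=512, scale_search=cfg.multiplier, patch_size=256))
# [0.25, 0.5, 0.75, 1.0, 1.25]: each entry is multiplier * patch_size / img_size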
dongxulee/lifeCycleRefine
[ "6ca9670dea50150aabe31f86578323cec0ab018c" ]
[ "20211025/shutDownBoth/solveMDP_poorHigh.py" ]
[ "import numpy as np\nimport jax.numpy as jnp\nfrom jax.numpy import interp\nfrom jax import jit, partial, random, vmap\nfrom tqdm import tqdm\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nnp.printoptions(precision=2)\n\n\n'''\n Constants \n'''\n# time line, starts at 20 ends at 80\nT_min = 0\nT_max = 60\nT_R = 45\n# discounting factor\nbeta = 1/(1+0.02)\n# utility function parameter \ngamma = 2.0\n# relative importance of housing consumption and non durable consumption \nalpha = 0.7\n# parameter used to calculate the housing consumption \nkappa = 0.3\n# uB associated parameter\nB = 2\n# social welfare after the unemployment\nwelfare = 20\n# tax rate before and after retirement\ntau_L = 0.2\ntau_R = 0.1\n# number of states S\nnS = 8\n# number of states e\nnE = 2\n# housing state\nnO = 2\n# experience state \nnZ = 2\n\n\n'''\n Economic state calibration \n'''\n\n# probability of survival\nPa = jnp.array(np.load(\"constant/prob.npy\"))\n# deterministic income\ndetEarning = jnp.array(np.load(\"constant/detEarningHigh.npy\"))\n############################################################################################################ low skill feature \n# fix the deterministic income\ndetEarning = jnp.concatenate([detEarning[:46]*0.5, detEarning[46:]-45])\n# stock transaction fee\nKc = 0.02\n# stock participation cost\nc_k = 20\n############################################################################################################ low skill feature \n# Define transition matrix of economical states S\nPs = np.genfromtxt('constant/Ps.csv',delimiter=',')\nPs = jnp.array(Ps)\n# The possible GDP growth, stock return, bond return\ngkfe = np.genfromtxt('constant/gkfe.csv',delimiter=',')\ngkfe = jnp.array(gkfe)\n# GDP growth depending on current S state\ngGDP = gkfe[:,0]/100\n# risk free interest rate depending on current S state \nr_b = gkfe[:,1]/100\n# stock return depending on current S state\nr_k = gkfe[:,2]/100\n# unemployment rate depending on current S state \nPe = gkfe[:,7:]/100\nPe = Pe[:,::-1]\n\n\n'''\n calculate stationary distribution to prepare for simulation\n'''\n# calculate the stationary distribution of econ state and employment state\nS_distribution = jnp.ones(nS)/nS\nfor _ in range(100):\n S_distribution = jnp.matmul(S_distribution, Ps)\n \n#P(0,1)\nP01 = jnp.dot(Pe[:,0],S_distribution)\n#P(1,0)\nP10 = jnp.dot(Pe[:,1],S_distribution)\njnp.array([[1-P01, P01],[P10, 1-P10]])\n\nE_distribution = jnp.ones(2)/2\nfor _ in range(100):\n E_distribution = jnp.matmul(E_distribution, jnp.array([[1-P01, P01],[P10, 1-P10]]))\n\n \n'''\n 401k related constants\n'''\n# 401k amount growth rate\nr_bar = 0.02\n# income fraction goes into 401k \nyi = 0.04\n########################################### shut down 401k\nyi = 0\n###########################################\nPa = Pa[:T_max]\nNt = [np.sum(Pa[t:]) for t in range(T_min,T_max)]\n# factor used to calculate the withdraw amount \nDn = [(r_bar*(1+r_bar)**N)/((1+r_bar)**N - 1) for N in Nt]\nDn[-1] = 1\nDn = jnp.array(Dn)\n# cash accumulated before retirement \nnEarning = yi*E_distribution[1]*(1+jnp.dot(S_distribution,gGDP))*detEarning[:45]\nn_balance = np.zeros(T_R)\nfor t in range(T_R):\n nMultiplier = jnp.array([(1+r_bar)**(t-i) for i in range(t)])\n n_balance[t] = (nEarning[:t] * nMultiplier).sum()\n# cash payouts after retirement \nn_payout = []\namount = n_balance[-1]\nfor t in range(45, 60):\n n_payout.append(amount*Dn[t])\n amount = amount - amount*Dn[t]\n n_balance = jnp.append(n_balance,amount)\nn_payout = 
jnp.array(n_payout)\n\n\n'''\n housing related constants\n'''\n# variable associated with housing and mortgage \n# age limit of buying a house\nageLimit = 30\nmortgageLength = 30\n# mortgage rate \nrh = 0.045\n# housing unit\nH = 1000\n# max rent unit\nRl = 500\n# housing price constant \npt = 2*250/1000\n# 30k rent 1000 sf\npr = 2*10/1000 * 2 \n# constant cost \nc_h = 5\nc_s = H*pt*0.4\n# Dm is used to update the mortgage payment\nDm = [(1+rh) - rh*(1+rh)**(T_max - t)/((1+rh)**(T_max-t)-1) for t in range(T_min, T_max)]\nDm[-1] = 0\nDm = jnp.array(Dm)\n# 30 year mortgage\nMs = []\nM = H*pt*0.8\nm = M*(1+rh) - Dm[30]*M\nfor i in range(30, T_max):\n Ms.append(M)\n M = M*(1+rh) - m\nMs.append(0)\nMs = jnp.array(Ms)\n\n\n'''\n Discretize the state space\n Discretize the action space \n'''\n# actions dicretization(hp, cp, kp)\nnumGrid = 20\nAs = np.array(np.meshgrid(np.linspace(0.001,0.999,numGrid), np.linspace(0,1,numGrid), [0,1])).T.reshape(-1,3)\nAs = jnp.array(As)\n# wealth discretization\nwealthLevel = 300\npolynomialDegree = 2\nws = jnp.linspace(0, np.power(wealthLevel,1/polynomialDegree), numGrid)**polynomialDegree\n# age of last time bought a house value only count when o = 1. \naBuy = np.array(range(ageLimit))\n# dimentions of the state\ndim = (ws.size, aBuy.size, nS, nE, nO, nZ)\ndimSize = len(dim)\n\nxgrid = np.array([[w,ab,s,e,o,z] for w in ws\n for ab in aBuy\n for s in range(nS)\n for e in range(nE)\n for o in range(nO)\n for z in range(nZ)]).reshape(dim + (dimSize,))\n\nXs = xgrid.reshape((np.prod(dim),dimSize))\nXs = jnp.array(Xs)\nVgrid = np.zeros(dim + (T_max,))\n\n# start of function definitions\nnX = Xs.shape[0]\nnA = As.shape[0]\n\n\n\n'''\n Functions Definitions\n'''\n#Define the earning function, which applies for both employment status and 8 econ states\n@partial(jit, static_argnums=(0,))\ndef y(t, x):\n '''\n x = [w,ab,s,e,o,z]\n x = [0,1, 2,3,4,5]\n '''\n if t < T_R:\n return detEarning[t] * (1+gGDP[jnp.array(x[2], dtype = jnp.int8)]) * x[3] + (1-x[3]) * welfare\n else:\n return detEarning[-1]\n \n#Earning after tax and fixed by transaction in and out from 401k account \n@partial(jit, static_argnums=(0,))\ndef yAT(t,x):\n yt = y(t, x)\n if t < T_R:\n # yi portion of the income will be put into the 401k if employed\n return (1-tau_L)*(yt * (1-yi))*x[3] + (1-x[3])*yt\n else:\n # t >= T_R, n/discounting amount will be withdraw from the 401k \n return (1-tau_R)*yt + n_payout[t-T_R]\n \n#Define the utility function\n@jit\ndef u(c):\n return jnp.nan_to_num(x = (jnp.power(c, 1-gamma) - 1)/(1 - gamma), nan = -jnp.inf)\n\n#Define the bequeath function, which is a function of bequeath wealth\n@jit\ndef uB(tb):\n return B*u(tb)\n\n#Reward function depends on the housing and non-housing consumption\n@jit\ndef R(a):\n '''\n Input:\n a = [c,b,k,h,action]\n a = [0,1,2,3,4]\n '''\n c = a[:,0]\n b = a[:,1]\n k = a[:,2]\n h = a[:,3]\n C = jnp.power(c, alpha) * jnp.power(h, 1-alpha)\n return u(C) + (-1/((c > 0) * (b >= 0) * (k >= 0) * (h > 0)) + 1)\n\n# pc*qc / (ph*qh) = alpha/(1-alpha)\n@partial(jit, static_argnums=(0,))\ndef feasibleActions(t, x):\n '''\n x = [w,ab,s,e,o,z]\n x = [0,1, 2,3,4,5]\n a = [c,b,k,h,action]\n a = [0,1,2,3,4]\n '''\n # owner\n sell = As[:,2]\n ab = jnp.array(x[1], dtype = jnp.int8)\n # last term is the tax deduction of the interest portion of mortgage payment\n payment = ((t-ab) > 0)*((t-ab) <= mortgageLength)*(((t<=T_R)*tau_L + (t>T_R)*tau_R)*Ms[t-ab]*rh - m)\n # this is the fire sell term, as long as we could afford the payment, do not sell\n sell = 
(yAT(t,x) + x[0] + payment > 0)*jnp.zeros(nA) + (yAT(t,x) + x[0] + payment <= 0)*jnp.ones(nA)\n budget1 = yAT(t,x) + x[0] + (1-sell)*payment + sell*(H*pt - Ms[t-ab] - c_s)\n h = jnp.ones(nA)*H*(1+kappa)*(1-sell) + sell*jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)\n c = budget1*As[:,0]*(1-sell) + sell*(budget1*As[:,0] - h*pr)\n budget2 = budget1*(1-As[:,0])\n k = budget2*As[:,1]\n k = k - (1-x[5])*(k>0)*c_k\n k = k*(1-Kc)\n b = budget2*(1-As[:,1])\n owner_action = jnp.column_stack((c,b,k,h,sell)) \n \n \n # renter\n buy = As[:,2]*(t < ageLimit)\n####################################################################################################### shut down housing\n buy = As[:,2]*0\n#######################################################################################################\n budget1 = yAT(t,x) + x[0] - buy*(H*pt*0.2 + c_h)\n h = jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)*(1-buy) + buy*jnp.ones(nA)*H*(1+kappa)\n c = (budget1*As[:,0] - h*pr)*(1-buy) + buy*budget1*As[:,0]\n budget2 = budget1*(1-As[:,0])\n k = budget2*As[:,1]\n k = k - (1-x[5])*(k>0)*c_k\n k = k*(1-Kc)\n b = budget2*(1-As[:,1])\n renter_action = jnp.column_stack((c,b,k,h,buy))\n \n actions = x[4]*owner_action + (1-x[4])*renter_action\n return actions\n\n@partial(jit, static_argnums=(0,))\ndef transition(t,a,x):\n '''\n Input:\n x = [w,ab,s,e,o,z]\n x = [0,1, 2,3,4,5]\n a = [c,b,k,h,action]\n a = [0,1,2,3,4]\n Output:\n w_next\n ab_next\n s_next\n e_next\n o_next\n z_next\n \n prob_next\n '''\n nA = a.shape[0]\n s = jnp.array(x[2], dtype = jnp.int8)\n e = jnp.array(x[3], dtype = jnp.int8)\n # actions taken\n b = a[:,1]\n k = a[:,2]\n action = a[:,4]\n w_next = ((1+r_b[s])*b + jnp.outer(k,(1+r_k)).T).T.flatten().repeat(nE)\n ab_next = (1-x[4])*(t*(action == 1)).repeat(nS*nE) + x[4]*(x[1]*jnp.ones(w_next.size))\n s_next = jnp.tile(jnp.arange(nS),nA).repeat(nE)\n e_next = jnp.column_stack((e.repeat(nA*nS),(1-e).repeat(nA*nS))).flatten()\n z_next = x[5]*jnp.ones(w_next.size) + ((1-x[5]) * (k > 0)).repeat(nS*nE)\n # job status changing probability and econ state transition probability\n pe = Pe[s, e]\n ps = jnp.tile(Ps[s], nA)\n prob_next = jnp.column_stack(((1-pe)*ps,pe*ps)).flatten()\n # owner\n o_next_own = (x[4] - action).repeat(nS*nE)\n # renter\n o_next_rent = action.repeat(nS*nE)\n o_next = x[4] * o_next_own + (1-x[4]) * o_next_rent \n return jnp.column_stack((w_next,ab_next,s_next,e_next,o_next,z_next,prob_next))\n\n# used to calculate dot product\n@jit\ndef dotProduct(p_next, uBTB):\n return (p_next*uBTB).reshape((p_next.shape[0]//(nS*nE), (nS*nE))).sum(axis = 1)\n\n\n# define approximation of fit\n@jit\ndef fit(v, xpp):\n value = vmap(partial(jnp.interp,xp = ws))(x = xpp[:,0], fp = v[:,jnp.array(xpp[:,1], dtype = int),\n jnp.array(xpp[:,2], dtype = int),\n jnp.array(xpp[:,3], dtype = int),\n jnp.array(xpp[:,4], dtype = int),\n jnp.array(xpp[:,5], dtype = int)].T)\n return jnp.nan_to_num(x = value, nan = -jnp.inf)\n \n\n\n@partial(jit, static_argnums=(0,))\ndef V(t,V_next,x):\n '''\n x = [w,ab,s,e,o,z]\n x = [0,1, 2,3,4,5]\n xp:\n w_next 0\n ab_next 1\n s_next 2\n e_next 3\n o_next 4\n z_next 5\n prob_next 6\n '''\n actions = feasibleActions(t,x)\n xp = transition(t,actions,x)\n # bequeath utility, wealth level, the retirement account, heir sell the house at a cost of 25k\n TB = xp[:,0] + n_balance[t] + xp[:,4]*(H*pt-Ms[jnp.array(t-xp[:,1], dtype = jnp.int8)]*(1+rh) - 25)\n bequeathU = uB(TB)\n if t == T_max-1:\n Q = R(actions) + beta * dotProduct(xp[:,6], bequeathU)\n else:\n Q = R(actions) + beta 
* dotProduct(xp[:,6], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU)\n Q = Q + (-jnp.inf)*(x[1] >= t)\n v = Q.max()\n return v\n\n@partial(jit, static_argnums=(0,))\ndef V_solve(t,V_next,x):\n '''\n x = [w,ab,s,e,o,z]\n x = [0,1, 2,3,4,5]\n xp:\n w_next 0\n ab_next 1\n s_next 2\n e_next 3\n o_next 4\n z_next 5\n prob_next 6\n '''\n actions = feasibleActions(t,x)\n xp = transition(t,actions,x)\n # bequeath utility, wealth level, the retirement account, heir sell the house at a cost of 25k\n TB = xp[:,0] + n_balance[t] + xp[:,4]*(H*pt-Ms[jnp.array(t-xp[:,1], dtype = jnp.int8)]*(1+rh) - 25)\n bequeathU = uB(TB)\n if t == T_max-1:\n Q = R(actions) + beta * dotProduct(xp[:,6], bequeathU)\n else:\n Q = R(actions) + beta * dotProduct(xp[:,6], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU)\n Q = Q + (-jnp.inf)*(x[1] >= t)\n v = Q.max()\n cbkha = actions[Q.argmax()]\n return v, cbkha\n\n\n###################################solving the model################################################## \nimport os.path\nif os.path.exists(\"poorHigh.npy\"):\n print(\"Model Solved! \")\nelse:\n for t in tqdm(range(T_max-1,T_min-1, -1)):\n if t == T_max-1:\n v = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t]))(Xs)\n else:\n v = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t+1]))(Xs)\n Vgrid[:,:,:,:,:,:,t] = v.reshape(dim)\n np.save(\"poorHigh\",Vgrid)\n" ]
[ [ "numpy.load", "numpy.sum", "numpy.save", "numpy.zeros", "numpy.printoptions", "numpy.power", "numpy.prod", "numpy.genfromtxt", "numpy.linspace" ] ]
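Note: the Dn (401k payout) and Dm (mortgage) factors in the record above are both instances of the standard annuity-amortization factor D = r(1+r)^N / ((1+r)^N - 1). A minimal self-contained sketch of that identity (r, N, and the starting balance are illustrative values, not taken from the model's calibration files):

r, N, balance = 0.02, 15, 100.0              # assumed rate, horizon, balance
D = (r * (1 + r) ** N) / ((1 + r) ** N - 1)  # level-payment fraction per period
payment = balance * D
for _ in range(N):
    balance = balance * (1 + r) - payment    # accrue interest, then pay
print(abs(round(balance, 9)))                # -> 0.0: amortized in exactly N periods

The Ms schedule in the record applies the same identity to track the remaining mortgage principal year by year: its payment m = M*(1+rh) - Dm[30]*M reduces to M*D.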
SharpKoi/Kashgari
[ "ef8c4b4d17dbd69616b9cc744489181909e313c3" ]
[ "kashgari/embeddings/abc_embedding.py" ]
[ "# encoding: utf-8\n\n# author: BrikerMan\n# contact: [email protected]\n# blog: https://eliyar.biz\n\n# file: abc_embedding.py\n# time: 2:43 下午\n\nimport json\nfrom typing import Dict, List, Any, Optional, Union\n\nimport numpy as np\nimport tensorflow as tf\nimport tqdm\n\nimport kashgari\nfrom kashgari.generators import CorpusGenerator\nfrom kashgari.logger import logger\nfrom kashgari.processors import ABCProcessor\n\nL = tf.keras.layers\n\n\nclass ABCEmbedding:\n def to_dict(self) -> Dict[str, Any]:\n config: Dict[str, Any] = {\n 'segment': self.segment,\n 'embedding_size': self.embedding_size,\n 'max_position': self.max_position,\n **self.kwargs\n }\n return {\n '__class_name__': self.__class__.__name__,\n '__module__': self.__class__.__module__,\n 'config': config,\n 'embed_model': json.loads(self.embed_model.to_json())\n }\n\n def __init__(self,\n segment: bool = False,\n embedding_size: int = 100,\n max_position: int = None,\n **kwargs: Any):\n\n self.embed_model: tf.keras.Model = None\n\n self.segment: bool = segment # type: ignore\n self.kwargs = kwargs\n\n self.embedding_size: int = embedding_size # type: ignore\n self.max_position: int = max_position # type: ignore\n self.vocab2idx = self.load_embed_vocab()\n self._text_processor: Optional[ABCProcessor] = None\n\n def _override_load_model(self, config: Dict) -> None:\n embed_model_json_str = json.dumps(config['embed_model'])\n self.embed_model = tf.keras.models.model_from_json(embed_model_json_str,\n custom_objects=kashgari.custom_objects)\n\n def setup_text_processor(self, processor: ABCProcessor) -> None:\n self._text_processor = processor\n self.build_embedding_model(vocab_size=processor.vocab_size)\n self._text_processor.segment = self.segment\n if self.vocab2idx:\n self._text_processor.vocab2idx = self.vocab2idx\n self._text_processor.idx2vocab = dict([(v, k) for k, v in self.vocab2idx.items()])\n\n def get_seq_length_from_corpus(self,\n generators: List[CorpusGenerator],\n *,\n use_label: bool = False,\n cover_rate: float = 0.95) -> int:\n \"\"\"\n Calculate proper sequence length according to the corpus\n\n Args:\n generators:\n use_label:\n cover_rate:\n\n Returns:\n\n \"\"\"\n seq_lens = []\n for gen in generators:\n for sentence, label in tqdm.tqdm(gen, desc=\"Calculating sequence length\"):\n if use_label:\n seq_lens.append(len(label))\n else:\n seq_lens.append(len(sentence))\n if cover_rate == 1.0:\n target_index = -1\n else:\n target_index = int(cover_rate * len(seq_lens))\n sequence_length = sorted(seq_lens)[target_index]\n logger.debug(f'Calculated sequence length = {sequence_length}')\n return sequence_length\n\n def load_embed_vocab(self) -> Optional[Dict[str, int]]:\n \"\"\"\n Load vocab dict from embedding layer\n\n Returns:\n vocab dict or None\n \"\"\"\n raise NotImplementedError\n\n def build_embedding_model(self,\n *,\n vocab_size: int = None,\n force: bool = False,\n **kwargs: Dict) -> None:\n raise NotImplementedError\n\n def embed(self,\n sentences: List[List[str]],\n *,\n debug: bool = False) -> np.ndarray:\n \"\"\"\n batch embed sentences\n\n Args:\n sentences: Sentence list to embed\n debug: show debug info\n Returns:\n vectorized sentence list\n \"\"\"\n if self._text_processor is None:\n raise ValueError('Need to setup the `embedding.setup_text_processor` before calling the embed function.')\n\n tensor_x = self._text_processor.transform(sentences,\n segment=self.segment,\n seq_length=self.max_position)\n if debug:\n logger.debug(f'sentence tensor: {tensor_x}')\n embed_results = 
self.embed_model.predict(tensor_x)\n return embed_results\n\n\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "tensorflow.keras.models.model_from_json" ] ]
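A minimal sketch of the tf.keras JSON round-trip that to_dict and _override_load_model rely on, using a throwaway one-layer model (kashgari.custom_objects is only required when the serialized architecture contains custom layers):

import json
import tensorflow as tf

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(4, input_shape=(10,), name="embed_dense")]
)
config = json.loads(model.to_json())           # dict form, as stored by to_dict()
rebuilt = tf.keras.models.model_from_json(json.dumps(config))
print(rebuilt.get_layer("embed_dense").units)  # -> 4 (architecture only; weights are not serialized)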
neutrinoceros2/yt
[ "8cabf6091414e4d9a5037c4ff49199adf0ae64d6" ]
[ "yt/visualization/tests/test_plotwindow.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport shutil\nimport tempfile\nimport unittest\nfrom collections import OrderedDict\nfrom distutils.version import LooseVersion\n\nimport matplotlib\nimport numpy as np\nfrom nose.tools import assert_true\n\nfrom yt.frontends.stream.api import load_uniform_grid\nfrom yt.testing import (\n assert_array_almost_equal,\n assert_array_equal,\n assert_equal,\n assert_fname,\n assert_raises,\n assert_rel_equal,\n fake_random_ds,\n requires_file,\n)\nfrom yt.units import kboltz\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom yt.utilities.answer_testing.framework import (\n PlotWindowAttributeTest,\n data_dir_load,\n requires_ds,\n)\nfrom yt.utilities.exceptions import YTInvalidFieldType\nfrom yt.visualization.api import (\n OffAxisProjectionPlot,\n OffAxisSlicePlot,\n ProjectionPlot,\n SlicePlot,\n plot_2d,\n)\n\n\ndef setup():\n \"\"\"Test specific setup.\"\"\"\n from yt.config import ytcfg\n\n ytcfg[\"yt\", \"__withintesting\"] = \"True\"\n\n\nTEST_FLNMS = [\"test.png\"]\nM7 = \"DD0010/moving7_0010\"\nWT = \"WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030\"\n\nFPROPS = {\"family\": \"sans-serif\", \"style\": \"italic\", \"weight\": \"bold\", \"size\": 24}\n\nATTR_ARGS = {\n \"pan\": [(((0.1, 0.1),), {})],\n \"pan_rel\": [(((0.1, 0.1),), {})],\n \"set_axes_unit\": [\n ((\"kpc\",), {}),\n ((\"Mpc\",), {}),\n (((\"kpc\", \"kpc\"),), {}),\n (((\"kpc\", \"Mpc\"),), {}),\n ],\n \"set_buff_size\": [((1600,), {}), (((600, 800),), {})],\n \"set_center\": [(((0.4, 0.3),), {})],\n \"set_cmap\": [((\"density\", \"RdBu\"), {}), ((\"density\", \"kamae\"), {})],\n \"set_font\": [((OrderedDict(sorted(FPROPS.items(), key=lambda t: t[0])),), {})],\n \"set_log\": [((\"density\", False), {})],\n \"set_window_size\": [((7.0,), {})],\n \"set_zlim\": [\n ((\"density\", 1e-25, 1e-23), {}),\n ((\"density\", 1e-25, None), {\"dynamic_range\": 4}),\n ],\n \"zoom\": [((10,), {})],\n \"toggle_right_handed\": [((), {})],\n}\n\n\nCENTER_SPECS = (\n \"m\",\n \"M\",\n \"max\",\n \"Max\",\n \"c\",\n \"C\",\n \"center\",\n \"Center\",\n [0.5, 0.5, 0.5],\n [[0.2, 0.3, 0.4], \"cm\"],\n YTArray([0.3, 0.4, 0.7], \"cm\"),\n)\n\nWIDTH_SPECS = {\n # Width choices map to xlim, ylim, width, axes_unit_name 4-tuples\n None: (\n ((0, \"code_length\"), (1, \"code_length\")),\n ((0, \"code_length\"), (1, \"code_length\")),\n ((1, \"code_length\"), (1, \"code_length\")),\n None,\n ),\n 0.2: (\n ((0.4, \"code_length\"), (0.6, \"code_length\")),\n ((0.4, \"code_length\"), (0.6, \"code_length\")),\n ((0.2, \"code_length\"), (0.2, \"code_length\")),\n None,\n ),\n (0.4, 0.3): (\n ((0.3, \"code_length\"), (0.7, \"code_length\")),\n ((0.35, \"code_length\"), (0.65, \"code_length\")),\n ((0.4, \"code_length\"), (0.3, \"code_length\")),\n None,\n ),\n (1.2, \"cm\"): (\n ((-0.1, \"code_length\"), (1.1, \"code_length\")),\n ((-0.1, \"code_length\"), (1.1, \"code_length\")),\n ((1.2, \"code_length\"), (1.2, \"code_length\")),\n (\"cm\", \"cm\"),\n ),\n ((1.2, \"cm\"), (2.0, \"cm\")): (\n ((-0.1, \"code_length\"), (1.1, \"code_length\")),\n ((-0.5, \"code_length\"), (1.5, \"code_length\")),\n ((1.2, \"code_length\"), (2.0, \"code_length\")),\n (\"cm\", \"cm\"),\n ),\n ((1.2, \"cm\"), (0.02, \"m\")): (\n ((-0.1, \"code_length\"), (1.1, \"code_length\")),\n ((-0.5, \"code_length\"), (1.5, \"code_length\")),\n ((1.2, \"code_length\"), (2.0, \"code_length\")),\n (\"cm\", \"m\"),\n ),\n}\n\nWEIGHT_FIELDS = (\n None,\n \"density\",\n (\"gas\", \"density\"),\n)\n\nPROJECTION_METHODS = 
(\"integrate\", \"sum\", \"mip\")\n\nBUFF_SIZES = [(800, 800), (1600, 1600), (1254, 1254), (800, 600)]\n\n\ndef simple_contour(test_obj, plot):\n plot.annotate_contour(test_obj.plot_field)\n\n\ndef simple_velocity(test_obj, plot):\n plot.annotate_velocity()\n\n\ndef simple_streamlines(test_obj, plot):\n ax = test_obj.plot_axis\n xax = test_obj.ds.coordinates.x_axis[ax]\n yax = test_obj.ds.coordinates.y_axis[ax]\n xn = test_obj.ds.coordinates.axis_name[xax]\n yn = test_obj.ds.coordinates.axis_name[yax]\n plot.annotate_streamlines(f\"velocity_{xn}\", f\"velocity_{yn}\")\n\n\nCALLBACK_TESTS = (\n (\"simple_contour\", (simple_contour,)),\n (\"simple_velocity\", (simple_velocity,)),\n # (\"simple_streamlines\", (simple_streamlines,)),\n # (\"simple_all\", (simple_contour, simple_velocity, simple_streamlines)),\n)\n\n\n@requires_ds(M7)\ndef test_attributes():\n \"\"\"Test plot member functions that aren't callbacks\"\"\"\n plot_field = \"density\"\n decimals = 12\n\n ds = data_dir_load(M7)\n for ax in \"xyz\":\n for attr_name in ATTR_ARGS.keys():\n for args in ATTR_ARGS[attr_name]:\n test = PlotWindowAttributeTest(\n ds, plot_field, ax, attr_name, args, decimals\n )\n test_attributes.__name__ = test.description\n yield test\n for n, r in CALLBACK_TESTS:\n yield PlotWindowAttributeTest(\n ds,\n plot_field,\n ax,\n attr_name,\n args,\n decimals,\n callback_id=n,\n callback_runners=r,\n )\n\n\n@requires_ds(WT)\ndef test_attributes_wt():\n plot_field = \"density\"\n decimals = 12\n\n ds = data_dir_load(WT)\n ax = \"z\"\n for attr_name in ATTR_ARGS.keys():\n for args in ATTR_ARGS[attr_name]:\n yield PlotWindowAttributeTest(ds, plot_field, ax, attr_name, args, decimals)\n for n, r in CALLBACK_TESTS:\n yield PlotWindowAttributeTest(\n ds,\n plot_field,\n ax,\n attr_name,\n args,\n decimals,\n callback_id=n,\n callback_runners=r,\n )\n\n\nclass TestHideAxesColorbar(unittest.TestCase):\n\n ds = None\n\n def setUp(self):\n if self.ds is None:\n self.ds = fake_random_ds(64)\n self.slc = SlicePlot(self.ds, 0, \"density\")\n self.tmpdir = tempfile.mkdtemp()\n self.curdir = os.getcwd()\n os.chdir(self.tmpdir)\n\n def tearDown(self):\n os.chdir(self.curdir)\n shutil.rmtree(self.tmpdir)\n del self.ds\n del self.slc\n\n def test_hide_show_axes(self):\n self.slc.hide_axes()\n self.slc.save()\n self.slc.show_axes()\n self.slc.save()\n\n def test_hide_show_colorbar(self):\n self.slc.hide_colorbar()\n self.slc.save()\n self.slc.show_colorbar()\n self.slc.save()\n\n def test_hide_axes_colorbar(self):\n self.slc.hide_colorbar()\n self.slc.hide_axes()\n self.slc.save()\n\n\nclass TestSetWidth(unittest.TestCase):\n\n ds = None\n\n def setUp(self):\n if self.ds is None:\n self.ds = fake_random_ds(64)\n self.slc = SlicePlot(self.ds, 0, \"density\")\n\n def tearDown(self):\n del self.ds\n del self.slc\n\n def _assert_05cm(self):\n assert_array_equal(\n [self.slc.xlim, self.slc.ylim, self.slc.width],\n [\n (YTQuantity(0.25, \"cm\"), YTQuantity(0.75, \"cm\")),\n (YTQuantity(0.25, \"cm\"), YTQuantity(0.75, \"cm\")),\n (YTQuantity(0.5, \"cm\"), YTQuantity(0.5, \"cm\")),\n ],\n )\n\n def _assert_05_075cm(self):\n assert_array_equal(\n [self.slc.xlim, self.slc.ylim, self.slc.width],\n [\n (YTQuantity(0.25, \"cm\"), YTQuantity(0.75, \"cm\")),\n (YTQuantity(0.125, \"cm\"), YTQuantity(0.875, \"cm\")),\n (YTQuantity(0.5, \"cm\"), YTQuantity(0.75, \"cm\")),\n ],\n )\n\n def test_set_width_one(self):\n assert_equal(\n [self.slc.xlim, self.slc.ylim, self.slc.width],\n [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)],\n )\n 
assert_true(self.slc._axes_unit_names is None)\n\n def test_set_width_nonequal(self):\n self.slc.set_width((0.5, 0.8))\n assert_rel_equal(\n [self.slc.xlim, self.slc.ylim, self.slc.width],\n [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)],\n 15,\n )\n assert_true(self.slc._axes_unit_names is None)\n\n def test_twoargs_eq(self):\n self.slc.set_width(0.5, \"cm\")\n self._assert_05cm()\n assert_true(self.slc._axes_unit_names == (\"cm\", \"cm\"))\n\n def test_tuple_eq(self):\n self.slc.set_width((0.5, \"cm\"))\n self._assert_05cm()\n assert_true(self.slc._axes_unit_names == (\"cm\", \"cm\"))\n\n def test_tuple_of_tuples_neq(self):\n self.slc.set_width(((0.5, \"cm\"), (0.75, \"cm\")))\n self._assert_05_075cm()\n assert_true(self.slc._axes_unit_names == (\"cm\", \"cm\"))\n\n\nclass TestPlotWindowSave(unittest.TestCase):\n def setUp(self):\n self.tmpdir = tempfile.mkdtemp()\n self.curdir = os.getcwd()\n os.chdir(self.tmpdir)\n\n def tearDown(self):\n os.chdir(self.curdir)\n shutil.rmtree(self.tmpdir)\n\n def test_slice_plot(self):\n test_ds = fake_random_ds(16)\n for dim in range(3):\n slc = SlicePlot(test_ds, dim, \"density\")\n for fname in TEST_FLNMS:\n assert_fname(slc.save(fname)[0])\n\n def test_repr_html(self):\n test_ds = fake_random_ds(16)\n slc = SlicePlot(test_ds, 0, \"density\")\n slc._repr_html_()\n\n def test_projection_plot(self):\n test_ds = fake_random_ds(16)\n for dim in range(3):\n proj = ProjectionPlot(test_ds, dim, \"density\")\n for fname in TEST_FLNMS:\n assert_fname(proj.save(fname)[0])\n\n def test_projection_plot_ds(self):\n test_ds = fake_random_ds(16)\n reg = test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3)\n for dim in range(3):\n proj = ProjectionPlot(test_ds, dim, \"density\", data_source=reg)\n proj.save()\n\n def test_projection_plot_c(self):\n test_ds = fake_random_ds(16)\n for center in CENTER_SPECS:\n proj = ProjectionPlot(test_ds, 0, \"density\", center=center)\n proj.save()\n\n def test_projection_plot_wf(self):\n test_ds = fake_random_ds(16)\n for wf in WEIGHT_FIELDS:\n proj = ProjectionPlot(test_ds, 0, \"density\", weight_field=wf)\n proj.save()\n\n def test_projection_plot_m(self):\n test_ds = fake_random_ds(16)\n for method in PROJECTION_METHODS:\n proj = ProjectionPlot(test_ds, 0, \"density\", method=method)\n proj.save()\n\n def test_projection_plot_bs(self):\n test_ds = fake_random_ds(16)\n for bf in BUFF_SIZES:\n proj = ProjectionPlot(test_ds, 0, (\"gas\", \"density\"), buff_size=bf)\n image = proj.frb[\"gas\", \"density\"]\n\n # note that image.shape is inverted relative to the passed in buff_size\n assert_equal(image.shape[::-1], bf)\n\n def test_offaxis_slice_plot(self):\n test_ds = fake_random_ds(16)\n slc = OffAxisSlicePlot(test_ds, [1, 1, 1], \"density\")\n for fname in TEST_FLNMS:\n assert_fname(slc.save(fname)[0])\n\n def test_offaxis_projection_plot(self):\n test_ds = fake_random_ds(16)\n prj = OffAxisProjectionPlot(test_ds, [1, 1, 1], \"density\")\n for fname in TEST_FLNMS:\n assert_fname(prj.save(fname)[0])\n\n def test_creation_with_width(self):\n test_ds = fake_random_ds(16)\n for width in WIDTH_SPECS:\n xlim, ylim, pwidth, aun = WIDTH_SPECS[width]\n plot = ProjectionPlot(test_ds, 0, \"density\", width=width)\n\n xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]\n ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]\n pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]\n\n [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]\n [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]\n 
[assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]\n assert_true(aun == plot._axes_unit_names)\n\n\ndef test_on_off_compare():\n # fake density field that varies in the x-direction only\n den = np.arange(32 ** 3) / 32 ** 2 + 1\n den = den.reshape(32, 32, 32)\n den = np.array(den, dtype=np.float64)\n data = dict(density=(den, \"g/cm**3\"))\n bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n ds = load_uniform_grid(data, den.shape, length_unit=\"Mpc\", bbox=bbox, nprocs=64)\n\n sl_on = SlicePlot(ds, \"z\", [\"density\"])\n\n L = [0, 0, 1]\n north_vector = [0, 1, 0]\n sl_off = OffAxisSlicePlot(\n ds, L, \"density\", center=[0, 0, 0], north_vector=north_vector\n )\n\n assert_array_almost_equal(sl_on.frb[\"density\"], sl_off.frb[\"density\"])\n\n sl_on.set_buff_size((800, 400))\n sl_on._recreate_frb()\n sl_off.set_buff_size((800, 400))\n sl_off._recreate_frb()\n\n assert_array_almost_equal(sl_on.frb[\"density\"], sl_off.frb[\"density\"])\n\n\ndef test_plot_particle_field_error():\n ds = fake_random_ds(32, particles=100)\n\n field_names = [\n \"particle_mass\",\n [\"particle_mass\", \"density\"],\n [\"density\", \"particle_mass\"],\n ]\n\n objects_normals = [\n (SlicePlot, 2),\n (SlicePlot, [1, 1, 1]),\n (ProjectionPlot, 2),\n (OffAxisProjectionPlot, [1, 1, 1]),\n ]\n\n for object, normal in objects_normals:\n for field_name_list in field_names:\n assert_raises(YTInvalidFieldType, object, ds, normal, field_name_list)\n\n\ndef test_setup_origin():\n origin_inputs = (\n \"domain\",\n \"left-window\",\n \"center-domain\",\n \"lower-right-window\",\n (\"window\",),\n (\"right\", \"domain\"),\n (\"lower\", \"window\"),\n (\"lower\", \"right\", \"window\"),\n (0.5, 0.5, \"domain\"),\n ((50, \"cm\"), (50, \"cm\"), \"domain\"),\n )\n w = (10, \"cm\")\n\n ds = fake_random_ds(32, length_unit=100.0)\n generated_limits = []\n # lower limit -> llim\n # upper limit -> ulim\n # xllim xulim yllim yulim\n correct_limits = [\n 45.0,\n 55.0,\n 45.0,\n 55.0,\n 0.0,\n 10.0,\n 0.0,\n 10.0,\n -5.0,\n 5.0,\n -5.0,\n 5.0,\n -10.0,\n 0,\n 0,\n 10.0,\n 0.0,\n 10.0,\n 0.0,\n 10.0,\n -55.0,\n -45.0,\n -55.0,\n -45.0,\n -5.0,\n 5.0,\n 0.0,\n 10.0,\n -10.0,\n 0,\n 0,\n 10.0,\n -5.0,\n 5.0,\n -5.0,\n 5.0,\n -5.0,\n 5.0,\n -5.0,\n 5.0,\n ]\n for o in origin_inputs:\n slc = SlicePlot(ds, 2, \"density\", width=w, origin=o)\n ax = slc.plots[\"density\"].axes\n xlims = ax.get_xlim()\n ylims = ax.get_ylim()\n lims = [xlims[0], xlims[1], ylims[0], ylims[1]]\n for l in lims:\n generated_limits.append(l)\n assert_array_almost_equal(correct_limits, generated_limits)\n\n\ndef test_frb_regen():\n ds = fake_random_ds(32)\n slc = SlicePlot(ds, 2, \"density\")\n slc.set_buff_size(1200)\n assert_equal(slc.frb[\"density\"].shape, (1200, 1200))\n slc.set_buff_size((400.0, 200.7))\n assert_equal(slc.frb[\"density\"].shape, (200, 400))\n\n\ndef test_set_background_color():\n ds = fake_random_ds(32)\n plot = SlicePlot(ds, 2, \"density\")\n for field in [\"density\", (\"gas\", \"density\")]:\n plot.set_background_color(field, \"red\")\n plot._setup_plots()\n ax = plot.plots[field].axes\n if LooseVersion(matplotlib.__version__) < LooseVersion(\"2.0.0\"):\n assert_equal(ax.get_axis_bgcolor(), \"red\")\n else:\n assert_equal(ax.get_facecolor(), (1.0, 0.0, 0.0, 1.0))\n\n\ndef test_set_unit():\n ds = fake_random_ds(32, fields=(\"temperature\",), units=(\"K\",))\n slc = SlicePlot(ds, 2, \"temperature\")\n\n orig_array = slc.frb[\"gas\", \"temperature\"].copy()\n\n slc.set_unit(\"temperature\", \"degF\")\n\n assert 
str(slc.frb[\"gas\", \"temperature\"].units) == \"°F\"\n assert_array_almost_equal(\n np.array(slc.frb[\"gas\", \"temperature\"]), np.array(orig_array) * 1.8 - 459.67\n )\n\n # test that a plot modifying function that destroys the frb preserves the\n # new unit\n slc.set_buff_size(1000)\n\n assert str(slc.frb[\"gas\", \"temperature\"].units) == \"°F\"\n\n slc.set_buff_size(800)\n\n slc.set_unit(\"temperature\", \"K\")\n assert str(slc.frb[\"gas\", \"temperature\"].units) == \"K\"\n assert_array_almost_equal(slc.frb[\"gas\", \"temperature\"], orig_array)\n\n slc.set_unit(\"temperature\", \"keV\", equivalency=\"thermal\")\n assert str(slc.frb[\"gas\", \"temperature\"].units) == \"keV\"\n assert_array_almost_equal(\n slc.frb[\"gas\", \"temperature\"], (orig_array * kboltz).to(\"keV\")\n )\n\n # test that a plot modifying function that destroys the frb preserves the\n # new unit with an equivalency\n slc.set_buff_size(1000)\n\n assert str(slc.frb[\"gas\", \"temperature\"].units) == \"keV\"\n\n # test that destroying the FRB then changing the unit using an equivalency\n # doesn't error out, see issue #1316\n slc = SlicePlot(ds, 2, \"temperature\")\n slc.set_buff_size(1000)\n slc.set_unit(\"temperature\", \"keV\", equivalency=\"thermal\")\n assert str(slc.frb[\"gas\", \"temperature\"].units) == \"keV\"\n\n\nWD = \"WDMerger_hdf5_chk_1000/WDMerger_hdf5_chk_1000.hdf5\"\nblast_wave = \"amrvac/bw_2d0000.dat\"\n\n\n@requires_file(WD)\n@requires_file(blast_wave)\ndef test_plot_2d():\n # Cartesian\n ds = fake_random_ds((32, 32, 1), fields=(\"temperature\",), units=(\"K\",))\n slc = SlicePlot(\n ds, \"z\", [\"temperature\"], width=(0.2, \"unitary\"), center=[0.4, 0.3, 0.5]\n )\n slc2 = plot_2d(ds, \"temperature\", width=(0.2, \"unitary\"), center=[0.4, 0.3])\n slc3 = plot_2d(\n ds, \"temperature\", width=(0.2, \"unitary\"), center=ds.arr([0.4, 0.3], \"cm\")\n )\n assert_array_equal(slc.frb[\"temperature\"], slc2.frb[\"temperature\"])\n assert_array_equal(slc.frb[\"temperature\"], slc3.frb[\"temperature\"])\n # Cylindrical\n ds = data_dir_load(WD)\n slc = SlicePlot(ds, \"theta\", [\"density\"], width=(30000.0, \"km\"))\n slc2 = plot_2d(ds, \"density\", width=(30000.0, \"km\"))\n assert_array_equal(slc.frb[\"density\"], slc2.frb[\"density\"])\n\n # Spherical\n ds = data_dir_load(blast_wave)\n slc = SlicePlot(ds, \"phi\", [\"density\"], width=(1, \"unitary\"))\n slc2 = plot_2d(ds, \"density\", width=(1, \"unitary\"))\n assert_array_equal(slc.frb[\"density\"], slc2.frb[\"density\"])\n\n\ndef test_symlog_colorbar():\n ds = fake_random_ds(16)\n\n def _thresh_density(field, data):\n wh = data[\"density\"] < 0.5\n ret = data[\"density\"]\n ret[wh] = 0\n return ret\n\n def _neg_density(field, data):\n return -data[\"threshold_density\"]\n\n ds.add_field(\n \"threshold_density\",\n function=_thresh_density,\n units=\"g/cm**3\",\n sampling_type=\"cell\",\n )\n ds.add_field(\n \"negative_density\", function=_neg_density, units=\"g/cm**3\", sampling_type=\"cell\"\n )\n\n for field in [\"density\", \"threshold_density\", \"negative_density\"]:\n plot = SlicePlot(ds, 2, field)\n plot.set_log(field, True, linthresh=0.1)\n with tempfile.NamedTemporaryFile(suffix=\"png\") as f:\n plot.save(f.name)\n\n\ndef test_nan_data():\n data = np.random.random((16, 16, 16)) - 0.5\n data[:9, :9, :9] = np.nan\n\n data = {\"density\": data}\n\n ds = load_uniform_grid(data, [16, 16, 16])\n\n plot = SlicePlot(ds, \"z\", \"density\")\n\n with tempfile.NamedTemporaryFile(suffix=\"png\") as f:\n plot.save(f.name)\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.random.random" ] ]
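The fabricated-data pattern exercised by test_on_off_compare and test_nan_data in the record above, shown standalone (NumPy only; in the tests, load_uniform_grid consumes the dict of (array, units) pairs together with the bbox, omitted here so the sketch runs without yt):

import numpy as np

den = np.arange(32 ** 3) / 32 ** 2 + 1         # ramp dominated by the x index
den = np.array(den.reshape(32, 32, 32), dtype=np.float64)
data = {"density": (den, "g/cm**3")}           # the (array, units) layout yt expects
bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])

nan_data = np.random.random((16, 16, 16)) - 0.5
nan_data[:9, :9, :9] = np.nan                  # plotting must tolerate NaN blocks
print(den[1, 0, 0] - den[0, 0, 0])             # -> 1.0: unit step along x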
ekoly/lambdata-1
[ "55e0238cf06ff09c5f4d246c5d96b8d446c5237f" ]
[ "lambdata_ethanmjansen/__init__.py" ]
[ "'''\nlambdata - a collection of data science helper functions\n'''\n\nimport numpy as np\nimport pandas as pd\n\n# sample data: single-column DataFrames of ones and zeros\nONES = pd.DataFrame(np.ones(10))  # shape (10, 1)\nZEROS = pd.DataFrame(np.zeros(50))  # shape (50, 1)\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
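A short usage sketch for the two sample constants, assuming the package is installed under the name given by its file path:

from lambdata_ethanmjansen import ONES, ZEROS

print(ONES.shape, ZEROS.shape)        # -> (10, 1) (50, 1): single-column DataFrames
print(ONES[0].sum(), ZEROS[0].sum())  # -> 10.0 0.0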
matwilso/relation-networks
[ "66c67b342a90ae3699e576dcec883c329905b2e0" ]
[ "rns/util.py" ]
[ "import tensorflow as tf\n\ndef merge_summaries(sd, id):\n summaries = []\n for key in sd.keys():\n summaries.append(tf.summary.scalar(key, sd[key]))\n for key in id.keys():\n summaries.append(tf.summary.image(key, id[key]))\n return tf.summary.merge(summaries)\n\ndef pack_images(images, rows, cols):\n \"\"\"Helper utility to make a field of images.\"\"\"\n shape = tf.shape(images)\n width = shape[-3]\n height = shape[-2]\n depth = shape[-1]\n images = tf.reshape(images, (-1, width, height, depth))\n batch = tf.shape(images)[0]\n rows = tf.minimum(rows, batch)\n cols = tf.minimum(batch // rows, cols)\n images = images[:rows * cols]\n images = tf.reshape(images, (rows, cols, width, height, depth))\n images = tf.transpose(images, [0, 2, 1, 3, 4])\n images = tf.reshape(images, [1, rows * width, cols * height, depth])\n return images\n\ndef image_tile_summary(name, tensor, rows=8, cols=8):\n tf.summary.image(name, pack_images(tensor, rows, cols), max_outputs=3)\n\ndef cartesian_product(a,b):\n a, b = a[None, :, None], b[:, None, None]\n prod = tf.concat([b + tf.zeros_like(a), tf.zeros_like(b) + a], axis = 2)\n #new_shape = tf.stack([-1, tf.shape(cartesian_product)[-1]])\n #cartesian_product = tf.reshape(cartesian_product, new_shape)\n prod = tf.reshape(prod, [-1])\n return prod\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.minimum", "tensorflow.shape", "tensorflow.reshape", "tensorflow.summary.image", "tensorflow.zeros_like", "tensorflow.summary.merge", "tensorflow.transpose" ] ]
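A minimal sketch of pack_images' shape contract, assuming the module imports as rns.util (per the file path) and a TF 2.x eager runtime (merge_summaries itself uses the TF 1.x summary API, but pack_images runs on either): a batch of shape (batch, width, height, depth) is tiled into a single image of shape (1, rows*width, cols*height, depth).

import tensorflow as tf
from rns.util import pack_images

batch = tf.zeros([6, 8, 8, 1])              # six dummy 8x8 grayscale images
tiled = pack_images(batch, rows=2, cols=3)
print(tiled.shape)                          # -> (1, 16, 24, 1): a 2x3 grid of 8x8 tiles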
Channingss/PaddleX
[ "f001960b7359f3a88b7dd96e1f34500b90566ceb" ]
[ "paddlex/interpret/core/_session_preparation.py" ]
[ "#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport os\nimport os.path as osp\nimport paddle.fluid as fluid\nimport paddlex as pdx\nimport numpy as np\nfrom paddle.fluid.param_attr import ParamAttr\nfrom paddlex.interpret.as_data_reader.readers import preprocess_image\n\ndef gen_user_home():\n if \"HOME\" in os.environ:\n home_path = os.environ[\"HOME\"]\n if os.path.exists(home_path) and os.path.isdir(home_path):\n return home_path\n return os.path.expanduser('~')\n\n\ndef paddle_get_fc_weights(var_name=\"fc_0.w_0\"):\n fc_weights = fluid.global_scope().find_var(var_name).get_tensor()\n return np.array(fc_weights)\n\n\ndef paddle_resize(extracted_features, outsize):\n resized_features = fluid.layers.resize_bilinear(extracted_features, outsize)\n return resized_features\n\n\ndef compute_features_for_kmeans(data_content):\n root_path = gen_user_home()\n root_path = osp.join(root_path, '.paddlex')\n h_pre_models = osp.join(root_path, \"pre_models\")\n if not osp.exists(h_pre_models):\n if not osp.exists(root_path):\n os.makedirs(root_path)\n url = \"https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz\"\n pdx.utils.download_and_decompress(url, path=root_path)\n def conv_bn_layer(input,\n num_filters,\n filter_size,\n stride=1,\n groups=1,\n act=None,\n name=None,\n is_test=True,\n global_name=''):\n conv = fluid.layers.conv2d(\n input=input,\n num_filters=num_filters,\n filter_size=filter_size,\n stride=stride,\n padding=(filter_size - 1) // 2,\n groups=groups,\n act=None,\n param_attr=ParamAttr(name=global_name + name + \"_weights\"),\n bias_attr=False,\n name=global_name + name + '.conv2d.output.1')\n if name == \"conv1\":\n bn_name = \"bn_\" + name\n else:\n bn_name = \"bn\" + name[3:]\n return fluid.layers.batch_norm(\n input=conv,\n act=act,\n name=global_name + bn_name + '.output.1',\n param_attr=ParamAttr(global_name + bn_name + '_scale'),\n bias_attr=ParamAttr(global_name + bn_name + '_offset'),\n moving_mean_name=global_name + bn_name + '_mean',\n moving_variance_name=global_name + bn_name + '_variance',\n use_global_stats=is_test\n )\n\n startup_prog = fluid.default_startup_program().clone(for_test=True)\n prog = fluid.Program()\n with fluid.program_guard(prog, startup_prog):\n with fluid.unique_name.guard():\n image_op = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')\n\n conv = conv_bn_layer(\n input=image_op,\n num_filters=32,\n filter_size=3,\n stride=2,\n act='relu',\n name='conv1_1')\n conv = conv_bn_layer(\n input=conv,\n num_filters=32,\n filter_size=3,\n stride=1,\n act='relu',\n name='conv1_2')\n conv = conv_bn_layer(\n input=conv,\n num_filters=64,\n filter_size=3,\n stride=1,\n act='relu',\n name='conv1_3')\n extracted_features = conv\n resized_features = fluid.layers.resize_bilinear(extracted_features, image_op.shape[2:])\n\n gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))\n place = fluid.CUDAPlace(gpu_id)\n # place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n 
exe.run(startup_prog)\n fluid.io.load_persistables(exe, h_pre_models, prog)\n\n images = preprocess_image(data_content) # transpose to [N, 3, H, W], scaled to [0.0, 1.0]\n result = exe.run(prog, fetch_list=[resized_features], feed={'image': images})\n\n return result[0][0]\n" ]
[ [ "numpy.array" ] ]
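A NumPy-only sketch of the input contract noted in the comment on preprocess_image in the record above; to_nchw_unit is a hypothetical stand-in, not a paddlex function. The executor is fed float32 batches of shape [N, 3, 224, 224] scaled to [0, 1]:

import numpy as np

def to_nchw_unit(images_nhwc_uint8):
    # [N, H, W, 3] uint8 -> [N, 3, H, W] float32 in [0.0, 1.0]
    x = images_nhwc_uint8.astype(np.float32) / 255.0
    return np.transpose(x, (0, 3, 1, 2))

batch = np.zeros((2, 224, 224, 3), dtype=np.uint8)
print(to_nchw_unit(batch).shape)            # -> (2, 3, 224, 224)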
tonymackinnon/ray
[ "14a1419682bdba40d2c8bf226e1727cf44abcaa4" ]
[ "python/ray/tests/test_basic.py" ]
[ "# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom concurrent.futures import ThreadPoolExecutor\nimport json\nimport logging\nfrom multiprocessing import Process\nimport os\nimport random\nimport re\nimport setproctitle\nimport shutil\nimport six\nimport socket\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nimport ray.tests.cluster_utils\nimport ray.tests.utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_simple_serialization(ray_start_regular):\n primitive_objects = [\n # Various primitive types.\n 0,\n 0.0,\n 0.9,\n 1 << 62,\n 1 << 999,\n \"a\",\n string.printable,\n \"\\u262F\",\n u\"hello world\",\n u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\",\n None,\n True,\n False,\n [],\n (),\n {},\n type,\n int,\n set(),\n # Collections types.\n collections.Counter([np.random.randint(0, 10) for _ in range(100)]),\n collections.OrderedDict([(\"hello\", 1), (\"world\", 2)]),\n collections.defaultdict(lambda: 0, [(\"hello\", 1), (\"world\", 2)]),\n collections.defaultdict(lambda: [], [(\"hello\", 1), (\"world\", 2)]),\n collections.deque([1, 2, 3, \"a\", \"b\", \"c\", 3.5]),\n # Numpy dtypes.\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n ]\n\n if sys.version_info < (3, 0):\n primitive_objects.append(long(0)) # noqa: E501,F821\n\n composite_objects = (\n [[obj]\n for obj in primitive_objects] + [(obj, )\n for obj in primitive_objects] + [{\n (): obj\n } for obj in primitive_objects])\n\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in primitive_objects + composite_objects:\n new_obj_1 = ray.get(f.remote(obj))\n new_obj_2 = ray.get(ray.put(obj))\n assert obj == new_obj_1\n assert obj == new_obj_2\n # TODO(rkn): The numpy dtypes currently come back as regular integers\n # or floats.\n if type(obj).__module__ != \"numpy\":\n assert type(obj) == type(new_obj_1)\n assert type(obj) == type(new_obj_2)\n\n\ndef test_complex_serialization(ray_start_regular):\n def assert_equal(obj1, obj2):\n module_numpy = (type(obj1).__module__ == np.__name__\n or type(obj2).__module__ == np.__name__)\n if module_numpy:\n empty_shape = ((hasattr(obj1, \"shape\") and obj1.shape == ())\n or (hasattr(obj2, \"shape\") and obj2.shape == ()))\n if empty_shape:\n # This is a special case because currently\n # np.testing.assert_equal fails because we do not properly\n # handle different numerical types.\n assert obj1 == obj2, (\"Objects {} and {} are \"\n \"different.\".format(obj1, obj2))\n else:\n np.testing.assert_equal(obj1, obj2)\n elif hasattr(obj1, \"__dict__\") and hasattr(obj2, \"__dict__\"):\n special_keys = [\"_pytype_\"]\n assert (set(list(obj1.__dict__.keys()) + special_keys) == set(\n list(obj2.__dict__.keys()) + special_keys)), (\n \"Objects {} and {} are different.\".format(obj1, obj2))\n for key in obj1.__dict__.keys():\n if key not in special_keys:\n assert_equal(obj1.__dict__[key], obj2.__dict__[key])\n elif type(obj1) is dict or type(obj2) is dict:\n assert_equal(obj1.keys(), obj2.keys())\n for key in obj1.keys():\n assert_equal(obj1[key], obj2[key])\n elif type(obj1) is list or type(obj2) is list:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are lists with \"\n \"different lengths.\".format(\n obj1, 
obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif type(obj1) is tuple or type(obj2) is tuple:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are tuples \"\n \"with different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif (ray.serialization.is_named_tuple(type(obj1))\n or ray.serialization.is_named_tuple(type(obj2))):\n assert len(obj1) == len(obj2), (\n \"Objects {} and {} are named \"\n \"tuples with different lengths.\".format(obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n else:\n assert obj1 == obj2, \"Objects {} and {} are different.\".format(\n obj1, obj2)\n\n if sys.version_info >= (3, 0):\n long_extras = [0, np.array([[\"hi\", u\"hi\"], [1.3, 1]])]\n else:\n\n long_extras = [\n long(0), # noqa: E501,F821\n np.array([\n [\"hi\", u\"hi\"],\n [1.3, long(1)] # noqa: E501,F821\n ])\n ]\n\n PRIMITIVE_OBJECTS = [\n 0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], \"a\",\n string.printable, \"\\u262F\", u\"hello world\",\n u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\", None, True, False, [], (), {},\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n np.zeros([100, 100]),\n np.random.normal(size=[100, 100]),\n np.array([\"hi\", 3]),\n np.array([\"hi\", 3], dtype=object)\n ] + long_extras\n\n COMPLEX_OBJECTS = [\n [[[[[[[[[[[[]]]]]]]]]]]],\n {\n \"obj{}\".format(i): np.random.normal(size=[100, 100])\n for i in range(10)\n },\n # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {\n # (): {(): {}}}}}}}}}}}}},\n (\n (((((((((), ), ), ), ), ), ), ), ), ),\n {\n \"a\": {\n \"b\": {\n \"c\": {\n \"d\": {}\n }\n }\n }\n },\n ]\n\n class Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n class Bar(object):\n def __init__(self):\n for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):\n setattr(self, \"field{}\".format(i), val)\n\n class Baz(object):\n def __init__(self):\n self.foo = Foo()\n self.bar = Bar()\n\n def method(self, arg):\n pass\n\n class Qux(object):\n def __init__(self):\n self.objs = [Foo(), Bar(), Baz()]\n\n class SubQux(Qux):\n def __init__(self):\n Qux.__init__(self)\n\n class CustomError(Exception):\n pass\n\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n NamedTupleExample = collections.namedtuple(\n \"Example\", \"field1, field2, field3, field4, field5\")\n\n CUSTOM_OBJECTS = [\n Exception(\"Test object.\"),\n CustomError(),\n Point(11, y=22),\n Foo(),\n Bar(),\n Baz(), # Qux(), SubQux(),\n NamedTupleExample(1, 1.0, \"hi\", np.zeros([3, 5]), [1, 2, 3]),\n ]\n\n # Test dataclasses in Python 3.7.\n if sys.version_info >= (3, 7):\n from dataclasses import make_dataclass\n\n DataClass0 = make_dataclass(\"DataClass0\", [(\"number\", int)])\n\n CUSTOM_OBJECTS.append(DataClass0(number=3))\n\n class CustomClass(object):\n def __init__(self, value):\n self.value = value\n\n DataClass1 = make_dataclass(\"DataClass1\", [(\"custom\", CustomClass)])\n\n class DataClass2(DataClass1):\n @classmethod\n def from_custom(cls, data):\n custom = CustomClass(data)\n return cls(custom)\n\n def __reduce__(self):\n return (self.from_custom, (self.custom.value, ))\n\n CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))\n\n BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS\n\n LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]\n 
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]\n # The check that type(obj).__module__ != \"numpy\" should be unnecessary, but\n # otherwise this seems to fail on Mac OS X on Travis.\n DICT_OBJECTS = ([{\n obj: obj\n } for obj in PRIMITIVE_OBJECTS if (\n obj.__hash__ is not None and type(obj).__module__ != \"numpy\")] + [{\n 0: obj\n } for obj in BASE_OBJECTS] + [{\n Foo(123): Foo(456)\n }])\n\n RAY_TEST_OBJECTS = (\n BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)\n\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in RAY_TEST_OBJECTS:\n assert_equal(obj, ray.get(f.remote(obj)))\n assert_equal(obj, ray.get(ray.put(obj)))\n\n\ndef test_nested_functions(ray_start_regular):\n # Make sure that remote functions can use other values that are defined\n # after the remote function but before the first function invocation.\n @ray.remote\n def f():\n return g(), ray.get(h.remote())\n\n def g():\n return 1\n\n @ray.remote\n def h():\n return 2\n\n assert ray.get(f.remote()) == (1, 2)\n\n\ndef test_ray_recursive_objects(ray_start_regular):\n class ClassA(object):\n pass\n\n # Make a list that contains itself.\n lst = []\n lst.append(lst)\n # Make an object that contains itself as a field.\n a1 = ClassA()\n a1.field = a1\n # Make two objects that contain each other as fields.\n a2 = ClassA()\n a3 = ClassA()\n a2.field = a3\n a3.field = a2\n # Make a dictionary that contains itself.\n d1 = {}\n d1[\"key\"] = d1\n # Create a list of recursive objects.\n recursive_objects = [lst, a1, a2, a3, d1]\n\n # Check that exceptions are thrown when we serialize the recursive\n # objects.\n for obj in recursive_objects:\n with pytest.raises(Exception):\n ray.put(obj)\n\n\ndef test_passing_arguments_by_value_out_of_the_box(ray_start_regular):\n @ray.remote\n def f(x):\n return x\n\n # Test passing lambdas.\n\n def temp():\n return 1\n\n assert ray.get(f.remote(temp))() == 1\n assert ray.get(f.remote(lambda x: x + 1))(3) == 4\n\n # Test sets.\n assert ray.get(f.remote(set())) == set()\n s = {1, (1, 2, \"hi\")}\n assert ray.get(f.remote(s)) == s\n\n # Test types.\n assert ray.get(f.remote(int)) == int\n assert ray.get(f.remote(float)) == float\n assert ray.get(f.remote(str)) == str\n\n class Foo(object):\n def __init__(self):\n pass\n\n # Make sure that we can put and get a custom type. 
Note that the result\n # won't be \"equal\" to Foo.\n ray.get(ray.put(Foo))\n\n\ndef test_putting_object_that_closes_over_object_id(ray_start_regular):\n # This test is here to prevent a regression of\n # https://github.com/ray-project/ray/issues/1317.\n\n class Foo(object):\n def __init__(self):\n self.val = ray.put(0)\n\n def method(self):\n f\n\n f = Foo()\n ray.put(f)\n\n\ndef test_put_get(shutdown_only):\n ray.init(num_cpus=0)\n\n for i in range(100):\n value_before = i * 10**6\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = i * 10**6 * 1.0\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = \"h\" * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = [1] * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n\ndef test_custom_serializers(ray_start_regular):\n class Foo(object):\n def __init__(self):\n self.x = 3\n\n def custom_serializer(obj):\n return 3, \"string1\", type(obj).__name__\n\n def custom_deserializer(serialized_obj):\n return serialized_obj, \"string2\"\n\n ray.register_custom_serializer(\n Foo, serializer=custom_serializer, deserializer=custom_deserializer)\n\n assert ray.get(ray.put(Foo())) == ((3, \"string1\", Foo.__name__), \"string2\")\n\n class Bar(object):\n def __init__(self):\n self.x = 3\n\n ray.register_custom_serializer(\n Bar, serializer=custom_serializer, deserializer=custom_deserializer)\n\n @ray.remote\n def f():\n return Bar()\n\n assert ray.get(f.remote()) == ((3, \"string1\", Bar.__name__), \"string2\")\n\n\ndef test_serialization_final_fallback(ray_start_regular):\n pytest.importorskip(\"catboost\")\n # This test will only run when \"catboost\" is installed.\n from catboost import CatBoostClassifier\n\n model = CatBoostClassifier(\n iterations=2,\n depth=2,\n learning_rate=1,\n loss_function=\"Logloss\",\n logging_level=\"Verbose\")\n\n reconstructed_model = ray.get(ray.put(model))\n assert set(model.get_params().items()) == set(\n reconstructed_model.get_params().items())\n\n\ndef test_register_class(ray_start_2_cpus):\n # Check that putting an object of a class that has not been registered\n # throws an exception.\n class TempClass(object):\n pass\n\n ray.get(ray.put(TempClass()))\n\n # Test passing custom classes into remote functions from the driver.\n @ray.remote\n def f(x):\n return x\n\n class Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n foo = ray.get(f.remote(Foo(7)))\n assert foo == Foo(7)\n\n regex = re.compile(r\"\\d+\\.\\d*\")\n new_regex = ray.get(f.remote(regex))\n # This seems to fail on the system Python 3 that comes with\n # Ubuntu, so it is commented out for now:\n # assert regex == new_regex\n # Instead, we do this:\n assert regex.pattern == new_regex.pattern\n\n class TempClass1(object):\n def __init__(self):\n self.value = 1\n\n # Test returning custom classes created on workers.\n @ray.remote\n def g():\n class TempClass2(object):\n def __init__(self):\n self.value = 2\n\n return TempClass1(), TempClass2()\n\n object_1, object_2 = ray.get(g.remote())\n assert object_1.value == 1\n assert object_2.value == 2\n\n # Test exporting custom class 
definitions from one worker to another\n # when the worker is blocked in a get.\n class NewTempClass(object):\n def __init__(self, value):\n self.value = value\n\n @ray.remote\n def h1(x):\n return NewTempClass(x)\n\n @ray.remote\n def h2(x):\n return ray.get(h1.remote(x))\n\n assert ray.get(h2.remote(10)).value == 10\n\n # Test registering multiple classes with the same name.\n @ray.remote(num_return_vals=3)\n def j():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = []\n for _ in range(5):\n results += j.remote()\n for i in range(len(results) // 3):\n c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])\n\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n @ray.remote\n def k():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = ray.get([k.remote() for _ in range(5)])\n for c0, c1, c2 in results:\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n\ndef test_keyword_args(ray_start_regular):\n @ray.remote\n def keyword_fct1(a, b=\"hello\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct2(a=\"hello\", b=\"world\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct3(a, b, c=\"hello\", d=\"world\"):\n return \"{} {} {} {}\".format(a, b, c, d)\n\n x = keyword_fct1.remote(1)\n assert ray.get(x) == \"1 hello\"\n x = keyword_fct1.remote(1, \"hi\")\n assert ray.get(x) == \"1 hi\"\n x = keyword_fct1.remote(1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n x = keyword_fct1.remote(a=1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n\n x = keyword_fct2.remote(a=\"w\", b=\"hi\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(b=\"hi\", a=\"w\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(a=\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(b=\"hi\")\n assert ray.get(x) == \"hello hi\"\n x = keyword_fct2.remote(\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(\"w\", \"hi\")\n assert ray.get(x) == \"w hi\"\n\n x = keyword_fct3.remote(0, 1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(a=0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, d=\"hi\", c=\"w\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, c=\"w\")\n assert ray.get(x) == \"0 1 w world\"\n x = keyword_fct3.remote(0, 1, d=\"hi\")\n assert ray.get(x) == \"0 1 hello hi\"\n x = keyword_fct3.remote(0, 1)\n assert ray.get(x) == \"0 1 hello world\"\n x = keyword_fct3.remote(a=0, b=1)\n assert ray.get(x) == \"0 1 hello world\"\n\n # Check that we cannot pass invalid keyword arguments to functions.\n @ray.remote\n def f1():\n 
return\n\n @ray.remote\n def f2(x, y=0, z=0):\n return\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f1.remote(3)\n\n with pytest.raises(Exception):\n f1.remote(x=3)\n\n with pytest.raises(Exception):\n f2.remote(0, w=0)\n\n with pytest.raises(Exception):\n f2.remote(3, x=3)\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f2.remote(1, 2, 3, 4)\n\n @ray.remote\n def f3(x):\n return x\n\n assert ray.get(f3.remote(4)) == 4\n\n\ndef test_variable_number_of_args(shutdown_only):\n @ray.remote\n def varargs_fct1(*a):\n return \" \".join(map(str, a))\n\n @ray.remote\n def varargs_fct2(a, *b):\n return \" \".join(map(str, b))\n\n try:\n\n @ray.remote\n def kwargs_throw_exception(**c):\n return ()\n\n kwargs_exception_thrown = False\n except Exception:\n kwargs_exception_thrown = True\n\n ray.init(num_cpus=1)\n\n x = varargs_fct1.remote(0, 1, 2)\n assert ray.get(x) == \"0 1 2\"\n x = varargs_fct2.remote(0, 1, 2)\n assert ray.get(x) == \"1 2\"\n\n assert kwargs_exception_thrown\n\n @ray.remote\n def f1(*args):\n return args\n\n @ray.remote\n def f2(x, y, *args):\n return x, y, args\n\n assert ray.get(f1.remote()) == ()\n assert ray.get(f1.remote(1)) == (1, )\n assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)\n with pytest.raises(Exception):\n f2.remote()\n with pytest.raises(Exception):\n f2.remote(1)\n assert ray.get(f2.remote(1, 2)) == (1, 2, ())\n assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))\n assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))\n\n def testNoArgs(self):\n @ray.remote\n def no_op():\n pass\n\n self.ray_start()\n\n ray.get(no_op.remote())\n\n\ndef test_defining_remote_functions(shutdown_only):\n ray.init(num_cpus=3)\n\n # Test that we can define a remote function in the shell.\n @ray.remote\n def f(x):\n return x + 1\n\n assert ray.get(f.remote(0)) == 1\n\n # Test that we can redefine the remote function.\n @ray.remote\n def f(x):\n return x + 10\n\n while True:\n val = ray.get(f.remote(0))\n assert val in [1, 10]\n if val == 10:\n break\n else:\n logger.info(\"Still using old definition of f, trying again.\")\n\n # Test that we can close over plain old data.\n data = [\n np.zeros([3, 5]), (1, 2, \"a\"), [0.0, 1.0, 1 << 62], 1 << 60, {\n \"a\": np.zeros(3)\n }\n ]\n\n @ray.remote\n def g():\n return data\n\n ray.get(g.remote())\n\n # Test that we can close over modules.\n @ray.remote\n def h():\n return np.zeros([3, 5])\n\n assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))\n\n @ray.remote\n def j():\n return time.time()\n\n ray.get(j.remote())\n\n # Test that we can define remote functions that call other remote\n # functions.\n @ray.remote\n def k(x):\n return x + 1\n\n @ray.remote\n def k2(x):\n return ray.get(k.remote(x))\n\n @ray.remote\n def m(x):\n return ray.get(k2.remote(x))\n\n assert ray.get(k.remote(1)) == 2\n assert ray.get(k2.remote(1)) == 2\n assert ray.get(m.remote(1)) == 2\n\n\ndef test_submit_api(shutdown_only):\n ray.init(num_cpus=2, num_gpus=1, resources={\"Custom\": 1})\n\n @ray.remote\n def f(n):\n return list(range(n))\n\n @ray.remote\n def g():\n return ray.get_gpu_ids()\n\n assert f._remote([0], num_return_vals=0) is None\n id1 = f._remote(args=[1], num_return_vals=1)\n assert ray.get(id1) == [0]\n id1, id2 = f._remote(args=[2], num_return_vals=2)\n assert ray.get([id1, id2]) == [0, 1]\n id1, id2, id3 = f._remote(args=[3], num_return_vals=3)\n assert ray.get([id1, id2, id3]) == [0, 1, 2]\n assert ray.get(\n 
g._remote(args=[], num_cpus=1, num_gpus=1,\n resources={\"Custom\": 1})) == [0]\n infeasible_id = g._remote(args=[], resources={\"NonexistentCustom\": 1})\n assert ray.get(g._remote()) == []\n ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)\n assert len(ready_ids) == 0\n assert len(remaining_ids) == 1\n\n @ray.remote\n class Actor(object):\n def __init__(self, x, y=0):\n self.x = x\n self.y = y\n\n def method(self, a, b=0):\n return self.x, self.y, a, b\n\n def gpu_ids(self):\n return ray.get_gpu_ids()\n\n @ray.remote\n class Actor2(object):\n def __init__(self):\n pass\n\n def method(self):\n pass\n\n a = Actor._remote(\n args=[0], kwargs={\"y\": 1}, num_gpus=1, resources={\"Custom\": 1})\n\n a2 = Actor2._remote()\n ray.get(a2.method._remote())\n\n id1, id2, id3, id4 = a.method._remote(\n args=[\"test\"], kwargs={\"b\": 2}, num_return_vals=4)\n assert ray.get([id1, id2, id3, id4]) == [0, 1, \"test\", 2]\n\n\ndef test_many_fractional_resources(shutdown_only):\n ray.init(num_cpus=2, num_gpus=2, resources={\"Custom\": 2})\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote\n def f(block, accepted_resources):\n true_resources = {\n resource: value[0][1]\n for resource, value in ray.get_resource_ids().items()\n }\n if block:\n ray.get(g.remote())\n return true_resources == accepted_resources\n\n # Check that the resource are assigned correctly.\n result_ids = []\n for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):\n resource_set = {\"CPU\": int(rand1 * 10000) / 10000}\n result_ids.append(f._remote([False, resource_set], num_cpus=rand1))\n\n resource_set = {\"CPU\": 1, \"GPU\": int(rand1 * 10000) / 10000}\n result_ids.append(f._remote([False, resource_set], num_gpus=rand1))\n\n resource_set = {\"CPU\": 1, \"Custom\": int(rand1 * 10000) / 10000}\n result_ids.append(\n f._remote([False, resource_set], resources={\"Custom\": rand1}))\n\n resource_set = {\n \"CPU\": int(rand1 * 10000) / 10000,\n \"GPU\": int(rand2 * 10000) / 10000,\n \"Custom\": int(rand3 * 10000) / 10000\n }\n result_ids.append(\n f._remote(\n [False, resource_set],\n num_cpus=rand1,\n num_gpus=rand2,\n resources={\"Custom\": rand3}))\n result_ids.append(\n f._remote(\n [True, resource_set],\n num_cpus=rand1,\n num_gpus=rand2,\n resources={\"Custom\": rand3}))\n assert all(ray.get(result_ids))\n\n # Check that the available resources at the end are the same as the\n # beginning.\n stop_time = time.time() + 10\n correct_available_resources = False\n while time.time() < stop_time:\n if ray.available_resources() == {\n \"CPU\": 2.0,\n \"GPU\": 2.0,\n \"Custom\": 2.0,\n }:\n correct_available_resources = True\n break\n if not correct_available_resources:\n assert False, \"Did not get correct available resources.\"\n\n\ndef test_get_multiple(ray_start_regular):\n object_ids = [ray.put(i) for i in range(10)]\n assert ray.get(object_ids) == list(range(10))\n\n # Get a random choice of object IDs with duplicates.\n indices = list(np.random.choice(range(10), 5))\n indices += indices\n results = ray.get([object_ids[i] for i in indices])\n assert results == indices\n\n\ndef test_get_multiple_experimental(ray_start_regular):\n object_ids = [ray.put(i) for i in range(10)]\n\n object_ids_tuple = tuple(object_ids)\n assert ray.experimental.get(object_ids_tuple) == list(range(10))\n\n object_ids_nparray = np.array(object_ids)\n assert ray.experimental.get(object_ids_nparray) == list(range(10))\n\n\ndef test_get_dict(ray_start_regular):\n d = {str(i): ray.put(i) for i in range(5)}\n for i in range(5, 10):\n d[str(i)] 
= i\n result = ray.experimental.get(d)\n expected = {str(i): i for i in range(10)}\n assert result == expected\n\n\ndef test_wait(ray_start_regular):\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n ready_ids, remaining_ids = ray.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)\n assert set(ready_ids) == set(objectids)\n assert remaining_ids == []\n\n objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)\n assert time.time() - start_time < 2\n assert len(ready_ids) == 3\n assert len(remaining_ids) == 1\n ray.wait(objectids)\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)\n assert time.time() - start_time < 5\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n # Verify that calling wait with duplicate object IDs throws an\n # exception.\n x = ray.put(1)\n with pytest.raises(Exception):\n ray.wait([x, x])\n\n # Make sure it is possible to call wait with an empty list.\n ready_ids, remaining_ids = ray.wait([])\n assert ready_ids == []\n assert remaining_ids == []\n\n # Test semantics of num_returns with no timeout.\n oids = [ray.put(i) for i in range(10)]\n (found, rest) = ray.wait(oids, num_returns=2)\n assert len(found) == 2\n assert len(rest) == 8\n\n # Verify that incorrect usage raises a TypeError.\n x = ray.put(1)\n with pytest.raises(TypeError):\n ray.wait(x)\n with pytest.raises(TypeError):\n ray.wait(1)\n with pytest.raises(TypeError):\n ray.wait([1])\n\n\ndef test_wait_iterables(ray_start_regular):\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n objectids = np.array(\n [f.remote(1.0),\n f.remote(0.5),\n f.remote(0.5),\n f.remote(0.5)])\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n\ndef test_multiple_waits_and_gets(shutdown_only):\n # It is important to use three workers here, so that the three tasks\n # launched in this experiment can run at the same time.\n ray.init(num_cpus=3)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n @ray.remote\n def g(l):\n # The argument l should be a list containing one object ID.\n ray.wait([l[0]])\n\n @ray.remote\n def h(l):\n # The argument l should be a list containing one object ID.\n ray.get(l[0])\n\n # Make sure that multiple wait requests involving the same object ID\n # all return.\n x = f.remote(1)\n ray.get([g.remote([x]), g.remote([x])])\n\n # Make sure that multiple get requests involving the same object ID all\n # return.\n x = f.remote(1)\n ray.get([h.remote([x]), h.remote([x])])\n\n\ndef test_caching_functions_to_run(shutdown_only):\n # Test that we export functions to run on all workers before the driver\n # is connected.\n def f(worker_info):\n sys.path.append(1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def f(worker_info):\n sys.path.append(2)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def g(worker_info):\n sys.path.append(3)\n\n 
ray.worker.global_worker.run_function_on_all_workers(g)\n\n def f(worker_info):\n sys.path.append(4)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n ray.init(num_cpus=1)\n\n @ray.remote\n def get_state():\n time.sleep(1)\n return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]\n\n res1 = get_state.remote()\n res2 = get_state.remote()\n assert ray.get(res1) == (1, 2, 3, 4)\n assert ray.get(res2) == (1, 2, 3, 4)\n\n # Clean up the path on the workers.\n def f(worker_info):\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n\ndef test_running_function_on_all_workers(ray_start_regular):\n def f(worker_info):\n sys.path.append(\"fake_directory\")\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n @ray.remote\n def get_path1():\n return sys.path\n\n assert \"fake_directory\" == ray.get(get_path1.remote())[-1]\n\n def f(worker_info):\n sys.path.pop(-1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n # Create a second remote function to guarantee that when we call\n # get_path2.remote(), the second function to run will have been run on\n # the worker.\n @ray.remote\n def get_path2():\n return sys.path\n\n assert \"fake_directory\" not in ray.get(get_path2.remote())\n\n\ndef test_profiling_api(ray_start_2_cpus):\n @ray.remote\n def f():\n with ray.profile(\n \"custom_event\",\n extra_data={\"name\": \"custom name\"}) as ray_prof:\n ray_prof.set_attribute(\"key\", \"value\")\n\n ray.put(1)\n object_id = f.remote()\n ray.wait([object_id])\n ray.get(object_id)\n\n # Wait until all of the profiling information appears in the profile\n # table.\n timeout_seconds = 20\n start_time = time.time()\n while True:\n if time.time() - start_time > timeout_seconds:\n raise Exception(\"Timed out while waiting for information in \"\n \"profile table.\")\n profile_data = ray.timeline()\n event_types = {event[\"cat\"] for event in profile_data}\n expected_types = [\n \"worker_idle\",\n \"task\",\n \"task:deserialize_arguments\",\n \"task:execute\",\n \"task:store_outputs\",\n \"wait_for_function\",\n \"ray.get\",\n \"ray.put\",\n \"ray.wait\",\n \"submit_task\",\n \"fetch_and_run_function\",\n \"register_remote_function\",\n \"custom_event\", # This is the custom one from ray.profile.\n ]\n\n if all(expected_type in event_types\n for expected_type in expected_types):\n break\n\n\ndef test_wait_cluster(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=1, resources={\"RemoteResource\": 1})\n cluster.add_node(num_cpus=1, resources={\"RemoteResource\": 1})\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote(resources={\"RemoteResource\": 1})\n def f():\n return\n\n # Make sure we have enough workers on the remote nodes to execute some\n # tasks.\n tasks = [f.remote() for _ in range(10)]\n start = time.time()\n ray.get(tasks)\n end = time.time()\n\n # Submit some more tasks that can only be executed on the remote nodes.\n tasks = [f.remote() for _ in range(10)]\n # Sleep for a bit to let the tasks finish.\n time.sleep((end - start) * 2)\n _, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)\n # All remote tasks should have finished.\n assert len(unready) == 0\n\n\ndef test_object_transfer_dump(ray_start_cluster):\n cluster = ray_start_cluster\n\n num_nodes = 3\n for i in range(num_nodes):\n cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n 
return\n\n # These objects will live on different nodes.\n object_ids = [\n f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)\n ]\n\n # Broadcast each object from each machine to each other machine.\n for object_id in object_ids:\n ray.get([\n f._remote(args=[object_id], resources={str(i): 1})\n for i in range(num_nodes)\n ])\n\n # The profiling information only flushes once every second.\n time.sleep(1.1)\n\n transfer_dump = ray.object_transfer_timeline()\n # Make sure the transfer dump can be serialized with JSON.\n json.loads(json.dumps(transfer_dump))\n assert len(transfer_dump) >= num_nodes**2\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_receive\"\n }) == num_nodes\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_send\"\n }) == num_nodes\n\n\ndef test_identical_function_names(ray_start_regular):\n # Define a bunch of remote functions and make sure that we don't\n # accidentally call an older version.\n\n num_calls = 200\n\n @ray.remote\n def f():\n return 1\n\n results1 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 2\n\n results2 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 3\n\n results3 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 4\n\n results4 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 5\n\n results5 = [f.remote() for _ in range(num_calls)]\n\n assert ray.get(results1) == num_calls * [1]\n assert ray.get(results2) == num_calls * [2]\n assert ray.get(results3) == num_calls * [3]\n assert ray.get(results4) == num_calls * [4]\n assert ray.get(results5) == num_calls * [5]\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote # noqa: F811\n def g():\n return 2\n\n @ray.remote # noqa: F811\n def g():\n return 3\n\n @ray.remote # noqa: F811\n def g():\n return 4\n\n @ray.remote # noqa: F811\n def g():\n return 5\n\n result_values = ray.get([g.remote() for _ in range(num_calls)])\n assert result_values == num_calls * [5]\n\n\ndef test_illegal_api_calls(ray_start_regular):\n\n # Verify that we cannot call put on an ObjectID.\n x = ray.put(1)\n with pytest.raises(Exception):\n ray.put(x)\n # Verify that we cannot call get on a regular value.\n with pytest.raises(Exception):\n ray.get(3)\n\n\n# TODO(hchen): This test currently doesn't work in Python 2. This is likely\n# because plasma client isn't thread-safe. This needs to be fixed from the\n# Arrow side. 
See #4107 for relevant discussions.\n@pytest.mark.skipif(six.PY2, reason=\"Doesn't work in Python 2.\")\ndef test_multithreading(ray_start_2_cpus):\n # This test requires at least 2 CPUs to finish since the worker does not\n # release resources when joining the threads.\n\n def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):\n \"\"\"A helper function that runs test cases in multiple threads.\"\"\"\n\n def wrapper():\n for _ in range(num_repeats):\n test_case()\n time.sleep(random.randint(0, 10) / 1000.0)\n return \"ok\"\n\n executor = ThreadPoolExecutor(max_workers=num_threads)\n futures = [executor.submit(wrapper) for _ in range(num_threads)]\n for future in futures:\n assert future.result() == \"ok\"\n\n @ray.remote\n def echo(value, delay_ms=0):\n if delay_ms > 0:\n time.sleep(delay_ms / 1000.0)\n return value\n\n @ray.remote\n class Echo(object):\n def echo(self, value):\n return value\n\n def test_api_in_multi_threads():\n \"\"\"Test using Ray api in multiple threads.\"\"\"\n\n # Test calling remote functions in multiple threads.\n def test_remote_call():\n value = random.randint(0, 1000000)\n result = ray.get(echo.remote(value))\n assert value == result\n\n run_test_in_multi_threads(test_remote_call)\n\n # Test multiple threads calling one actor.\n actor = Echo.remote()\n\n def test_call_actor():\n value = random.randint(0, 1000000)\n result = ray.get(actor.echo.remote(value))\n assert value == result\n\n run_test_in_multi_threads(test_call_actor)\n\n # Test put and get.\n def test_put_and_get():\n value = random.randint(0, 1000000)\n result = ray.get(ray.put(value))\n assert value == result\n\n run_test_in_multi_threads(test_put_and_get)\n\n # Test multiple threads waiting for objects.\n num_wait_objects = 10\n objects = [\n echo.remote(i, delay_ms=10) for i in range(num_wait_objects)\n ]\n\n def test_wait():\n ready, _ = ray.wait(\n objects,\n num_returns=len(objects),\n timeout=1000.0,\n )\n assert len(ready) == num_wait_objects\n assert ray.get(ready) == list(range(num_wait_objects))\n\n run_test_in_multi_threads(test_wait, num_repeats=1)\n\n # Run tests in a driver.\n test_api_in_multi_threads()\n\n # Run tests in a worker.\n @ray.remote\n def run_tests_in_worker():\n test_api_in_multi_threads()\n return \"ok\"\n\n assert ray.get(run_tests_in_worker.remote()) == \"ok\"\n\n # Test actor that runs background threads.\n @ray.remote\n class MultithreadedActor(object):\n def __init__(self):\n self.lock = threading.Lock()\n self.thread_results = []\n\n def background_thread(self, wait_objects):\n try:\n # Test wait\n ready, _ = ray.wait(\n wait_objects,\n num_returns=len(wait_objects),\n timeout=1000.0,\n )\n assert len(ready) == len(wait_objects)\n for _ in range(20):\n num = 10\n # Test remote call\n results = [echo.remote(i) for i in range(num)]\n assert ray.get(results) == list(range(num))\n # Test put and get\n objects = [ray.put(i) for i in range(num)]\n assert ray.get(objects) == list(range(num))\n time.sleep(random.randint(0, 10) / 1000.0)\n except Exception as e:\n with self.lock:\n self.thread_results.append(e)\n else:\n with self.lock:\n self.thread_results.append(\"ok\")\n\n def spawn(self):\n wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]\n self.threads = [\n threading.Thread(\n target=self.background_thread, args=(wait_objects, ))\n for _ in range(20)\n ]\n [thread.start() for thread in self.threads]\n\n def join(self):\n [thread.join() for thread in self.threads]\n assert self.thread_results == [\"ok\"] * len(self.threads)\n return 
\"ok\"\n\n actor = MultithreadedActor.remote()\n actor.spawn.remote()\n ray.get(actor.join.remote()) == \"ok\"\n\n\ndef test_free_objects_multi_node(ray_start_cluster):\n # This test will do following:\n # 1. Create 3 raylets that each hold an actor.\n # 2. Each actor creates an object which is the deletion target.\n # 3. Wait 0.1 second for the objects to be deleted.\n # 4. Check that the deletion targets have been deleted.\n # Caution: if remote functions are used instead of actor methods,\n # one raylet may create more than one worker to execute the\n # tasks, so the flushing operations may be executed in different\n # workers and the plasma client holding the deletion target\n # may not be flushed.\n cluster = ray_start_cluster\n config = json.dumps({\"object_manager_repeated_push_delay_ms\": 1000})\n for i in range(3):\n cluster.add_node(\n num_cpus=1,\n resources={\"Custom{}\".format(i): 1},\n _internal_config=config)\n ray.init(redis_address=cluster.redis_address)\n\n class RawActor(object):\n def get(self):\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n ActorOnNode0 = ray.remote(resources={\"Custom0\": 1})(RawActor)\n ActorOnNode1 = ray.remote(resources={\"Custom1\": 1})(RawActor)\n ActorOnNode2 = ray.remote(resources={\"Custom2\": 1})(RawActor)\n\n def create(actors):\n a = actors[0].get.remote()\n b = actors[1].get.remote()\n c = actors[2].get.remote()\n (l1, l2) = ray.wait([a, b, c], num_returns=3)\n assert len(l1) == 3\n assert len(l2) == 0\n return (a, b, c)\n\n def run_one_test(actors, local_only, delete_creating_tasks):\n (a, b, c) = create(actors)\n # The three objects should be generated on different object stores.\n assert ray.get(a) != ray.get(b)\n assert ray.get(a) != ray.get(c)\n assert ray.get(c) != ray.get(b)\n ray.internal.free(\n [a, b, c],\n local_only=local_only,\n delete_creating_tasks=delete_creating_tasks)\n # Wait for the objects to be deleted.\n time.sleep(0.1)\n return (a, b, c)\n\n actors = [\n ActorOnNode0.remote(),\n ActorOnNode1.remote(),\n ActorOnNode2.remote()\n ]\n # Case 1: run this local_only=False. All 3 objects will be deleted.\n (a, b, c) = run_one_test(actors, False, False)\n (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)\n # All the objects are deleted.\n assert len(l1) == 0\n assert len(l2) == 3\n # Case 2: run this local_only=True. 
Only 1 object will be deleted.\n (a, b, c) = run_one_test(actors, True, False)\n (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)\n # One object is deleted and 2 objects are not.\n assert len(l1) == 2\n assert len(l2) == 1\n # The deleted object will have the same store as the driver.\n local_return = ray.worker.global_worker.plasma_client.store_socket_name\n for object_id in l1:\n assert ray.get(object_id) != local_return\n\n # Case 3: These cases test deleting the creating tasks for the objects.\n (a, b, c) = run_one_test(actors, False, False)\n task_table = ray.tasks()\n for obj in [a, b, c]:\n assert ray._raylet.compute_task_id(obj).hex() in task_table\n\n (a, b, c) = run_one_test(actors, False, True)\n task_table = ray.tasks()\n for obj in [a, b, c]:\n assert ray._raylet.compute_task_id(obj).hex() not in task_table\n\n\ndef test_local_mode(shutdown_only):\n @ray.remote\n def local_mode_f():\n return np.array([0, 0])\n\n @ray.remote\n def local_mode_g(x):\n x[0] = 1\n return x\n\n ray.init(local_mode=True)\n\n @ray.remote\n def f():\n return np.ones([3, 4, 5])\n\n xref = f.remote()\n # Remote functions should return ObjectIDs.\n assert isinstance(xref, ray.ObjectID)\n assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))\n y = np.random.normal(size=[11, 12])\n # Check that ray.get(ray.put) is the identity.\n assert np.alltrue(y == ray.get(ray.put(y)))\n\n # Make sure objects are immutable; this example is why we need to copy\n # arguments before passing them into remote functions in local mode.\n aref = local_mode_f.remote()\n assert np.alltrue(ray.get(aref) == np.array([0, 0]))\n bref = local_mode_g.remote(ray.get(aref))\n # Make sure local_mode_g does not mutate aref.\n assert np.alltrue(ray.get(aref) == np.array([0, 0]))\n assert np.alltrue(ray.get(bref) == np.array([1, 0]))\n\n # wait should return the first num_returns values passed in as the\n # first list and the remaining values as the second list\n num_returns = 5\n object_ids = [ray.put(i) for i in range(20)]\n ready, remaining = ray.wait(\n object_ids, num_returns=num_returns, timeout=None)\n assert ready == object_ids[:num_returns]\n assert remaining == object_ids[num_returns:]\n\n # Check that ray.put() and ray.internal.free() work in local mode.\n\n v1 = np.ones(10)\n v2 = np.zeros(10)\n\n k1 = ray.put(v1)\n assert np.alltrue(v1 == ray.get(k1))\n k2 = ray.put(v2)\n assert np.alltrue(v2 == ray.get(k2))\n\n ray.internal.free([k1, k2])\n with pytest.raises(Exception):\n ray.get(k1)\n with pytest.raises(Exception):\n ray.get(k2)\n\n # Should fail silently.\n ray.internal.free([k1, k2])\n\n # Test actors in LOCAL_MODE.\n\n @ray.remote\n class LocalModeTestClass(object):\n def __init__(self, array):\n self.array = array\n\n def set_array(self, array):\n self.array = array\n\n def get_array(self):\n return self.array\n\n def modify_and_set_array(self, array):\n array[0] = -1\n self.array = array\n\n @ray.method(num_return_vals=3)\n def returns_multiple(self):\n return 1, 2, 3\n\n test_actor = LocalModeTestClass.remote(np.arange(10))\n obj = test_actor.get_array.remote()\n assert isinstance(obj, ray.ObjectID)\n assert np.alltrue(ray.get(obj) == np.arange(10))\n\n test_array = np.arange(10)\n # Remote actor functions should not mutate arguments\n test_actor.modify_and_set_array.remote(test_array)\n assert np.alltrue(test_array == np.arange(10))\n # Remote actor functions should keep state\n test_array[0] = -1\n assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))\n\n # Check that actor handles work 
in local mode.\n\n @ray.remote\n def use_actor_handle(handle):\n array = np.ones(10)\n handle.set_array.remote(array)\n assert np.alltrue(array == ray.get(handle.get_array.remote()))\n\n ray.get(use_actor_handle.remote(test_actor))\n\n # Check that exceptions are deferred until ray.get().\n\n exception_str = \"test_basic remote task exception\"\n\n @ray.remote\n def throws():\n raise Exception(exception_str)\n\n obj = throws.remote()\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj)\n\n # Check that multiple return values are handled properly.\n\n @ray.remote(num_return_vals=3)\n def returns_multiple():\n return 1, 2, 3\n\n obj1, obj2, obj3 = returns_multiple.remote()\n assert ray.get(obj1) == 1\n assert ray.get(obj2) == 2\n assert ray.get(obj3) == 3\n assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]\n\n obj1, obj2, obj3 = test_actor.returns_multiple.remote()\n assert ray.get(obj1) == 1\n assert ray.get(obj2) == 2\n assert ray.get(obj3) == 3\n assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]\n\n @ray.remote(num_return_vals=2)\n def returns_multiple_throws():\n raise Exception(exception_str)\n\n obj1, obj2 = returns_multiple_throws.remote()\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj1)\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj2)\n\n\ndef test_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=2)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n time_buffer = 2\n\n # At most 10 copies of this can run at once.\n @ray.remote(num_cpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(10)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(11)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_cpus=3)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_gpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(2)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_multi_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=10)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n 
@ray.remote(num_cpus=1, num_gpus=9)\n def f(n):\n time.sleep(n)\n\n @ray.remote(num_cpus=9, num_gpus=1)\n def g(n):\n time.sleep(n)\n\n time_buffer = 2\n\n start_time = time.time()\n ray.get([f.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_gpu_ids(shutdown_only):\n num_gpus = 10\n ray.init(num_cpus=10, num_gpus=num_gpus)\n\n def get_gpu_ids(num_gpus_per_worker):\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == num_gpus_per_worker\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))\n f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))\n f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))\n f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))\n f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))\n\n # Wait for all workers to start up.\n @ray.remote\n def f():\n time.sleep(0.1)\n return os.getpid()\n\n start_time = time.time()\n while True:\n if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:\n break\n if time.time() > start_time + 10:\n raise Exception(\"Timed out while waiting for workers to start \"\n \"up.\")\n\n list_of_ids = ray.get([f0.remote() for _ in range(10)])\n assert list_of_ids == 10 * [[]]\n\n list_of_ids = ray.get([f1.remote() for _ in range(10)])\n set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}\n assert set_of_ids == {(i, ) for i in range(10)}\n\n list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])\n all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]\n assert set(all_ids) == set(range(10))\n\n # There are only 10 GPUs, and each task uses 5 GPUs, so there should only\n # be 2 tasks scheduled at a given time.\n t1 = time.time()\n ray.get([f5.remote() for _ in range(20)])\n assert time.time() - t1 >= 10 * 0.1\n\n # Test that actors have CUDA_VISIBLE_DEVICES set properly.\n\n @ray.remote\n class Actor0(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n @ray.remote(num_gpus=1)\n class Actor1(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n a0 = Actor0.remote()\n 
ray.get(a0.test.remote())\n\n a1 = Actor1.remote()\n ray.get(a1.test.remote())\n\n\ndef test_zero_cpus(shutdown_only):\n ray.init(num_cpus=0)\n\n # We should be able to execute a task that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n def f():\n return 1\n\n ray.get(f.remote())\n\n # We should be able to create an actor that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n class Actor(object):\n def method(self):\n pass\n\n a = Actor.remote()\n x = a.method.remote()\n ray.get(x)\n\n\ndef test_zero_cpus_actor(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0)\n cluster.add_node(num_cpus=2)\n ray.init(redis_address=cluster.redis_address)\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote\n class Foo(object):\n def method(self):\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # Make sure tasks and actors run on the remote raylet.\n a = Foo.remote()\n assert ray.get(a.method.remote()) != local_plasma\n\n\ndef test_fractional_resources(shutdown_only):\n ray.init(num_cpus=6, num_gpus=3, resources={\"Custom\": 1})\n\n @ray.remote(num_gpus=0.5)\n class Foo1(object):\n def method(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n return gpu_ids[0]\n\n foos = [Foo1.remote() for _ in range(6)]\n gpu_ids = ray.get([f.method.remote() for f in foos])\n for i in range(3):\n assert gpu_ids.count(i) == 2\n del foos\n\n @ray.remote\n class Foo2(object):\n def method(self):\n pass\n\n # Create an actor that requires 0.7 of the custom resource.\n f1 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n ray.get(f1.method.remote())\n # Make sure that we cannot create another actor that requires 0.7 of the\n # custom resource. TODO(rkn): Re-enable this once ray.wait is\n # implemented.\n f2 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n ready, _ = ray.wait([f2.method.remote()], timeout=0.5)\n assert len(ready) == 0\n # Make sure we can start an actor that requires only 0.3 of the custom\n # resource.\n f3 = Foo2._remote([], {}, resources={\"Custom\": 0.3})\n ray.get(f3.method.remote())\n\n del f1, f3\n\n # Make sure that we get exceptions if we submit tasks that require a\n # fractional number of resources greater than 1.\n\n @ray.remote(num_cpus=1.5)\n def test():\n pass\n\n with pytest.raises(ValueError):\n test.remote()\n\n with pytest.raises(ValueError):\n Foo2._remote([], {}, resources={\"Custom\": 1.5})\n\n\ndef test_multiple_raylets(ray_start_cluster):\n # This test will define a bunch of tasks that can only be assigned to\n # specific raylets, and we will check that they are assigned\n # to the correct raylets.\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=11, num_gpus=0)\n cluster.add_node(num_cpus=5, num_gpus=5)\n cluster.add_node(num_cpus=10, num_gpus=1)\n ray.init(redis_address=cluster.redis_address)\n cluster.wait_for_nodes()\n\n # Define a bunch of remote functions that all return the socket name of\n # the plasma store. 
Since there is a one-to-one correspondence between\n # plasma stores and raylets (at least right now), this can be\n # used to identify which raylet the task was assigned to.\n\n # This must be run on the zeroth raylet.\n @ray.remote(num_cpus=11)\n def run_on_0():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first raylet.\n @ray.remote(num_gpus=2)\n def run_on_1():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the second raylet.\n @ray.remote(num_cpus=6, num_gpus=1)\n def run_on_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This can be run anywhere.\n @ray.remote(num_cpus=0, num_gpus=0)\n def run_on_0_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first or second raylet.\n @ray.remote(num_gpus=1)\n def run_on_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the zeroth or second raylet.\n @ray.remote(num_cpus=8)\n def run_on_0_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n def run_lots_of_tasks():\n names = []\n results = []\n for i in range(100):\n index = np.random.randint(6)\n if index == 0:\n names.append(\"run_on_0\")\n results.append(run_on_0.remote())\n elif index == 1:\n names.append(\"run_on_1\")\n results.append(run_on_1.remote())\n elif index == 2:\n names.append(\"run_on_2\")\n results.append(run_on_2.remote())\n elif index == 3:\n names.append(\"run_on_0_1_2\")\n results.append(run_on_0_1_2.remote())\n elif index == 4:\n names.append(\"run_on_1_2\")\n results.append(run_on_1_2.remote())\n elif index == 5:\n names.append(\"run_on_0_2\")\n results.append(run_on_0_2.remote())\n return names, results\n\n client_table = ray.nodes()\n store_names = []\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 0\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 5\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 1\n ]\n assert len(store_names) == 3\n\n def validate_names_and_results(names, results):\n for name, result in zip(names, ray.get(results)):\n if name == \"run_on_0\":\n assert result in [store_names[0]]\n elif name == \"run_on_1\":\n assert result in [store_names[1]]\n elif name == \"run_on_2\":\n assert result in [store_names[2]]\n elif name == \"run_on_0_1_2\":\n assert (result in [\n store_names[0], store_names[1], store_names[2]\n ])\n elif name == \"run_on_1_2\":\n assert result in [store_names[1], store_names[2]]\n elif name == \"run_on_0_2\":\n assert result in [store_names[0], store_names[2]]\n else:\n raise Exception(\"This should be unreachable.\")\n assert set(ray.get(results)) == set(store_names)\n\n names, results = run_lots_of_tasks()\n validate_names_and_results(names, results)\n\n # Make sure the same thing works when this is nested inside of a task.\n\n @ray.remote\n def run_nested1():\n names, results = run_lots_of_tasks()\n return names, results\n\n @ray.remote\n def run_nested2():\n names, results = ray.get(run_nested1.remote())\n return names, results\n\n names, results = ray.get(run_nested2.remote())\n validate_names_and_results(names, results)\n\n\ndef test_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=3, 
resources={\"CustomResource\": 0})\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 1})\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource\": 1})\n def h():\n ray.get([f.remote() for _ in range(5)])\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # The f tasks should be scheduled on both raylets.\n assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n # The g tasks should be scheduled only on the second raylet.\n raylet_ids = set(ray.get([g.remote() for _ in range(50)]))\n assert len(raylet_ids) == 1\n assert list(raylet_ids)[0] != local_plasma\n\n # Make sure that resource bookkeeping works when a task that uses a\n # custom resources gets blocked.\n ray.get([h.remote() for _ in range(5)])\n\n\ndef test_two_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 1,\n \"CustomResource2\": 2\n })\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 3,\n \"CustomResource2\": 4\n })\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote(resources={\"CustomResource1\": 1})\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource2\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource1\": 1, \"CustomResource2\": 3})\n def h():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource1\": 4})\n def j():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource3\": 1})\n def k():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # The f and g tasks should be scheduled on both raylets.\n assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n # The h tasks should be scheduled only on the second raylet.\n raylet_ids = set(ray.get([h.remote() for _ in range(50)]))\n assert len(raylet_ids) == 1\n assert list(raylet_ids)[0] != local_plasma\n\n # Make sure that tasks with unsatisfied custom resource requirements do\n # not get scheduled.\n ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)\n assert ready_ids == []\n\n\ndef test_many_custom_resources(shutdown_only):\n num_custom_resources = 10000\n total_resources = {\n str(i): np.random.randint(1, 7)\n for i in range(num_custom_resources)\n }\n ray.init(num_cpus=5, resources=total_resources)\n\n def f():\n return 1\n\n remote_functions = []\n for _ in range(20):\n num_resources = np.random.randint(0, num_custom_resources + 1)\n permuted_resources = np.random.permutation(\n num_custom_resources)[:num_resources]\n random_resources = {\n str(i): total_resources[str(i)]\n for i in permuted_resources\n }\n remote_function = ray.remote(resources=random_resources)(f)\n 
remote_functions.append(remote_function)\n\n remote_functions.append(ray.remote(f))\n remote_functions.append(ray.remote(resources=total_resources)(f))\n\n results = []\n for remote_function in remote_functions:\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n\n ray.get(results)\n\n\n# TODO: 5 retry attempts may be too little for Travis and we may need to\n# increase it if this test begins to be flaky on Travis.\ndef test_zero_capacity_deletion_semantics(shutdown_only):\n ray.init(num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})\n\n def test():\n resources = ray.available_resources()\n MAX_RETRY_ATTEMPTS = 5\n retry_count = 0\n\n while resources and retry_count < MAX_RETRY_ATTEMPTS:\n time.sleep(0.1)\n resources = ray.available_resources()\n retry_count += 1\n\n if retry_count >= MAX_RETRY_ATTEMPTS:\n raise RuntimeError(\n \"Resources were available even after five retries.\")\n\n return resources\n\n function = ray.remote(\n num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})(test)\n cluster_resources = ray.get(function.remote())\n\n # All cluster resources should be utilized and\n # cluster_resources must be empty\n assert cluster_resources == {}\n\n\n@pytest.fixture\ndef save_gpu_ids_shutdown_only():\n # Record the current value of this environment variable so that we can\n # reset it after the test.\n original_gpu_ids = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n yield None\n\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Reset the environment variable.\n if original_gpu_ids is not None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = original_gpu_ids\n else:\n del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n\ndef test_specific_gpus(save_gpu_ids_shutdown_only):\n allowed_gpu_ids = [4, 5, 6]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n [str(i) for i in allowed_gpu_ids])\n ray.init(num_gpus=3)\n\n @ray.remote(num_gpus=1)\n def f():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert gpu_ids[0] in allowed_gpu_ids\n\n @ray.remote(num_gpus=2)\n def g():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 2\n assert gpu_ids[0] in allowed_gpu_ids\n assert gpu_ids[1] in allowed_gpu_ids\n\n ray.get([f.remote() for _ in range(100)])\n ray.get([g.remote() for _ in range(100)])\n\n\ndef test_blocking_tasks(ray_start_regular):\n @ray.remote\n def f(i, j):\n return (i, j)\n\n @ray.remote\n def g(i):\n # Each instance of g submits and blocks on the result of another\n # remote task.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.get(object_ids)\n\n @ray.remote\n def h(i):\n # Each instance of g submits and blocks on the result of another\n # remote task using ray.wait.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.wait(object_ids, num_returns=len(object_ids))\n\n ray.get([h.remote(i) for i in range(4)])\n\n @ray.remote\n def _sleep(i):\n time.sleep(0.01)\n return (i)\n\n @ray.remote\n def sleep():\n # Each instance of sleep submits and blocks on the result of\n # another remote task, which takes some time to execute.\n ray.get([_sleep.remote(i) for i in range(10)])\n\n ray.get(sleep.remote())\n\n\ndef test_max_call_tasks(ray_start_regular):\n @ray.remote(max_calls=1)\n def f():\n return os.getpid()\n\n pid = ray.get(f.remote())\n ray.tests.utils.wait_for_pid_to_exit(pid)\n\n @ray.remote(max_calls=2)\n def f():\n return os.getpid()\n\n pid1 = ray.get(f.remote())\n pid2 = ray.get(f.remote())\n assert pid1 == pid2\n 
ray.tests.utils.wait_for_pid_to_exit(pid1)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets\n # in a roughly equal manner.\n cluster = ray_start_cluster\n num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets in a\n # roughly equal manner even when the tasks have dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This object will be local to one of the raylets. Make sure\n # this doesn't prevent tasks from being scheduled on other raylets.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_tasks(num_tasks, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.tasks()) >= num_tasks:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.objects()) >= num_objects:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\n@pytest.mark.skipif(\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_global_state_api(shutdown_only):\n\n error_message = (\"The ray global state API cannot be used \"\n \"before ray.init has been called.\")\n\n with pytest.raises(Exception, match=error_message):\n ray.objects()\n\n with pytest.raises(Exception, match=error_message):\n ray.tasks()\n\n with pytest.raises(Exception, match=error_message):\n ray.nodes()\n\n with pytest.raises(Exception, match=error_message):\n ray.jobs()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n resources = {\"CPU\": 5, \"GPU\": 3, \"CustomResource\": 1}\n assert ray.cluster_resources() == resources\n\n assert ray.objects() == {}\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n driver_task_id = ray.worker.global_worker.current_task_id.hex()\n\n # One task is put in the task table which corresponds to this driver.\n wait_for_num_tasks(1)\n task_table = ray.tasks()\n assert len(task_table) == 1\n assert driver_task_id == list(task_table.keys())[0]\n task_spec = task_table[driver_task_id][\"TaskSpec\"]\n nil_id_hex = 
ray.ObjectID.nil().hex()\n\n assert task_spec[\"TaskID\"] == driver_task_id\n assert task_spec[\"ActorID\"] == nil_id_hex\n assert task_spec[\"Args\"] == []\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"FunctionID\"] == nil_id_hex\n assert task_spec[\"ReturnObjectIDs\"] == []\n\n client_table = ray.nodes()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n def f(*xs):\n return 1\n\n x_id = ray.put(1)\n result_id = f.remote(1, \"hi\", x_id)\n\n # Wait for one additional task to complete.\n wait_for_num_tasks(1 + 1)\n task_table = ray.tasks()\n assert len(task_table) == 1 + 1\n task_id_set = set(task_table.keys())\n task_id_set.remove(driver_task_id)\n task_id = list(task_id_set)[0]\n\n task_spec = task_table[task_id][\"TaskSpec\"]\n assert task_spec[\"ActorID\"] == nil_id_hex\n assert task_spec[\"Args\"] == [1, \"hi\", x_id]\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"ReturnObjectIDs\"] == [result_id]\n\n assert task_table[task_id] == ray.tasks(task_id)\n\n # Wait for two objects, one for the x_id and one for result_id.\n wait_for_num_objects(2)\n\n def wait_for_object_table():\n timeout = 10\n start_time = time.time()\n while time.time() - start_time < timeout:\n object_table = ray.objects()\n tables_ready = (object_table[x_id][\"ManagerIDs\"] is not None and\n object_table[result_id][\"ManagerIDs\"] is not None)\n if tables_ready:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for object table to \"\n \"update.\")\n\n wait_for_object_table()\n object_table = ray.objects()\n assert len(object_table) == 2\n\n assert object_table[x_id] == ray.objects(x_id)\n object_table_entry = ray.objects(result_id)\n assert object_table[result_id] == object_table_entry\n\n job_table = ray.jobs()\n\n assert len(job_table) == 1\n assert job_table[0][\"JobID\"] == job_id.hex()\n assert job_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError(object):\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n if sys.version_info >= (3, 0):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n else:\n import cStringIO\n self.output_buffer = cStringIO.StringIO()\n self.error_buffer = cStringIO.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n # It's important to make sure that these print statements occur even\n # without calling sys.stdout.flush() and sys.stderr.flush().\n for i in 
range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n for i in range(200):\n assert str(i) in output_lines\n error_lines = captured[\"err\"]\n assert len(error_lines) == 0\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n error_lines = captured[\"err\"]\n assert len(error_lines) == 0\n\n\n@pytest.mark.skipif(\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n\ndef test_specific_job_id():\n dummy_driver_id = ray.JobID.from_int(1)\n ray.init(num_cpus=1, job_id=dummy_driver_id)\n\n # in driver\n assert dummy_driver_id == ray._get_runtime_context().current_driver_id\n\n # in worker\n @ray.remote\n def f():\n return ray._get_runtime_context().current_driver_id\n\n assert dummy_driver_id == ray.get(f.remote())\n\n ray.shutdown()\n\n\ndef test_object_id_properties():\n id_bytes = b\"00112233445566778899\"\n object_id = ray.ObjectID(id_bytes)\n assert object_id.binary() == id_bytes\n object_id = ray.ObjectID.nil()\n assert object_id.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(b\"0123456789\")\n object_id = ray.ObjectID.from_random()\n assert not object_id.is_nil()\n assert object_id.binary() != id_bytes\n id_dumps = pickle.dumps(object_id)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_id\n file_prefix = \"test_object_id_properties\"\n\n # Make sure the ids are fork safe.\n def write(index):\n str = ray.ObjectID.from_random().hex()\n with open(\"{}{}\".format(file_prefix, index), \"w\") as fo:\n fo.write(str)\n\n def read(index):\n with open(\"{}{}\".format(file_prefix, index), \"r\") as fi:\n for line in fi:\n return line\n\n processes = [Process(target=write, args=(_, )) for _ in range(4)]\n for process in processes:\n process.start()\n for process in processes:\n process.join()\n hexes = {read(i) for i in range(4)}\n [os.remove(\"{}{}\".format(file_prefix, i)) for i in range(4)]\n assert len(hexes) == 4\n\n\n@pytest.fixture\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(num_cpus=1, object_store_memory=10**8)\n\n @ray.remote\n def 
f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.plasma_client.contains(\n ray.pyarrow.plasma.ObjectID(x_id.binary()))\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_ray_setproctitle(ray_start_2_cpus):\n @ray.remote\n class UniqueName(object):\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:f()\"\n\n @ray.remote\n def unique_1():\n assert setproctitle.getproctitle(\n ) == \"ray_worker:ray.tests.test_basic.unique_1()\"\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.WorkerID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n\n@pytest.mark.skipif(\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(ray_start_2_cpus):\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(subprocess.check_output([\"ray\", \"stack\"]))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_pandas_parquet_serialization():\n # Only test this if pandas is installed\n pytest.importorskip(\"pandas\")\n\n import pandas as pd\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"parquet-test\")\n pd.DataFrame({\"col1\": [0, 1], \"col2\": [0, 1]}).to_parquet(filename)\n with open(os.path.join(tempdir, \"parquet-compression\"), \"wb\") as f:\n table = pa.Table.from_arrays([pa.array([1, 2, 3])], [\"hello\"])\n pq.write_table(table, f, compression=\"lz4\")\n # Clean up\n shutil.rmtree(tempdir)\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n random_name = ray.ObjectID.from_random().hex()\n temp_raylet_socket_dir = \"/tmp/ray/tests/{}\".format(random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)\n\n\ndef test_raylet_is_robust_to_random_messages(ray_start_regular):\n node_manager_address = None\n node_manager_port = None\n for client in ray.nodes():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n 
node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b\"asdf\")\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_non_ascii_comment(ray_start_regular):\n @ray.remote\n def f():\n # 日本語 Japanese comment\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\n@ray.remote\ndef echo(x):\n return x\n\n\n@ray.remote\nclass WithConstructor(object):\n def __init__(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\n@ray.remote\nclass WithoutConstructor(object):\n def set_data(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\nclass BaseClass(object):\n def __init__(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\n@ray.remote\nclass DerivedClass(BaseClass):\n def __init__(self, data):\n # Due to different behaviors of super in Python 2 and Python 3,\n # we use BaseClass directly here.\n BaseClass.__init__(self, data)\n\n\ndef test_load_code_from_local(shutdown_only):\n ray.init(load_code_from_local=True, num_cpus=4)\n message = \"foo\"\n # Test normal function.\n assert ray.get(echo.remote(message)) == message\n # Test actor class with constructor.\n actor = WithConstructor.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test actor class without constructor.\n actor = WithoutConstructor.remote()\n actor.set_data.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test derived actor class.\n actor = DerivedClass.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test using ray.remote decorator on raw classes.\n base_actor_class = ray.remote(num_cpus=1)(BaseClass)\n base_actor = base_actor_class.remote(message)\n assert ray.get(base_actor.get_data.remote()) == message\n\n\ndef test_shutdown_disconnect_global_state():\n ray.init(num_cpus=0)\n ray.shutdown()\n\n with pytest.raises(Exception) as e:\n ray.objects()\n assert str(e.value).endswith(\"ray.init has been called.\")\n\n\n@pytest.mark.parametrize(\n \"ray_start_object_store_memory\", [10**8], indirect=True)\ndef test_redis_lru_with_set(ray_start_object_store_memory):\n x = np.zeros(8 * 10**7, dtype=np.uint8)\n x_id = ray.put(x)\n\n # Remove the object from the object table to simulate Redis LRU eviction.\n removed = False\n start_time = time.time()\n while time.time() < start_time + 10:\n if ray.state.state.redis_clients[0].delete(b\"OBJECT\" +\n x_id.binary()) == 1:\n removed = True\n break\n assert removed\n\n # Now evict the object from the object store.\n ray.put(x) # This should not crash.\n\n\ndef test_decorated_function(ray_start_regular):\n def function_invocation_decorator(f):\n def new_f(args, kwargs):\n # Reverse the arguments.\n return f(args[::-1], {\"d\": 5}), kwargs\n\n return new_f\n\n def f(a, b, c, d=None):\n return a, b, c, d\n\n f.__ray_invocation_decorator__ = function_invocation_decorator\n f = ray.remote(f)\n\n result_id, kwargs = f.remote(1, 2, 3, d=4)\n assert kwargs == {\"d\": 4}\n assert ray.get(result_id) == (3, 2, 1, 5)\n\n\ndef test_get_postprocess(ray_start_regular):\n def get_postprocessor(object_ids, values):\n return [value for value in values if value > 0]\n\n ray.worker.global_worker._post_get_hooks.append(get_postprocessor)\n\n assert ray.get(\n [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]\n\n\ndef 
test_export_after_shutdown(ray_start_regular):\n # This test checks that we can use actor and remote function definitions\n # across multiple Ray sessions.\n\n @ray.remote\n def f():\n pass\n\n @ray.remote\n class Actor(object):\n def method(self):\n pass\n\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray and use the remote function and actor again.\n ray.init(num_cpus=1)\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray again and make sure that these definitions can be exported from\n # workers.\n ray.init(num_cpus=2)\n\n @ray.remote\n def export_definitions_from_worker(remote_function, actor_class):\n ray.get(remote_function.remote())\n actor_handle = actor_class.remote()\n ray.get(actor_handle.method.remote())\n\n ray.get(export_definitions_from_worker.remote(f, Actor))\n" ]
[ [ "numpy.random.uniform", "numpy.ones", "numpy.int8", "numpy.zeros", "numpy.random.permutation", "numpy.testing.assert_equal", "pandas.DataFrame", "numpy.random.normal", "numpy.float32", "numpy.int64", "numpy.arange", "numpy.int32", "numpy.uint32", "numpy.uint64", "numpy.array", "numpy.random.randint", "numpy.float64", "numpy.uint8" ] ]
rosequ/pytorch-examples
[ "659a73cc68fc9d2d68d0c43fb33ff446a2b86c06" ]
[ "nn/two_layer_net_nn.py" ]
[ "import torch\nfrom torch.autograd import Variable\n\n\"\"\"\nA fully-connected ReLU network with one hidden layer, trained to predict y from x\nby minimizing squared Euclidean distance.\n\nThis implementation uses the nn package from PyTorch to build the network.\nPyTorch autograd makes it easy to define computational graphs and take gradients,\nbut raw autograd can be a bit too low-level for defining complex neural networks;\nthis is where the nn package can help. The nn package defines a set of Modules,\nwhich you can think of as a neural network layer that has produces output from\ninput and may have some trainable weights.\n\"\"\"\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold inputs and outputs, and wrap them in Variables.\nx = Variable(torch.randn(N, D_in))\ny = Variable(torch.randn(N, D_out), requires_grad=False)\n\n# Use the nn package to define our model as a sequence of layers. nn.Sequential\n# is a Module which contains other Modules, and applies them in sequence to\n# produce its output. Each Linear Module computes output from input using a\n# linear function, and holds internal Variables for its weight and bias.\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n )\n\n# The nn package also contains definitions of popular loss functions; in this\n# case we will use Mean Squared Error (MSE) as our loss function.\nloss_fn = torch.nn.MSELoss(size_average=False)\n\nlearning_rate = 1e-4\nfor t in range(500):\n # Forward pass: compute predicted y by passing x to the model. Module objects\n # override the __call__ operator so you can call them like functions. When\n # doing so you pass a Variable of input data to the Module and it produces\n # a Variable of output data.\n y_pred = model(x)\n\n # Compute and print loss. We pass Variables containing the predicted and true\n # values of y, and the loss function returns a Variable containing the loss.\n loss = loss_fn(y_pred, y)\n print(t, loss.data[0])\n \n # Zero the gradients before running the backward pass.\n model.zero_grad()\n\n # Backward pass: compute gradient of the loss with respect to all the learnable\n # parameters of the model. Internally, the parameters of each Module are stored\n # in Variables with requires_grad=True, so this call will compute gradients for\n # all learnable parameters in the model.\n loss.backward()\n\n # Update the weights using gradient descent. Each parameter is a Variable, so\n # we can access its data and gradients like we did before.\n for param in model.parameters():\n param.data -= learning_rate * param.grad.data\n" ]
[ [ "torch.randn", "torch.nn.ReLU", "torch.nn.Linear", "torch.nn.MSELoss" ] ]
daili0015/ModelFeast
[ "03afca0b129532135910ee2ac72a3b85be795289" ]
[ "models/StereoCNN/Resnet_module.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: zcy\n# @Date: 2019-02-14 19:29:27\n# @Last Modified by: zcy\n# @Last Modified time: 2019-02-15 15:06:31\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom functools import partial\n\n__all__ = ['ResNet', 'BasicBlock', 'Bottleneck']\n\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n\n\ndef downsample_basic_block(x, planes, stride):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.Tensor(\n out.size(0), planes - out.size(1), out.size(2), out.size(3),\n out.size(4)).zero_()\n if isinstance(out.data, torch.cuda.FloatTensor):\n zero_pads = zero_pads.cuda()\n\n out = torch.cat([out.data, zero_pads], dim=1)\n\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3x3(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n self.conv2 = nn.Conv3d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self,\n block,\n layers,\n shortcut_type='B',\n n_classes=400,\n in_channels=3):\n \n super(ResNet, self).__init__()\n\n first_features = 64 if in_channels==3 else 32\n self.inplanes = first_features\n self.conv1 = nn.Conv3d(\n in_channels,\n first_features,\n kernel_size=7,\n stride=(1, 2, 2),\n padding=(3, 3, 3),\n bias=False)\n self.bn1 = nn.BatchNorm3d(first_features)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(block, first_features, layers[0], shortcut_type)\n self.layer2 = self._make_layer(\n block, 128, layers[1], shortcut_type, stride=2)\n self.layer3 = self._make_layer(\n block, 256, layers[2], shortcut_type, stride=2)\n self.layer4 = self._make_layer(\n block, 512, layers[3], shortcut_type, stride=2)\n # last_duration = int(math.ceil(sample_duration / 16))\n # last_size = int(math.ceil(sample_size / 32))\n # self.avgpool = nn.AvgPool3d(\n # 
(last_duration, last_size, last_size), stride=1)\n        self.fc = nn.Linear(512 * block.expansion, n_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv3d):\n                m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')\n            elif isinstance(m, nn.BatchNorm3d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            if shortcut_type == 'A':\n                downsample = partial(\n                    downsample_basic_block,\n                    planes=planes * block.expansion,\n                    stride=stride)\n            else:\n                downsample = nn.Sequential(\n                    nn.Conv3d(\n                        self.inplanes,\n                        planes * block.expansion,\n                        kernel_size=1,\n                        stride=stride,\n                        bias=False), nn.BatchNorm3d(planes * block.expansion))\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n\n        # x = self.avgpool(x)\n        x = F.adaptive_avg_pool3d(x, (1, 1, 1))\n\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n\n        return x\n\n\n\n\nif __name__ == '__main__':\n    a = 64\n    img_size=(a, a)\n    # resnet10 is not defined in this module; ResNet-10 is BasicBlock with layers [1, 1, 1, 1]\n    model = ResNet(BasicBlock, [1, 1, 1, 1], n_classes=2, in_channels=1)\n    x = torch.randn(3, 1, 22, img_size[0], img_size[1])\n    # (BatchSize, channels, depth, h, w)\n    y = model(x)\n    print(y.size())\n\n" ]
[ [ "torch.nn.init.kaiming_normal_", "torch.nn.MaxPool3d", "torch.nn.BatchNorm3d", "torch.nn.Linear", "torch.randn", "torch.nn.functional.adaptive_avg_pool3d", "torch.nn.ReLU", "torch.nn.Sequential", "torch.nn.functional.avg_pool3d", "torch.cat", "torch.nn.Conv3d" ] ]
Johnson-yue/stylegan2encoder
[ "709ccb52fe9a1b4dfdc367f0390cf419f2c3e972" ]
[ "encoder/generator_model.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport dnnlib.tflib as tflib\nfrom functools import partial\n\n\ndef create_stub(name, batch_size):\n return tf.constant(0, dtype='float32', shape=(batch_size, 0))\n\n\ndef create_variable_for_generator(name, batch_size):\n return tf.get_variable('learnable_dlatents',\n shape=(batch_size, 18, 512),\n dtype='float32',\n initializer=tf.initializers.random_normal())\n\n\nclass Generator:\n def __init__(self, model, batch_size, randomize_noise=False):\n self.batch_size = batch_size\n\n self.initial_dlatents = np.zeros((self.batch_size, 18, 512))\n model.components.synthesis.run(self.initial_dlatents,\n randomize_noise=randomize_noise, minibatch_size=self.batch_size,\n custom_inputs=[partial(create_variable_for_generator, batch_size=batch_size),\n partial(create_stub, batch_size=batch_size)],\n structure='fixed')\n\n self.sess = tf.get_default_session()\n self.graph = tf.get_default_graph()\n\n self.dlatent_variable = next(v for v in tf.global_variables() if 'learnable_dlatents' in v.name)\n self.set_dlatents(self.initial_dlatents)\n\n self.generator_output = self.graph.get_tensor_by_name('G_synthesis_1/_Run/concat/concat:0')\n self.generated_image = tflib.convert_images_to_uint8(self.generator_output, nchw_to_nhwc=True, uint8_cast=False)\n self.generated_image_uint8 = tf.saturate_cast(self.generated_image, tf.uint8)\n\n def reset_dlatents(self):\n self.set_dlatents(self.initial_dlatents)\n\n def set_dlatents(self, dlatents):\n assert (dlatents.shape == (self.batch_size, 18, 512))\n self.sess.run(tf.assign(self.dlatent_variable, dlatents))\n\n def get_dlatents(self):\n return self.sess.run(self.dlatent_variable)\n\n def generate_images(self, dlatents=None):\n if dlatents:\n self.set_dlatents(dlatents)\n return self.sess.run(self.generated_image_uint8)\n" ]
[ [ "numpy.zeros", "tensorflow.get_default_session", "tensorflow.global_variables", "tensorflow.initializers.random_normal", "tensorflow.saturate_cast", "tensorflow.assign", "tensorflow.get_default_graph", "tensorflow.constant" ] ]
Ciaran-Carroll/college
[ "46052aa177280f7900e04e0e828247d7097eb07b" ]
[ "Project/Project 2/Harris_Corner_Detection.py" ]
[ "'''\n25th April 2018\nGroup Members: Kevin Burke (14155893)\n\t\t\t\tPaul Lynch (16123778)\n\t\t\t\tCiaran Carroll (13113259)\n Qicong Zhang (16069978)\n\n\nProject 2:\nResearch and Implement Harris Corner Detection using Python/Numpy Investigating\nthe behaviour of the algorithm.\n\nAims:\n - Find corners in each image using Harris Corner detection algorithm\n - Generate a normalised patch descriptor vector for all points\n - Find the matching points using Normalised Cross Correlation\n - Use an Exhaustive RANSAC technique to find the best fit translation between the two images\n - Plot the translated images to generate a composite 'stitched' image.\n'''\n\nfrom pylab import *\nimport numpy as np\nfrom scipy.ndimage import filters\nfrom matplotlib import pylab as plt\nfrom PIL import Image\n\ndef compute_harris_response(image, sigma = 2):\n ''' Compute the Harris corner detector algorithm for each pixel\n in a gray level image. '''\n \n # Gaussian Kernels (sigma=1) for image\n imagex = np.zeros(image.shape)\n filters.gaussian_filter(image, (1,1), (0,1), imagex)\n imagey = np.zeros(image.shape)\n filters.gaussian_filter(image, (1,1), (1,0), imagey)\n \n # Compute components of the Harris matrix (using sigma=2)\n A = filters.gaussian_filter(imagex*imagex, sigma) #A = G * Ix^2\n B = filters.gaussian_filter(imagex*imagey, sigma) #B = G * IxIy\n C = filters.gaussian_filter(imagey*imagey, sigma) #C = G * Iy^2\n \n #Components of Matrix [(A,B),(B,C)]\n \n # Determinant and trace\n Det_M = A*C - B**2\n Tr_M = A + C\n\n R = Det_M / (Tr_M * (10**(-6)))\n \n return R\n \n \ndef get_harris_points(harris_im, min_d =10, threshold=0.1): \n ''' Returns corners from a Harris response image. min_d is the minimum number of \n pixels seperating corners and image boundary. '''\n \n # Find top corne candidates above a threshold\n corner_threshold = harris_im.max() * threshold\n harrisim_t = (harris_im > corner_threshold) * 1\n \n # Find coordinates of these candidates, and their responsevalues\n coords = array(harrisim_t.nonzero()).T\n candidate_values = [harris_im[c[0],c[1]] for c in coords]\n \n # Find the indices into the 'candidate_values' array that sort in order\n # of increasing response strength\n indices = argsort(candidate_values)[::-1]\n \n # store allowed point locations in array that sort\n allowed_locations = zeros(harris_im.shape)\n allowed_locations[min_d:-min_d,min_d:-min_d] = 1\n \n # select the best points using min_distance\n filtered_coords = []\n for i in indices:\n if allowed_locations[coords[i,0], coords[i,1]] == 1:\n filtered_coords.append(coords[i])\n allowed_locations[(coords[i,0]-min_d):(coords[i,0]+min_d),\n (coords[i,1]-min_d):(coords[i,1]+min_d)] = 0\n \n return filtered_coords\n \n \ndef plot_harris_points(image,filtered_coords):\n ''' Plots corners found in image '''\n plt.figure()\n plt.gray()\n plt.imshow(image)\n plt.plot([p[1] for p in filtered_coords],\n [p[0] for p in filtered_coords],'.')\n plt.axis('off')\n plt.show()\n \n\ndef get_descriptors(image,filtered_coords,wid=5): #return pixel value\n ''' For each point return pixels values around the point using neighbourhood\n of width 2_wid+1 '''\n \n desc = []\n for coords in filtered_coords:\n patch = image[coords[0]-wid:coords[0]+wid+1,\n coords[1]-wid:coords[1]+wid+1].flatten()\n desc.append(patch)\n return desc\n\n\ndef match(desc1, desc2, threshold=0.95): \n ''' For each of the corner descriptor in image 1, select its match\n to the image 2 using normaliazed cross correlation. 
'''\n\n    n = len(desc1[0])\n\n    # pair-wise distances\n    d = -np.ones((len(desc1),len(desc2)))\n    for i in range(len(desc1)):\n        for j in range(len(desc2)):\n            d1 = (desc1[i] - np.mean(desc1[i])) / np.std(desc1[i])\n            d2 = (desc2[j] - np.mean(desc2[j])) / np.std(desc2[j])\n            ncc_value = np.sum(d1 * d2) / (n-1)\n            if ncc_value > threshold: #Use 0.95 threshold for better matching\n                d[i,j] = ncc_value\n\n    ndx = np.argsort(-d)\n    matchscores = ndx[:,0]\n\n    return matchscores\n\n\ndef match_twosided(desc1, desc2, threshold=0.95): #Same as above, two sided symmetric version\n    ''' Two-sided symmetric version of match(). '''\n    matches_12 = match(desc1,desc2,threshold)\n    matches_21 = match(desc2,desc1,threshold)\n\n    ndx_12 = np.where(matches_12 >= 0)[0]\n\n    # Remove matches that are not symmetric\n    for n in ndx_12:\n        if matches_21[matches_12[n]] != n:\n            matches_12[n] = -1\n\n    return matches_12\n\n\ndef appendimages(image1, image2):\n    ''' Return a new image that appends the two images side-by-side. '''\n\n    # Select the image with the fewest rows and fill in enough empty rows\n    rows1 = image1.shape[0]\n    rows2 = image2.shape[0]\n\n    # pad the shorter image so both have the same number of rows\n    if rows1 < rows2:\n        image1 = np.concatenate((image1, np.zeros((rows2-rows1,image1.shape[1]))), axis=0)\n    elif rows1 > rows2:\n        image2 = np.concatenate((image2, np.zeros((rows1-rows2,image2.shape[1]))), axis=0)\n    # If none of these cases are true, no filling needed\n\n\n    return np.concatenate((image1, image2), axis=1)\n\n\ndef plot_matches(image1, image2, locations1, locations2, matchscores, show_below=True):\n    ''' Show a figure with lines joining the accepted matches '''\n    image3 = appendimages(image1, image2)\n    if show_below:\n        image3 = np.vstack((image3, image3))\n\n    plt.imshow(image3)\n\n    cols1 = image1.shape[1]\n    for i,m in enumerate(matchscores):\n        if m>0:\n            plt.plot([locations1[i][1],locations2[m][1]+cols1],[locations1[i][0],locations2[m][0]],'c')\n    plt.axis('off')\n\n#Take in both images, in grayscale and rgb\nimage1_gray = np.array(Image.open('balloon1.png').convert('L'))\nimage1_rgb = np.array(Image.open('balloon1.png'))\n\nimage2_gray = np.array(Image.open('balloon2.png').convert('L'))\nimage2_rgb = np.array(Image.open('balloon2.png'))\n\n#Find Harris Response, R, and get an array of points, plot these points \n#for both images\nharris_image1 = compute_harris_response(image1_gray)\nfiltered_coords1 = get_harris_points(harris_image1, 6)\nprint('No. of Harris Interest points in Image 1: ', len(filtered_coords1))\nplot_harris_points(image1_rgb, filtered_coords1)\n\nharris_image2 = compute_harris_response(image2_gray)\nfiltered_coords2 = get_harris_points(harris_image2, 6)\nprint('No. of Harris Interest points in Image 2: ', len(filtered_coords2))\nplot_harris_points(image2_rgb, filtered_coords2)\n\nplt.imshow(harris_image1)\nplt.show()\nplt.imshow(harris_image2)\nplt.show()\n\nwid = 5\nharris_image3 = compute_harris_response(image1_gray, 5)\nfiltered_coords3 = get_harris_points(harris_image3, wid+1)\nd1 = get_descriptors(image1_gray, filtered_coords3, wid)\n\nharris_image4 = compute_harris_response(image2_gray, 5)\nfiltered_coords4 = get_harris_points(harris_image4 ,wid+1)\nd2 = get_descriptors(image2_gray, filtered_coords4,wid)\n\n#Find matches between the two sets of descriptors in the image pair\nmatches = match_twosided(d1,d2)\n\n#Show the matches side by side\nplt.figure()\nplt.gray()\nplot_matches(image1_rgb, image2_rgb, filtered_coords3, filtered_coords4,matches)\nplt.show()\n\n" ]
[ [ "numpy.vstack", "numpy.sum", "numpy.zeros", "matplotlib.pylab.figure", "numpy.argsort", "matplotlib.pylab.show", "numpy.std", "numpy.where", "matplotlib.pylab.axis", "matplotlib.pylab.imshow", "scipy.ndimage.filters.gaussian_filter", "numpy.concatenate", "matplotlib.pylab.plot", "matplotlib.pylab.gray", "numpy.mean" ] ]
Daipuwei/Introduction-to-Machine-Learning-Based-on-Mathematical-Principles-with-Python
[ "625675ce514e461ce74cf30586d241cbcb1e4848" ]
[ "Chapter4/Other.py" ]
[ "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2018/11/4 18:48\r\n# @Author : DaiPuWei\r\n# E-Mail : [email protected]\r\n# blog : https://blog.csdn.net/qq_30091945\r\n# @Site : 中国民航大学北教25实验室506\r\n# @File : Other.py\r\n# @Software: PyCharm\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndef Merge(data,row,col):\r\n \"\"\"\r\n 这是生成DataFrame数据的函数\r\n :param data: 数据,格式为列表(list),不是numpy.array\r\n :param row: 行名称\r\n :param col: 列名称\r\n \"\"\"\r\n data = np.array(data).T\r\n return pd.DataFrame(data=data,columns=col,index=row)\r\n\r\ndef Confusion_Matrix_Merge(confusion_matrix,name):\r\n \"\"\"\r\n 这是将混淆矩阵转化为DataFrame数据的函数\r\n :param confusion_matrix: 混淆矩阵\r\n :param name: 分类名称\r\n \"\"\"\r\n return pd.DataFrame(data=confusion_matrix,index=name,columns=name)\r\n\r\ndef confusion_matrix(real_result,predict_result):\r\n \"\"\"\r\n 这是计算预测结果的混淆矩阵的函数\r\n :param real_result: 真实分类结果\r\n :param predict_result: 预测分类结果\r\n \"\"\"\r\n labels = []\r\n for result in real_result:\r\n if result not in labels:\r\n labels.append(result)\r\n labels = np.sort(labels)\r\n # 计算混淆矩阵\r\n confusion_matrix = []\r\n for label1 in labels:\r\n # 真实结果中为label1的数据下标\r\n index = real_result == label1\r\n _confusion_matrix = []\r\n for label2 in labels:\r\n _predict_result = predict_result[index]\r\n _confusion_matrix.append(np.sum(_predict_result == label2))\r\n confusion_matrix.append(_confusion_matrix)\r\n confusion_matrix = np.array(confusion_matrix)\r\n return confusion_matrix\r\n\r\ndef Transform(Label):\r\n \"\"\"\r\n 这是将one-hot编码标签转化为数值标签的函数\r\n :param Label: one-hot标签\r\n \"\"\"\r\n _Label = []\r\n for label in Label:\r\n _Label.append(np.argmax(label))\r\n return np.array(_Label)" ]
[ [ "numpy.sum", "pandas.DataFrame", "numpy.argmax", "numpy.sort", "numpy.array" ] ]
retwal/Predictive
[ "57c3cb64901b7a0629b70053ecf01dac5be66d6f" ]
[ "Chapter11/contextual_bandit_agent.py" ]
[ "import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport os\n\nfrom tensorflow.python.framework import ops\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nops.reset_default_graph()\n\nclass contextualBandit():\n def __init__(self):\n '''\n This constructor, lists out all of our bandits. We assume the current state being arms 4, 2, 3 and 1 that are the most optimal respectively\n '''\n self.state = 0 \n self.bandits = np.array([[0.2,0,-0.0,-5], [0.1,-5,1,0.25], [0.3,0.4,-5,0.5], [-5,5,5,5]])\n self.num_bandits = self.bandits.shape[0]\n self.num_actions = self.bandits.shape[1]\n \n def getBandit(self): \n '''\n This function returns a random state for each episode.\n '''\n self.state = np.random.randint(0, len(self.bandits)) \n return self.state\n \n def pullArm(self,action): \n '''\n This funciton creates the reword to the bandits on the basis of randomly generated numbers. It then returns either a positive or negative reward -i.e. action\n ''' \n bandit = self.bandits[self.state, action]\n result = np.random.randn(1)\n if result > bandit:\n return 1\n else:\n return -1\n\nclass ContextualAgent():\n def __init__(self, lr, s_size,a_size):\n '''\n This function establishes the feed-forward part of the network. The agent takes a state and produces an action -i.e. contextual agent\n ''' \n self.state_in= tf.placeholder(shape=[1], dtype=tf.int32)\n state_in_OH = slim.one_hot_encoding(self.state_in, s_size)\n output = slim.fully_connected(state_in_OH, a_size,biases_initializer=None, activation_fn=tf.nn.sigmoid, weights_initializer=tf.ones_initializer())\n self.output = tf.reshape(output,[-1])\n self.chosen_action = tf.argmax(self.output,0)\n self.reward_holder = tf.placeholder(shape=[1], dtype=tf.float32)\n self.action_holder = tf.placeholder(shape=[1], dtype=tf.int32)\n self.responsible_weight = tf.slice(self.output, self.action_holder,[1])\n self.loss = -(tf.log(self.responsible_weight)*self.reward_holder)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)\n self.update = optimizer.minimize(self.loss)\n\ntf.reset_default_graph() #Clear the Tensorflow graph.\nlrarning_rate = 0.001\ncontextualBandit = contextualBandit() #Load the bandits.\ncontextualAgent = ContextualAgent(lr=lrarning_rate, s_size=contextualBandit.num_bandits, a_size=contextualBandit.num_actions) #Load the agent.\nweights = tf.trainable_variables()[0] #The weights we will evaluate to look into the network.\n\nmax_iteration = 10000 #Set the max iteration for training the agent.\ntotal_reward = np.zeros([contextualBandit.num_bandits,contextualBandit.num_actions]) #Set scoreboard for bandits to 0.\nchance_of_random_action = 0.1 #Set the chance of taking a random action.\n\ninit_op = tf.global_variables_initializer()\nright_flag = 0\nwrong_flag = 0\n\n# Launch the tensorflow graph\nwith tf.Session() as sess:\n sess.run(init_op)\n i = 0\n while i < max_iteration:\n s = contextualBandit.getBandit() #Get a state from the environment.\n #Choose either a random action or one from our network.\n if np.random.rand(1) < chance_of_random_action:\n action = np.random.randint(contextualBandit.num_actions)\n else:\n action = sess.run(contextualAgent.chosen_action,feed_dict={contextualAgent.state_in:[s]})\n reward = contextualBandit.pullArm(action) #Get our reward for taking an action given a bandit.\n #Update the network.\n feed_dict={contextualAgent.reward_holder:[reward],contextualAgent.action_holder:[action],contextualAgent.state_in:[s]}\n 
_,ww = sess.run([contextualAgent.update,weights], feed_dict=feed_dict) \n #Update our running tally of scores.\n total_reward[s,action] += reward\n if i % 500 == 0:\n print(\"Mean reward for each of the \" + str(contextualBandit.num_bandits) + \" bandits: \" + str(np.mean(total_reward,axis=1)))\n i+=1\n\nfor a in range(contextualBandit.num_bandits):\n print(\"The agent thinks action \" + str(np.argmax(ww[a])+1) + \" for bandit \" + str(a+1) + \" would be the most efficient one.\")\n if np.argmax(ww[a]) == np.argmin(contextualBandit.bandits[a]):\n right_flag += 1\n print(\" and it was right at the end!\")\n else:\n print(\" and it was wrong at the end!\")\n wrong_flag += 1\n\nprediction_accuracy = (right_flag/(right_flag+wrong_flag))\nprint(\"Prediction accuracy (%):\", prediction_accuracy * 100)\n" ]
[ [ "tensorflow.reshape", "tensorflow.slice", "tensorflow.contrib.slim.one_hot_encoding", "tensorflow.global_variables_initializer", "numpy.argmin", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.ones_initializer", "numpy.random.rand", "numpy.mean", "numpy.zeros", "numpy.argmax", "tensorflow.Session", "tensorflow.reset_default_graph", "tensorflow.placeholder", "numpy.random.randn", "tensorflow.trainable_variables", "tensorflow.train.GradientDescentOptimizer", "tensorflow.argmax", "numpy.array", "tensorflow.log", "numpy.random.randint" ] ]
openwfm/wrfxpy
[ "7f7feba97baa6cd85134185520559028d2b5464e" ]
[ "src/fmda/fuel_moisture_da.py" ]
[ "# Copyright (C) 2013-2016 Martin Vejmelka, UC Denver\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR\n# A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom wrf.wrf_data import WRFModelData\nfrom .trend_surface_model import fit_tsm\nfrom utils import ensure_dir, great_circle_distance, find_closest_grid_point\nfrom .fuel_moisture_model import FuelMoistureModel\nfrom .fm10_observation import FM10Observation\n\ntry:\n from ingest.MesoDB.mesoDB import mesoDB\nexcept:\n pass\n\nimport sys\nimport os\nimport os.path as osp\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport pytz\nimport netCDF4\nimport logging\nimport json\nfrom MesoPy import Meso,MesoPyError\nfrom utils import inq\nfrom six.moves import range\nfrom six.moves import zip\n\ndef check_overlap(wrf_path,ts_now):\n \"\"\"\n Check if the WRF file <wrf_path> timstamps contain <ts_now>.\n \"\"\"\n wrfout = WRFModelData(wrf_path)\n outts = wrfout['GMT']\n if ts_now in outts:\n return True\n else:\n logging.info(\"FMDA previous forecast [%s - %s] exists, running DA till %s\" % (str(outts[0]),str(outts[-1]),str(ts_now)))\n return False\n\n\ndef retrieve_mesowest_observations(meso_token, tm_start, tm_end, glat, glon, ghgt):\n \"\"\"\n Retrieve observation data from Mesowest and repackage them as a time-indexed\n dictionary of lists of observations. 
\n    :param meso_token: the mesowest API access token or list of them\n    :param tm_start: the start of the observation window\n    :param tm_end: the end of the observation window\n    :param glat: the latitudes of the grid points\n    :param glon: the longitudes of the grid points\n    :param ghgt: the elevation of the grid points\n    \"\"\"\n    def decode_meso_time(t):\n        # example: '2016-03-30T00:30:00Z'\n        return datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=pytz.UTC)\n\n    def meso_time(dt):\n        # example: 201603311600\n        return '%04d%02d%02d%02d%02d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute)\n\n    # the bbox for mesowest is: (min(lon), min(lat), max(lon), max(lat)).\n    min_lat, max_lat = np.amin(glat), np.amax(glat)\n    min_lon, max_lon = np.amin(glon), np.amax(glon)\n\n    # retrieve data from Mesonet API (http://api.mesowest.net/)\n    try:\n        logging.info('retrieve_mesowest_observations: retrieving data using MesoDB')\n        db = mesoDB('ingest/MesoDB', meso_token)\n        db.update['startTime'] = tm_start - timedelta(minutes=30)\n        db.update['endTime'] = tm_end + timedelta(minutes=30)\n        db.params['startTime'] = tm_start - timedelta(minutes=30)\n        db.params['endTime'] = tm_end + timedelta(minutes=30)\n        db.params['longitude1'] = min_lon\n        db.params['longitude2'] = max_lon\n        db.params['latitude1'] = min_lat\n        db.params['latitude2'] = max_lat\n        df = db.get_DB().dropna(subset=['fm10'])\n        st = db.sites()\n        if not len(df):\n            logging.info('retrieve_mesowest_observations: no data for the query specified')\n            return {}\n        logging.info('retrieve_mesowest_observations: re-packaging the observations')\n        obs_data = {}\n        for stid,data in df.groupby('STID'):\n            if len(data):\n                st_data = st.loc[stid]\n                st_lat, st_lon = float(st_data['LATITUDE']), float(st_data['LONGITUDE'])\n                ngp = find_closest_grid_point(st_lon, st_lat, glon, glat)\n                st_elev = st_data['ELEVATION']\n                if st_elev is None:\n                    elev = ghgt[ngp]\n                else:\n                    elev = float(st_data['ELEVATION']) / 3.2808\n                dts = data.datetime.dt.to_pydatetime()\n                fms = np.array(data.fm10)\n                for ts,fm_obs in zip(dts,fms):\n                    if fm_obs is not None:\n                        o = FM10Observation(ts,st_lat,st_lon,elev,float(fm_obs)/100.,ngp)\n                        obs_t = obs_data.get(ts, [])\n                        obs_t.append(o)\n                        obs_data[ts] = obs_t\n        return obs_data\n    except Exception as e:\n        logging.warning('retrieve_mesowest_observations: failed with exception %s' % e)\n        logging.info('retrieve_mesowest_observations: retrieving data using Meso instead')\n    if isinstance(meso_token,str):\n        meso_tokens = [meso_token]\n        n_tokens = 1\n    else:\n        meso_tokens = meso_token\n        n_tokens = len(meso_tokens)\n    for tn,meso_token in enumerate(meso_tokens):\n        m = Meso(meso_token)\n        logging.info(\"Retrieving fuel moisture from %s to %s\" % (meso_time(tm_start - timedelta(minutes=30)),\n                                                                meso_time(tm_end + timedelta(minutes=30))))\n        logging.info(\"bbox=' %g,%g,%g,%g'\" % (min_lon, min_lat, max_lon, max_lat))\n        try:\n            meso_obss = m.timeseries(meso_time(tm_start - timedelta(minutes=30)),\n                                     meso_time(tm_end + timedelta(minutes=30)),\n                                     showemptystations = '0', bbox='%g,%g,%g,%g' % (min_lon, min_lat, max_lon, max_lat),\n                                     vars='fuel_moisture')\n            break\n        except Exception as e:\n            if tn == n_tokens-1:\n                raise MesoPyError('Could not connect to the API. Probably the token(s) usage for this month is full.')\n            else:\n                logging.warning('Could not connect to the API. Probably the token usage for this month is full. 
Trying next token...')\n\n if meso_obss is None:\n logging.info('retrieve_mesowest_observations: Meso.timeseries returned None')\n return {}\n \n logging.info('retrieve_mesowest_observations: re-packaging the observations')\n # repackage all the observations into a time-indexed structure which groups\n # observations at the same time together\n obs_data = {}\n for stinfo in meso_obss['STATION']:\n st_lat, st_lon = float(stinfo['LATITUDE']), float(stinfo['LONGITUDE'])\n ngp = find_closest_grid_point(st_lon, st_lat, glon, glat)\n st_elev = stinfo['ELEVATION']\n if st_elev is None:\n elev = ghgt[ngp] \n else:\n elev = float(stinfo['ELEVATION']) / 3.2808\n dts = [decode_meso_time(x) for x in stinfo['OBSERVATIONS']['date_time']]\n if 'fuel_moisture_set_1' in stinfo['OBSERVATIONS']:\n fms = stinfo['OBSERVATIONS']['fuel_moisture_set_1']\n for ts,fm_obs in zip(dts,fms):\n if fm_obs is not None:\n o = FM10Observation(ts,st_lat,st_lon,elev,float(fm_obs)/100.,ngp)\n obs_t = obs_data.get(ts, [])\n obs_t.append(o)\n obs_data[ts] = obs_t\n\n return obs_data\n\n\ndef execute_da_step(model, model_time, covariates, covariates_names, fm10):\n \"\"\"\n Execute a single DA step from the current state/extended parameters and covariance matrix using\n the <covariates> and observations <fm10>. Assimilation time window is fixed at 60 mins.\n\n :param model: a FuelMoistureModel\n :param model_time: the current model time\n :param covariates: the covariate fields to take into account to model the spatial structure of the FM field\n :param covariates_names: strings with the names of the covariates\n :param fm10: the 10-hr fuel moisture observations\n \"\"\"\n valid_times = [z for z in fm10.keys() if abs((z - model_time).total_seconds()) < 1800]\n\n if len(valid_times) > 0:\n\n # retrieve all observations for current time\n obs_valid_now = []\n for z in valid_times:\n obs_valid_now.extend(fm10[z])\n\n logging.info('FMDA found %d valid observations at model time %s' % (len(obs_valid_now), str(model_time)))\n\n fmc_gc = model.get_state()\n dom_shape = fmc_gc.shape[:2]\n\n logging.info('execute_da_step: model state fmc_gc %s' % inq(fmc_gc))\n # construct covariate storage\n Xd3 = min(len(covariates) + 1, len(obs_valid_now))\n logging.info('FMDA is using %d covariates: %s' % (Xd3,','.join(['fmc_gc[:,:,1]']+covariates_names)))\n X = np.zeros((dom_shape[0], dom_shape[1], Xd3))\n X[:,:,0] = fmc_gc[:,:,1]\n for i,c in zip(list(range(Xd3-1)),covariates):\n X[:,:,i+1] = covariates[i]\n\n # run the trend surface model (clamp output to [0.0 - 2.5] to be safe)\n Kf_fn, Vf_fn = fit_tsm(obs_valid_now, X)\n Kf_fn[Kf_fn < 0.0] = 0.0\n Kf_fn[Kf_fn > 2.5] = 2.5\n\n Kg = np.zeros((dom_shape[0], dom_shape[1], fmc_gc.shape[2]))\n\n # run the data assimilation step now\n logging.info(\"FMDA mean Kf: %g Vf: %g state[0]: %g state[1]: %g state[2]: %g\" %\n (np.mean(Kf_fn), np.mean(Vf_fn), np.mean(fmc_gc[:,:,0]), np.mean(fmc_gc[:,:,1]), np.mean(fmc_gc[:,:,2])))\n model.kalman_update_single2(Kf_fn[:,:,np.newaxis], Vf_fn[:,:,np.newaxis,np.newaxis], 1, Kg)\n logging.info(\"FMDA mean Kf: %g Vf: %g state[0]: %g state[1]: %g state[2]: %g\" %\n (np.mean(Kf_fn), np.mean(Vf_fn), np.mean(fmc_gc[:,:,0]), np.mean(fmc_gc[:,:,1]), np.mean(fmc_gc[:,:,2])))\n else:\n logging.warning('FMDA no valid observations found, skipping data assimilation.')\n \n\ndef run_data_assimilation(wrf_model, fm10, wrf_model_prev = None):\n \"\"\"\n Run the fuel moisture and DA for all time steps in the model wrf_model.\n If a previous run is available, the fuel moisture values 
(and covariance if available)\n    are transferred.\n\n    :param wrf_model: the current WRF data file to process (wrf input or wrf output)\n    :param fm10: a list of the observations of 10-hr fuel moisture available\n    :param wrf_model_prev: optional, the previous WRF data file from which fm state may be copied\n    :return: the fuel moisture model with the assimilated fields\n    \"\"\"\n    tss = wrf_model.get_gmt_times()\n    lat, lon = wrf_model.get_lats(), wrf_model.get_lons()\n    dom_shape = lat.shape\n    T2 = wrf_model['T2']\n    Q2 = wrf_model['Q2']\n    PSFC = wrf_model['PSFC']\n    hgt = wrf_model['HGT']\n    rain = wrf_model['RAIN']\n    # rain is kept in physical units; log(rain + 1) is computed per time step for the covariate below\n    constant = np.ones_like(T2)\n    Ed,Ew = wrf_model['Ed'], wrf_model['Ew']\n    E = 0.5 * (Ed[0,:,:] + Ew[0,:,:])\n\n    P0 = np.diag([0.01,0.01,0.01,0.01,0.001,0.001])\n    Tk = np.array([1.0, 10.0, 100.0, 1000.0]) * 3600\n    Q = np.diag([1e-4,5e-5,1e-5,1e-6,1e-6,1e-6])\n\n    # initialize the grid moisture model with the fuel moisture equilibrium\n    model = FuelMoistureModel(E[:,:,np.newaxis][:,:,np.zeros((4,),dtype=np.int)], Tk, P0)\n\n    # if a previous fuel moisture model run is available, copy its state\n    if wrf_model_prev is not None:\n        logging.info('FMDA replacing fuel moisture equilibrium with previous calculation from %s.' % wrf_model_prev.path)\n        prev_tss = wrf_model_prev.get_gmt_times()\n        if tss[0] in prev_tss:\n            prev_ndx = prev_tss.index(tss[0])\n            model.get_state()[:,:,:3] = wrf_model_prev['FMC_GC'][prev_ndx,:3,:,:].transpose((1,2,0))\n            # the extended parameters live in state slots 4:6, matching the FMEP write-out below\n            model.get_state()[:,:,4:6] = wrf_model_prev['FMEP'][prev_ndx,:,:,:].transpose((1,2,0))\n\n    # precompute static covariates (we assume domains don't move around)\n    cov_lon = lon - np.mean(lon)\n    cov_lat = lat - np.mean(lat)\n    cov_hgt = hgt / 1000.0\n    cov_const = np.ones(dom_shape)\n\n    # advance model and run DA for each timestep\n    for i, ts in enumerate(tss):\n        cov_t2 = T2[i,:,:]\n        cov_q2 = Q2[i,:,:]\n        cov_psfc = PSFC[i,:,:]\n        cov_rain = np.log(rain[i,:,:] + 1.0)\n        covariates = [cov_t2, cov_psfc, cov_lon, cov_lat, cov_hgt, cov_q2, cov_const]\n        covariates_names = ['t2', 'psfc', 'lon', 'lat', 'hgt', 'q2', 'const']\n        if np.any(rain > 0.0):\n            covariates.append(cov_rain)\n            covariates_names.append('rain')\n\n        if i > 0:\n            model.advance_model(Ed[i,:,:], Ew[i,:,:], rain[i,:,:], (ts - tss[i-1]).seconds, Q)\n\n        logging.info('FMDA calling execute_da_step with %d covariates' % len(covariates))\n        execute_da_step(model, ts, covariates, covariates_names, fm10)\n\n        # overwrite the WRF model variables for this time step\n        d = netCDF4.Dataset(wrf_model.path, 'r+')\n        d.variables['FMC_GC'][i,:3,:,:] = model.get_state()[:,:,:3].transpose(2,0,1)\n        d.variables['FMEP'][i,:,:,:] = model.get_state()[:,:,4:6].transpose(2,0,1)\n        d.close()\n\n\ndef assimilate_fm10_observations(path_wrf, path_wrf0, mesowest_token):\n\n    # load the wrfinput file\n    logging.info('assimilate_fm10_observations: loading %s' % path_wrf)\n    wrfin = WRFModelData(path_wrf, ['T2', 'Q2', 'PSFC', 'HGT', 'FMC_GC', 'FMEP'])\n    lat, lon = wrfin.get_lats(), wrfin.get_lons()\n    tss = wrfin.get_gmt_times()\n    tm_start, tm_end = tss[0], tss[-1]\n    dom_shape = lat.shape\n    logging.info('FMDA domain size is %d x %d grid points with lats (%g to %g) and lons (%g to %g)' %\n                 (dom_shape[0], dom_shape[1],np.amin(lat),np.amax(lat),np.amin(lon),np.amax(lon)))\n\n    # compute the diagonal distance between grid points\n    grid_dist_km = great_circle_distance(lon[0,0], lat[0,0], lon[1,1], lat[1,1])\n\n    # retrieve fuel moisture observations via the Mesowest API\n    fm10 = retrieve_mesowest_observations(mesowest_token, 
tm_start, tm_end, wrfin.get_lats(), wrfin.get_lons(), wrfin.get_field('HGT'))\n\n logging.info('FMDA retrieved %d observations from Mesowest.' % len(fm10))\n\n # if a previous cycle is available (i.e. the wrfoutput is a valid file), load the model\n prev_wrf = None\n if path_wrf0 is not None and os.path.exists(path_wrf0) and check_overlap(path_wrf0,tm_start):\n prev_wrf = WRFModelData(path_wrf0)\n outts = prev_wrf['GMT']\n logging.info(\"FMDA previous forecast [%s - %s] exists\" % (str(outts[0]),str(outts[-1])))\n else:\n logging.info(\"FMDA no previous forecast found, running DA from equilibrium at %s\" % str(tm_start))\n \n # run from the start until now (retrieve fuel moisture, extended parameters, covariance matrix)\n run_data_assimilation(wrfin, fm10, prev_wrf)\n\n return 0\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 2:\n print(('usage: %s <wrf-file>' % sys.argv[0]))\n sys.exit(1)\n\n logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\n #retrieve our mesowest token from the store\n tokens = json.load(open('etc/tokens.json'))\n mesowest_token = tokens['mesowest']\n\n logging.info('FMDA opening %s for assimilation' % sys.argv[1])\n assimilate_fm10_observations(sys.argv[1], None, mesowest_token)\n sys.exit(0)\n\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.diag", "numpy.any", "numpy.ones_like", "numpy.amin", "numpy.log", "numpy.amax", "numpy.array", "numpy.mean" ] ]
aijdissanayake/request-management
[ "a88a2ce35a7a1a98630ffd14c1a31a5173b662c8" ]
[ "backend/src/reporting/views.py" ]
[ "from rest_framework.views import APIView\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom django.http import HttpResponse\nfrom xhtml2pdf import pisa\nimport datetime\nfrom django.db import connection\nimport pandas as pd\nimport requests\nimport json\nfrom django.conf import settings\nimport urllib\nimport os\n\nfrom .services import get_police_division_summary, get_category_summary, \\\n get_mode_summary, get_severity_summary, get_status_summary, get_subcategory_summary, get_district_summary, \\\n get_incident_date_summary, get_slip_data, get_daily_category_data, get_daily_summary_data, get_daily_district_data\nfrom .functions import apply_style, decode_column_names, incident_type_title, incident_type_query\n\n'''\nmiddleware to access PDF-service\n'''\nclass ReportingAccessView(APIView):\n '''\n Based on https://github.com/ECLK/pdf-service\n Generates PDF\n\n GET request with required parameters\n\n Response would be a pdf stream to be opened in a different tab\n '''\n permission_classes = []\n def get(self, request):\n endpoint_uri = settings.PDF_SERVICE_ENDPOINT\n json_dict = {}\n template_type = request.query_params.get('template_type')\n\n if(template_type == \"simple-template\"):\n file_dict = {}\n file_dict['template'] = \"exTemplateBootstrap.js\"\n file_dict['title'] = \"This is my title on test\"\n\n # prepare all data to be on json object 'file'\n json_dict['file'] = file_dict\n\n elif (template_type == \"slip\"):\n '''\n Inquiry Slip\n GET parameters => /?template_type=slip&id=<incident_id>\n '''\n incident_id = request.query_params.get('id')\n json_dict[\"file\"] = get_slip_data(incident_id)\n\n elif (template_type == \"daily_category\"):\n \"\"\"\n daily_summery_report_categorywise\n GET parameters => /?template_type=daily_category\n \"\"\"\n json_dict[\"file\"] = get_daily_category_data()\n\n elif (template_type == \"daily_summary\"):\n \"\"\"\n daily_summary_report_main\n GET parameters => /?template_type=daily_district\n \"\"\"\n json_dict[\"file\"] = get_daily_summary_data()\n\n elif (template_type == \"daily_district\"):\n \"\"\"\n daily_summary_report_districtwise\n GET parameters => /?template_type=daily_district\n \"\"\"\n json_dict[\"file\"] = get_daily_district_data()\n\n\n request_data = json.dumps(json_dict)\n res = requests.post(url=endpoint_uri, data = request_data, headers={'content-type': 'application/json'})\n\n if res.status_code == 200:\n pdf_file = requests.get(res.json()[\"url\"])\n\n response = HttpResponse(content=pdf_file.content, content_type='application/pdf')\n response['Access-Control-Expose-Headers'] = 'Title'\n response['Title'] = 'report_' + datetime.date.today().strftime(\"%Y%m%d%H%M%S\") + \".pdf\"\n\n return response\n else:\n return HttpResponse(status=res.status_code, content=res.text, content_type='application/json')\n\nclass ReportingView(APIView):\n \"\"\"\n Incident Resource\n \"\"\"\n\n def get(self, request, format=None):\n \"\"\"\n Get incident by incident id\n \"\"\"\n param_report = self.request.query_params.get('report', None)\n start_date = self.request.query_params.get('start_date', '')\n end_date = self.request.query_params.get('end_date', '')\n detailed_report = True if self.request.query_params.get('detailed_report', 'false') == 'true' else False\n complain = True if self.request.query_params.get('complain', 'false') == 'true' else False\n inquiry = True if self.request.query_params.get('inquiry', 'false') == 'true' else False\n\n if start_date == '':\n start_date = 
datetime.date.today().strftime(\"%Y-%m-%d 16:00:00\")\n else:\n start_date = start_date.replace(\"T\", \" \", 1)\n if end_date == '':\n end_date = datetime.date.today().strftime(\"%Y-%m-%d 16:00:00\")\n else:\n end_date = end_date.replace(\"T\", \" \", 1)\n\n if param_report is None or param_report == \"\":\n return Response(\"No report specified\", status=status.HTTP_400_BAD_REQUEST)\n\n table_html = None\n table_title = None\n incident_type_string = incident_type_title(complain, inquiry)\n\n # if param_report == \"police_division_summary_report\":\n # table_html = get_police_division_summary()\n # table_title = \"Police Division Summary Report\"\n\n layout = \"A4 portrait\"\n title = \"\"\"from %s to %s by \"\"\" % (start_date, end_date)\n if param_report == \"category_wise_summary_report\":\n table_html = get_category_summary(start_date, end_date, detailed_report, complain, inquiry)\n if detailed_report:\n table_title = title + \"District and Category\"\n else:\n table_title = title + \"Category\"\n\n elif param_report == \"mode_wise_summary_report\":\n table_html = get_mode_summary(start_date, end_date, detailed_report, complain, inquiry)\n if detailed_report:\n layout = \"A4 landscape\"\n table_title = title + \"District and Mode\"\n else:\n table_title = title + \"Mode\"\n\n elif param_report == \"district_wise_summary_report\":\n table_html = get_district_summary(start_date, end_date, detailed_report, complain, inquiry)\n table_title = title + \"District\"\n\n elif param_report == \"severity_wise_summary_report\":\n table_html = get_severity_summary(start_date, end_date, detailed_report, complain, inquiry)\n if detailed_report:\n table_title = title + \"District and Severity\"\n else:\n table_title = title + \"Severity\"\n\n elif param_report == \"subcategory_wise_summary_report\":\n table_html = get_subcategory_summary(start_date, end_date, detailed_report, complain, inquiry)\n if detailed_report:\n layout = \"A3 landscape\"\n table_title = title + \"District and Subcategory\"\n else:\n table_title = title + \"Subcategory\"\n\n elif param_report == \"incident_date_wise_summary_report\":\n table_html = get_incident_date_summary(start_date, end_date, detailed_report, complain, inquiry)\n table_title = title + \"Incident Date\"\n\n elif param_report == \"status_wise_summary_report\":\n table_html = get_status_summary(start_date, end_date, detailed_report, complain, inquiry)\n if detailed_report:\n table_title = title + \"District and Status\"\n else:\n table_title = title + \"Status\"\n\n if table_html is None:\n return Response(\"Report not found\", status=status.HTTP_400_BAD_REQUEST)\n\n # Prepare report header\n sql3 = incident_type_query(complain, inquiry)\n sql = \"\"\"SELECT\n Count(id) as TotalCount\n FROM incidents_incident WHERE %s\"\"\" % sql3\n dataframe = pd.read_sql_query(sql, connection)\n total_count = dataframe['TotalCount'][0]\n\n table_html = apply_style(\n decode_column_names(table_html)\n .replace(\".0\", \"\", -1)\n .replace(\"(Total No. of Incidents)\",\n \"\"\"<strong>(Total No. 
of Incidents from %s to %s)</strong>\"\"\" % (start_date, end_date), -1)\n .replace(\"(Unassigned)\", \"<strong>(Unassigned)</strong>\", -1)\n , table_title, incident_type_string, layout, total_count)\n\n response = HttpResponse(content_type='application/pdf')\n response['Access-Control-Expose-Headers'] = 'Title'\n response['Title'] = \"\"\"Incidents reported within the period %s %s %s.pdf\"\"\" % (\n table_title, incident_type_string, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n pisa.CreatePDF(table_html, dest=response)\n return response\n" ]
[ [ "pandas.read_sql_query" ] ]
steven0129/TinyNeuralNetwork
[ "2ffa5a806cade1d1ebd2aa54c5697d3ad131d22f" ]
[ "examples/converter/convert_from_json.py" ]
[ "import argparse\nimport os\n\nimport torch\nfrom tinynn.converter import TFLiteConverter\nfrom tinynn.util.converter_util import export_converter_files, parse_config\n\nCURRENT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\ndef export_files():\n from models.cifar10.mobilenet import DEFAULT_STATE_DICT, Mobilenet\n\n model = Mobilenet()\n model.load_state_dict(torch.load(DEFAULT_STATE_DICT))\n model.cpu()\n model.eval()\n\n dummy_input = torch.rand((1, 3, 224, 224))\n\n export_dir = 'out'\n export_name = 'mbv1_224'\n\n export_converter_files(model, dummy_input, export_dir, export_name)\n\n json_file = os.path.join(CURRENT_PATH, export_dir, f'{export_name}.json')\n return json_file\n\n\ndef main_worker(args):\n json_file = args.path\n if json_file is None:\n json_file = export_files()\n\n # We will try to parse the config and prepare the inputs for you.\n # If you want to use your own inputs, just assign it to `generated_inputs` here.\n torch_model_path, tflite_model_path, input_transpose, generated_inputs, output_transpose = parse_config(json_file)\n\n # When converting quantized models, please ensure the quantization backend is set.\n torch.backends.quantized.engine = 'qnnpack'\n\n with torch.no_grad():\n model = torch.jit.load(torch_model_path)\n model.cpu()\n model.eval()\n\n # Pay attention to the arguments `input_transpose` and `output_transpose` in the next line.\n # By default, we will perform nchw -> nhwc transpose every 4D input and output tensor.\n # If you don't want to do this, please pass in False for them.\n converter = TFLiteConverter(model, generated_inputs, tflite_model_path, input_transpose, output_transpose)\n converter.convert()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', metavar='DIR', default=None, help='path to the config (.json)')\n\n args = parser.parse_args()\n main_worker(args)\n" ]
[ [ "torch.jit.load", "torch.rand", "torch.no_grad", "torch.load" ] ]
NigeloYang/tensorflow-practice
[ "0778f3751512773504eb6c685dfb138aa8e43d40" ]
[ "pre_data/matplotlib/demo/add_axes.py" ]
[ "import matplotlib.pyplot as plt\nimport matplotlib\n\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei']\nmatplotlib.rcParams['axes.unicode_minus'] = False\n\nfig = plt.figure()\n\n# 模拟数据\nx = [1, 2, 3, 4, 5, 6, 7, 8]\ny = [1, 2, 3, 1, 6, 3, 5, 9]\n\nleft, bottom, width, height = 0.1, 0.1, 0.8, 0.8\nx1 = fig.add_axes([left, bottom, width, height])\nx1.plot(x, y, 'b')\nx1.set_title('子图1')\n\nleft, bottom, width, height = 0.2, 0.5, 0.25, 0.25\nx2 = fig.add_axes([left, bottom, width, height])\nx2.plot(x, y, 'y')\nx2.set_title('子图2')\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.show" ] ]
aleksandrina-streltsova/lidar-global-registration
[ "00cc919f17fe5b6854b575ca0aea3712ce034df6" ]
[ "global_registration.py" ]
[ "import time\nimport os\nimport sys\n\nimport pyntcloud\nimport yaml\nimport copy\n\nfrom open3d.cuda.pybind.geometry import PointCloud\nfrom open3d.cuda.pybind.pipelines.registration import Feature, RegistrationResult\nfrom typing import NamedTuple, List, Tuple, Optional\nfrom tqdm import tqdm\n\nimport numpy as np\nimport pandas as pd\nimport open3d as o3d\n\nRegistrationData = NamedTuple('RegistrationData', [\n ('pcd', PointCloud),\n ('pcd_fpfh', Optional[Feature]),\n ('filename', str)\n])\nGROUND_TRUTH_COLUMNS = ['reading', 'gT00', 'gT01', 'gT02', 'gT03', 'gT10', 'gT11', 'gT12', 'gT13', 'gT20', 'gT21',\n 'gT22', 'gT23', 'gT30', 'gT31', 'gT32', 'gT33']\n\n\ndef count_correct_correspondences(source: RegistrationData, target: RegistrationData,\n correspondence_set: np.ndarray, transformation_gt: np.ndarray,\n error_threshold: float):\n source_correspondences = np.asarray(copy.deepcopy(source.pcd).transform(transformation_gt).points)[\n correspondence_set[:, 0]]\n target_correspondences = np.asarray(target.pcd.points)[correspondence_set[:, 1]]\n errors = np.linalg.norm(source_correspondences - target_correspondences, axis=1)\n return np.count_nonzero(errors < error_threshold)\n\n\ndef get_transformation(csv_path: str, src_filename: str, tgt_filename: str):\n df = pd.read_csv(csv_path)\n gt = {}\n for _, row in df.iterrows():\n gt[row[0]] = np.array(list(map(float, row[1:].values))).reshape((4, 4))\n return np.linalg.inv(gt[tgt_filename]) @ gt[src_filename]\n\n\ndef preprocess_point_cloud(pcd: PointCloud, voxel_size: int, config) -> Tuple[PointCloud, Feature]:\n pcd_down = pcd.voxel_down_sample(voxel_size)\n print(f\" Pointcloud down sampled from {len(pcd.points)} points to {len(pcd_down.points)} points.\")\n\n radius_normal = config['normal_radius_coef'] * voxel_size\n pcd_down.estimate_normals(o3d.geometry.KDTreeSearchParamRadius(radius=radius_normal))\n\n radius_feature = config['feature_radius_coef'] * voxel_size\n pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n pcd_down,\n o3d.geometry.KDTreeSearchParamRadius(radius=radius_feature))\n return pcd_down, pcd_fpfh\n\n\ndef load_dataset(voxel_size: int, config, is_local: bool = False) -> List[RegistrationData]:\n dataset = []\n for i, filename in enumerate(sorted(os.listdir(config['path']))):\n print(f\":: Processing {filename}\")\n pcd = o3d.io.read_point_cloud(os.path.join(config['path'], filename))\n if is_local:\n dataset.append(RegistrationData(pcd, None, filename))\n else:\n pcd_down, pcd_fpfh = preprocess_point_cloud(pcd, voxel_size, config)\n dataset.append(RegistrationData(pcd_down, pcd_fpfh, filename))\n del pcd\n return dataset\n\n\ndef load_source_and_target(voxel_size: int, config, is_local: bool = False) -> Tuple[\n RegistrationData, RegistrationData]:\n dataset = []\n for i, key in enumerate(['source', 'target']):\n filepath = config[key]\n filename = os.path.basename(filepath)\n print(f\":: Processing {filename}\")\n pcd = o3d.io.read_point_cloud(filepath)\n if is_local:\n dataset.append(RegistrationData(pcd, None, filename))\n else:\n pcd_down, pcd_fpfh = preprocess_point_cloud(pcd, voxel_size, config)\n dataset.append(RegistrationData(pcd_down, pcd_fpfh, filename))\n del pcd\n return dataset[0], dataset[1]\n\n\ndef execute_global_registration(source: RegistrationData, target: RegistrationData,\n voxel_size: int, config, testname: str) -> RegistrationResult:\n print(f\":: RANSAC global registration on downsampled point clouds: {source.filename} and {target.filename}.\")\n start = time.time()\n 
distance_threshold = config['distance_thr_coef'] * voxel_size\n    result: RegistrationResult = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(\n        source.pcd, target.pcd, source.pcd_fpfh, target.pcd_fpfh, config['reciprocal'],\n        distance_threshold,\n        o3d.pipelines.registration.TransformationEstimationPointToPoint(False),\n        ransac_n=3,\n        checkers=[\n            o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(),\n            o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)\n        ],\n        criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(config['iteration'], config['confidence']))\n    correspondence_set = np.asarray(result.correspondence_set)\n    print(\"    Global registration took: %.3f sec.\" % (time.time() - start))\n    print(f\"    fitness: {result.fitness}\\n\"\n          f\"    inlier_rmse: {result.inlier_rmse}\\n\"\n          f\"    inliers: {len(result.correspondence_set)}/{round(len(result.correspondence_set) / result.fitness)}\")\n    save_clouds(result.transformation, config, testname)\n    if 'ground_truth' in config:\n        transformation_gt = get_transformation(config['ground_truth'], source.filename, target.filename)\n        print(\n            f\"    correct inliers: {count_correct_correspondences(source, target, np.asarray(result.correspondence_set), transformation_gt, distance_threshold)}\\n\")\n        print(f\"transformation: \\n\\n{result.transformation}\\n\")\n        print(f\"transformation (ground truth): \\n\\n{transformation_gt}\\n\")\n        save_correspondence_distances(source, target, correspondence_set, transformation_gt, voxel_size, testname)\n    return result\n\n\ndef execute_local_registration(source: RegistrationData, target: RegistrationData,\n                               transformation: np.array, config) -> RegistrationResult:\n    print(f\":: Apply point-to-point ICP: {source.filename} and {target.filename}.\")\n    start = time.time()\n\n    result = o3d.pipelines.registration.registration_icp(\n        source.pcd, target.pcd, config['icp_thr'], transformation,\n        o3d.pipelines.registration.TransformationEstimationPointToPoint(False))\n\n    print(\"    Local registration took: %.3f sec.\" % (time.time() - start))\n    print(f\"    fitness: {result.fitness}\\n\"\n          f\"    inlier_rmse: {result.inlier_rmse}\\n\")\n    print(f\"transformation: \\n\\n{result.transformation}\\n\")\n    return result\n\n\ndef save_clouds(transformation: np.ndarray, config, testname: str):\n    source_pcd = o3d.io.read_point_cloud(config['source'])\n    target_pcd = o3d.io.read_point_cloud(config['target'])\n    source_pcd.paint_uniform_color([1, 0.706, 0])\n    target_pcd.paint_uniform_color([0, 0.651, 0.929])\n    source_pcd.transform(transformation)\n    source_pcd += target_pcd\n    o3d.io.write_point_cloud(testname + \"_aligned_open3d.ply\", source_pcd,\n                             compressed=True, print_progress=True)\n    del source_pcd, target_pcd\n\n\ndef save_correspondence_distances(source: RegistrationData, target: RegistrationData, correspondence_set: np.ndarray,\n                                  transformation_gt: np.ndarray, voxel_size: float, testname: str):\n    source_correspondences = np.asarray(copy.deepcopy(source.pcd).transform(transformation_gt).points)[\n        correspondence_set[:, 0]]\n    target_correspondences = np.asarray(target.pcd.points)[correspondence_set[:, 1]]\n    errors = np.linalg.norm(source_correspondences - target_correspondences, axis=1) / voxel_size\n    df = pd.DataFrame(errors, columns=['distance'])\n    df.to_csv(testname + '_distances.csv', index=False)\n\n\ndef run_global_registration_and_save_ply():\n    with open(sys.argv[1], 'r') as stream:\n        config = yaml.load(stream, Loader=yaml.Loader)\n    testname = os.path.basename(sys.argv[1])[:-5]\n    dataset = 
load_dataset(config['voxel_size'], config)\n for i in range(len(dataset) - 1):\n source = dataset[i]\n target = dataset[i + 1]\n execute_global_registration(source, target, config['voxel_size'], config, testname)\n\n\ndef run_global_registration():\n with open(sys.argv[1], 'r') as stream:\n config = yaml.load(stream, Loader=yaml.Loader)\n testname = os.path.basename(sys.argv[1])[:-5]\n source, target = load_source_and_target(config['voxel_size'], config)\n execute_global_registration(source, target, config['voxel_size'], config, testname)\n\n\ndef run_global_and_local_registration():\n with open(sys.argv[1], 'r') as stream:\n config = yaml.load(stream, Loader=yaml.Loader)\n testname = os.path.basename(sys.argv[1])[:-5]\n source, target = load_source_and_target(config['voxel_size'], config)\n result = execute_global_registration(source, target, config['voxel_size'], config, testname)\n source, target = load_source_and_target(config['voxel_size'], config, is_local=True)\n execute_local_registration(source, target, result.transformation, config)\n\n\ndef estimate_and_save_ground_truth():\n with open(sys.argv[1], 'r') as stream:\n config = yaml.load(stream, Loader=yaml.Loader)\n if os.path.exists(os.path.join(config['path'], 'ground_truth.csv')):\n return\n testname = os.path.basename(sys.argv[1])[:-5]\n filenames = list(sorted(filter(lambda f: f[-4:] == '.ply' and f.startswith(testname), os.listdir(config['path']))))\n transformations = [np.eye(4)]\n voxel_size = config['voxel_size']\n\n print(f\":: Processing {filenames[0]}\")\n pcd = o3d.io.read_point_cloud(os.path.join(config['path'], filenames[0]))\n pcd_down, pcd_fpfh = preprocess_point_cloud(pcd, voxel_size, config)\n source = RegistrationData(pcd_down, pcd_fpfh, filenames[0])\n source_local = RegistrationData(pcd, None, filenames[0])\n\n for i, filename in enumerate(filenames[1:]):\n print(f\":: Processing {filename}\")\n pcd = o3d.io.read_point_cloud(os.path.join(config['path'], filename))\n pcd_down, pcd_fpfh = preprocess_point_cloud(pcd, voxel_size, config)\n target = RegistrationData(pcd_down, pcd_fpfh, filename)\n target_local = RegistrationData(pcd, None, filename)\n\n result = execute_global_registration(source, target, voxel_size, config, testname)\n result = execute_local_registration(source_local, target_local, result.transformation, config)\n transformations.append(transformations[-1] @ np.linalg.inv(result.transformation))\n\n source = target\n source_local = target_local\n\n values = []\n for filename, transformation in zip(filenames, transformations):\n values.append([filename] + list(transformation.flatten()))\n df = pd.DataFrame(values, columns=GROUND_TRUTH_COLUMNS, index=None)\n df.to_csv(os.path.join(config['path'], 'ground_truth.csv'), index=False)\n\n\ndef downsample_and_transform_point_clouds(with_transformation: bool = True):\n with open(sys.argv[1], 'r') as stream:\n config = yaml.load(stream, Loader=yaml.Loader)\n dataset_name = os.path.basename(sys.argv[1])[:-5]\n filenames = list(sorted(filter(lambda f: f[-4:] == '.ply' and f.startswith(dataset_name), os.listdir(config['path']))))\n voxel_size = config['voxel_size']\n\n gt = {}\n if with_transformation:\n df = pd.read_csv(config['ground_truth'])\n for _, row in df.iterrows():\n gt[row[0]] = np.array(list(map(float, row[1:].values))).reshape((4, 4))\n\n path = os.path.join(config['path'], 'downsampled_' + str(voxel_size))\n if not os.path.exists(path):\n os.mkdir(path)\n\n iter_pbar = tqdm(filenames)\n for i, filename in enumerate(iter_pbar):\n 
iter_pbar.set_description(f'Processing {filename}..')\n pcd = o3d.io.read_point_cloud(os.path.join(config['path'], filename))\n pcd_down = pcd.voxel_down_sample(voxel_size)\n if with_transformation:\n pcd_down = pcd_down.transform(gt[filename])\n pcd_down.paint_uniform_color([np.random.rand(), np.random.rand(), np.random.rand()])\n pyntcloud.PyntCloud.from_instance(\"open3d\", pcd_down).to_file(os.path.join(path, filename))\n\n\nif __name__ == '__main__':\n run_global_registration()\n" ]
[ [ "numpy.eye", "pandas.read_csv", "numpy.linalg.inv", "pandas.DataFrame", "numpy.asarray", "numpy.count_nonzero", "numpy.random.rand", "numpy.linalg.norm" ] ]
avdarekar/color-mag-diagram
[ "4d96df646dd909ac631c627a52696ae6c4034f39" ]
[ "Colormag.py" ]
[ "#import xlrd, matplotlib.pyplot, and math libraries\nimport xlrd\nimport matplotlib.pyplot as plt\nimport math \nfrom xlrd import open_workbook\n\n#open Gaia data .xlsx file from computer\nbench = open_workbook('/Users/adbreeze13/Desktop/UNCResearch/Test/finaldata.xlsx',on_demand=True)\n\n#declare arrays for apparent magnitude, bp-rp, parallax, parallax over error, g mean flux over error, bp flux over error, rp flux over error, and absolute magnitude respectively\nappmagcol1 = []\nbp_rpcol2 = []\nparallaxcol3 = []\npoecol4 = []\ngfloecol5 = []\nbpfloecol6 = []\nrpfloecol7 = []\nabsmag = []\n\n#iterate over each row in .xlsx file\nfor name in bench.sheet_names():\n sheetnew = bench.sheet_by_name(name)\n for row in range(0, 100000):\n \n #assign type of cell in each column to corresponding column variable \n appmagcoltype = sheetnew.cell_type(row, 0)\n bp_rpcoltype = sheetnew.cell_type(row, 1)\n parallaxcoltype = sheetnew.cell_type(row, 2)\n poecoltype = sheetnew.cell_type(row, 3)\n gfloecoltype = sheetnew.cell_type(row, 4)\n bpfloecoltype = sheetnew.cell_type(row, 5)\n rpfloecoltype = sheetnew.cell_type(row, 6)\n \n #if cells in all columns are not empty \n if not appmagcoltype == xlrd.XL_CELL_EMPTY and not bp_rpcoltype == xlrd.XL_CELL_EMPTY and not parallaxcoltype == xlrd.XL_CELL_EMPTY and not poecoltype == xlrd.XL_CELL_EMPTY and not gfloecoltype == xlrd.XL_CELL_EMPTY and not bpfloecoltype == xlrd.XL_CELL_EMPTY and not rpfloecoltype == xlrd.XL_CELL_EMPTY:\n #if cell values in parallax over error, g mean flux over error, bp flux over error, and rp flux over error columns are above 20, append cell values in each column to corresponding column array\n if sheetnew.cell_value(row, 3) > 20 and sheetnew.cell_value(row, 4) > 20 and sheetnew.cell_value(row, 5) > 20 and sheetnew.cell_value(row,6) > 20:\n appmagcol1.append(sheetnew.cell_value(row, 0))\n bp_rpcol2.append(sheetnew.cell_value(row, 1))\n parallaxcol3.append(sheetnew.cell_value(row,2))\n poecol4.append(sheetnew.cell_value(row, 3))\n gfloecol5.append(sheetnew.cell_value(row, 4))\n bpfloecol6.append(sheetnew.cell_value(row, 5))\n rpfloecol7.append(sheetnew.cell_value(row, 6))\n\n \n\n \n\n#convert apparent magnitude to absolute magnitude and append these values to absmag array\nfor i in range(len(appmagcol1)):\n abscalc = appmagcol1[i]-(5*math.log((1000/parallaxcol3[i]), 10))+5\n absmag.append(abscalc)\n\n\n\n\n#create scatter plot - bp-rp on x-axis, absolute magnitude on y-axis, invert the y-axis, size of marker is .01 \nplt.scatter(bp_rpcol2, absmag, s=.01)\nplt.xlabel('Bp-Rp')\nplt.ylabel('Absolute Magnitude')\nplt.gca().invert_yaxis()\nplt.title('Color-Magnitude Diagram')\nplt.show()\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
IrvingShu/batch-feature-erasing-network
[ "534616c09dade92561a0203797892a63a072b1b4" ]
[ "utils/loss.py" ]
[ "# encoding: utf-8\nimport random\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef topk_mask(input, dim, K = 10, **kwargs):\n index = input.topk(max(1, min(K, input.size(dim))), dim = dim, **kwargs)[1]\n return torch.autograd.Variable(torch.zeros_like(input.data)).scatter(dim, index, 1.0)\n\ndef pdist(A, squared = False, eps = 1e-4):\n prod = torch.mm(A, A.t())\n norm = prod.diag().unsqueeze(1).expand_as(prod)\n res = (norm + norm.t() - 2 * prod).clamp(min = 0)\n return res if squared else res.clamp(min = eps).sqrt()\n\n\ndef normalize(x, axis=-1):\n \"\"\"Normalizing to unit length along the specified dimension.\n Args:\n x: pytorch Variable\n Returns:\n x: pytorch Variable, same shape as input\n \"\"\"\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x\n\n\ndef euclidean_dist(x, y):\n \"\"\"\n Args:\n x: pytorch Variable, with shape [m, d]\n y: pytorch Variable, with shape [n, d]\n Returns:\n dist: pytorch Variable, with shape [m, n]\n \"\"\"\n m, n = x.size(0), y.size(0)\n xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)\n yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()\n dist = xx + yy\n dist.addmm_(1, -2, x, y.t())\n dist = dist.clamp(min=1e-12).sqrt() # for numerical stability\n return dist\n\n\ndef hard_example_mining(dist_mat, labels, margin, return_inds=False):\n \"\"\"For each anchor, find the hardest positive and negative sample.\n Args:\n dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]\n labels: pytorch LongTensor, with shape [N]\n return_inds: whether to return the indices. Save time if `False`(?)\n Returns:\n dist_ap: pytorch Variable, distance(anchor, positive); shape [N]\n dist_an: pytorch Variable, distance(anchor, negative); shape [N]\n p_inds: pytorch LongTensor, with shape [N];\n indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1\n n_inds: pytorch LongTensor, with shape [N];\n indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1\n NOTE: Only consider the case in which all labels have same num of samples,\n thus we can cope with all anchors in parallel.\n \"\"\"\n\n torch.set_printoptions(threshold=5000) \n assert len(dist_mat.size()) == 2\n assert dist_mat.size(0) == dist_mat.size(1)\n N = dist_mat.size(0)\n\n # shape [N, N]\n is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())\n is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())\n # `dist_ap` means distance(anchor, positive)\n # both `dist_ap` and `relative_p_inds` with shape [N, 1]\n dist_ap, relative_p_inds = torch.max(\n dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)\n # `dist_an` means distance(anchor, negative)\n # both `dist_an` and `relative_n_inds` with shape [N, 1]\n dist_an, relative_n_inds = torch.min(\n dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)\n # shape [N]\n dist_ap = dist_ap.squeeze(1)\n dist_an = dist_an.squeeze(1)\n\n if return_inds:\n # shape [N, N]\n ind = (labels.new().resize_as_(labels)\n .copy_(torch.arange(0, N).long())\n .unsqueeze(0).expand(N, N))\n # shape [N, 1]\n p_inds = torch.gather(\n ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)\n n_inds = torch.gather(\n ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)\n # shape [N]\n p_inds = p_inds.squeeze(1)\n n_inds = n_inds.squeeze(1)\n return dist_ap, dist_an, p_inds, n_inds\n\n return dist_ap, dist_an\n\n\nclass TripletLoss(object):\n \"\"\"Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).\n Related Triplet Loss 
theory can be found in paper 'In Defense of the Triplet\n Loss for Person Re-Identification'.\"\"\"\n\n def __init__(self, margin=None):\n self.margin = margin\n if margin is not None:\n self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n else:\n self.ranking_loss = nn.SoftMarginLoss()\n\n def __call__(self, global_feat, labels, normalize_feature=False):\n if normalize_feature:\n global_feat = normalize(global_feat, axis=-1)\n dist_mat = euclidean_dist(global_feat, global_feat)\n dist_ap, dist_an = hard_example_mining(dist_mat, labels, self.margin)\n y = dist_an.new().resize_as_(dist_an).fill_(1)\n if self.margin is not None:\n loss = self.ranking_loss(dist_an, dist_ap, y)\n else:\n loss = self.ranking_loss(dist_an - dist_ap, y)\n return loss, dist_ap, dist_an\n\n\nclass CrossEntropyLabelSmooth(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n Equation: y = (1 - epsilon) * y + epsilon / K.\n Args:\n num_classes (int): number of classes.\n epsilon (float): weight.\n \"\"\"\n\n def __init__(self, num_classes, epsilon=0.1, use_gpu=True):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.use_gpu = use_gpu\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)\n if self.use_gpu: targets = targets.cuda()\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss\n\nclass Margin:\n def __call__(self, embeddings, labels):\n embeddings = F.normalize(embeddings)\n alpha = 0.2\n beta = 1.2\n distance_threshold = 0.5\n inf = 1e6\n eps = 1e-6\n distance_weighted_sampling = True\n d = pdist(embeddings)\n pos = torch.eq(*[labels.unsqueeze(dim).expand_as(d) for dim in [0, 1]]).type_as(d) - torch.autograd.Variable(torch.eye(len(d))).type_as(d)\n num_neg = int(pos.data.sum() / len(pos))\n if distance_weighted_sampling:\n '''\n dim = embeddings.size(-1)\n distance = d.data.clamp(min = distance_threshold)\n distribution = distance.pow(dim - 2) * ((1 - distance.pow(2) / 4).pow(0.5 * (dim - 3)))\n weights = distribution.reciprocal().masked_fill_(pos.data + torch.eye(len(d)).type_as(d.data) > 0, eps)\n samples = torch.multinomial(weights, replacement = False, num_samples = num_neg)\n neg = torch.autograd.Variable(torch.zeros_like(pos.data).scatter_(1, samples, 1))\n '''\n neg = torch.autograd.Variable(torch.zeros_like(pos.data).scatter_(1, torch.multinomial((d.data.clamp(min = distance_threshold).pow(embeddings.size(-1) - 2) * (1 - d.data.clamp(min = distance_threshold).pow(2) / 4).pow(0.5 * (embeddings.size(-1) - 3))).reciprocal().masked_fill_(pos.data + torch.eye(len(d)).type_as(d.data) > 0, eps), replacement = False, num_samples = num_neg), 1))\n else:\n neg = topk_mask(d + inf * ((pos > 0) + (d < distance_threshold)).type_as(d), dim = 1, largest = False, K = num_neg)\n L = F.relu(alpha + (pos * 2 - 1) * (d - beta))\n M = ((pos + neg > 0) * (L > 0)).float()\n return (M * L).sum() / M.sum(), 0\n\n" ]
[ [ "torch.nn.MarginRankingLoss", "torch.nn.functional.normalize", "torch.set_printoptions", "torch.zeros_like", "torch.nn.LogSoftmax", "torch.nn.functional.relu", "torch.norm", "torch.arange", "torch.nn.SoftMarginLoss", "torch.pow" ] ]
failure-to-thrive/addons
[ "63c82e318e68b07eb1162d1ff247fe9f4d3194fc" ]
[ "tensorflow_addons/optimizers/cyclical_learning_rate.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Cyclical Learning Rate Schedule policies for TensorFlow.\"\"\"\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass CyclicalLearningRate(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses cyclical schedule.\"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n maximal_learning_rate,\n step_size,\n scale_fn,\n scale_mode=\"cycle\",\n name=None,\n ):\n \"\"\"Applies cyclical schedule to the learning rate.\n\n See Cyclical Learning Rates for Training Neural Networks. https://arxiv.org/abs/1506.01186\n\n\n ```python\n lr_schedule = tf.keras.optimizers.schedules.CyclicalLearningRate(\n initial_learning_rate=1e-4,\n maximal_learning_rate=1e-2,\n step_size=2000,\n scale_fn=lambda x: 1.,\n scale_mode=\"cycle\",\n name=\"MyCyclicScheduler\")\n\n model.compile(optimizer=tf.keras.optimizers.SGD(\n learning_rate=lr_schedule),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(data, labels, epochs=5)\n ```\n\n You can pass this schedule directly into a\n `tf.keras.optimizers.Optimizer` as the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The initial learning rate.\n maximal_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The maximum learning rate.\n step_size: A scalar `float32` or `float64` `Tensor` or a\n Python number. Step size.\n scale_fn: A function. Scheduling function applied in cycle\n scale_mode: ['cycle', 'iterations']. 
Mode to apply during cyclic\n schedule\n name: (Optional) Name for the operation.\n\n Returns:\n Updated learning rate value.\n \"\"\"\n super().__init__()\n self.initial_learning_rate = initial_learning_rate\n self.maximal_learning_rate = maximal_learning_rate\n self.step_size = step_size\n self.scale_fn = scale_fn\n self.scale_mode = scale_mode\n self.name = name\n\n def __call__(self, step):\n with tf.name_scope(self.name or \"CyclicalLearningRate\"):\n initial_learning_rate = tf.convert_to_tensor(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n dtype = initial_learning_rate.dtype\n maximal_learning_rate = tf.cast(self.maximal_learning_rate, dtype)\n step_size = tf.cast(self.step_size, dtype)\n cycle = tf.floor(1 + step / (2 * step_size))\n x = tf.abs(step / step_size - 2 * cycle + 1)\n\n mode_step = cycle if self.scale_mode == \"cycle\" else step\n\n return initial_learning_rate + (\n maximal_learning_rate - initial_learning_rate\n ) * tf.maximum(tf.cast(0, dtype), (1 - x)) * self.scale_fn(mode_step)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"maximal_learning_rate\": self.maximal_learning_rate,\n \"step_size\": self.step_size,\n \"scale_mode\": self.scale_mode,\n }\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass TriangularCyclicalLearningRate(CyclicalLearningRate):\n def __init__(\n self,\n initial_learning_rate,\n maximal_learning_rate,\n step_size,\n scale_mode=\"cycle\",\n name=\"TriangularCyclicalLearningRate\",\n ):\n \"\"\"Applies triangular cyclical schedule to the learning rate.\n\n See Cyclical Learning Rates for Training Neural Networks. https://arxiv.org/abs/1506.01186\n\n\n ```python\n from tf.keras.optimizers import schedules\n\n lr_schedule = schedules.TriangularCyclicalLearningRate(\n initial_learning_rate=1e-4,\n maximal_learning_rate=1e-2,\n step_size=2000,\n scale_mode=\"cycle\",\n name=\"MyCyclicScheduler\")\n\n model.compile(optimizer=tf.keras.optimizers.SGD(\n learning_rate=lr_schedule),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(data, labels, epochs=5)\n ```\n\n You can pass this schedule directly into a\n `tf.keras.optimizers.Optimizer` as the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The initial learning rate.\n maximal_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The maximum learning rate.\n step_size: A scalar `float32` or `float64` `Tensor` or a\n Python number. Step size.\n scale_fn: A function. Scheduling function applied in cycle\n scale_mode: ['cycle', 'iterations']. Mode to apply during cyclic\n schedule\n name: (Optional) Name for the operation.\n\n Returns:\n Updated learning rate value.\n \"\"\"\n super().__init__(\n initial_learning_rate=initial_learning_rate,\n maximal_learning_rate=maximal_learning_rate,\n step_size=step_size,\n scale_fn=lambda x: 1.0,\n scale_mode=scale_mode,\n name=name,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass Triangular2CyclicalLearningRate(CyclicalLearningRate):\n def __init__(\n self,\n initial_learning_rate,\n maximal_learning_rate,\n step_size,\n scale_mode=\"cycle\",\n name=\"Triangular2CyclicalLearningRate\",\n ):\n \"\"\"Applies triangular2 cyclical schedule to the learning rate.\n\n See Cyclical Learning Rates for Training Neural Networks. 
https://arxiv.org/abs/1506.01186\n\n\n ```python\n from tf.keras.optimizers import schedules\n\n lr_schedule = schedules.Triangular2CyclicalLearningRate(\n initial_learning_rate=1e-4,\n maximal_learning_rate=1e-2,\n step_size=2000,\n scale_mode=\"cycle\",\n name=\"MyCyclicScheduler\")\n\n model.compile(optimizer=tf.keras.optimizers.SGD(\n learning_rate=lr_schedule),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(data, labels, epochs=5)\n ```\n\n You can pass this schedule directly into a\n `tf.keras.optimizers.Optimizer` as the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The initial learning rate.\n maximal_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The maximum learning rate.\n step_size: A scalar `float32` or `float64` `Tensor` or a\n Python number. Step size.\n scale_fn: A function. Scheduling function applied in cycle\n scale_mode: ['cycle', 'iterations']. Mode to apply during cyclic\n schedule\n name: (Optional) Name for the operation.\n\n Returns:\n Updated learning rate value.\n \"\"\"\n super().__init__(\n initial_learning_rate=initial_learning_rate,\n maximal_learning_rate=maximal_learning_rate,\n step_size=step_size,\n scale_fn=lambda x: 1 / (2.0 ** (x - 1)),\n scale_mode=scale_mode,\n name=name,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass ExponentialCyclicalLearningRate(CyclicalLearningRate):\n def __init__(\n self,\n initial_learning_rate,\n maximal_learning_rate,\n step_size,\n scale_mode=\"iterations\",\n gamma=1.0,\n name=\"ExponentialCyclicalLearningRate\",\n ):\n \"\"\"Applies exponential cyclical schedule to the learning rate.\n\n See Cyclical Learning Rates for Training Neural Networks. https://arxiv.org/abs/1506.01186\n\n\n ```python\n from tf.keras.optimizers import schedules\n\n lr_schedule = ExponentialCyclicalLearningRate(\n initial_learning_rate=1e-4,\n maximal_learning_rate=1e-2,\n step_size=2000,\n scale_mode=\"cycle\",\n gamma=0.96,\n name=\"MyCyclicScheduler\")\n\n model.compile(optimizer=tf.keras.optimizers.SGD(\n learning_rate=lr_schedule),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(data, labels, epochs=5)\n ```\n\n You can pass this schedule directly into a\n `tf.keras.optimizers.Optimizer` as the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The initial learning rate.\n maximal_learning_rate: A scalar `float32` or `float64` `Tensor` or\n a Python number. The maximum learning rate.\n step_size: A scalar `float32` or `float64` `Tensor` or a\n Python number. Step size.\n scale_fn: A function. Scheduling function applied in cycle\n scale_mode: ['cycle', 'iterations']. Mode to apply during cyclic\n schedule\n gamma: A scalar `float32` or `float64` `Tensor` or a\n Python number. Gamma value.\n name: (Optional) Name for the operation.\n\n Returns:\n Updated learning rate value.\n \"\"\"\n super().__init__(\n initial_learning_rate=initial_learning_rate,\n maximal_learning_rate=maximal_learning_rate,\n step_size=step_size,\n scale_fn=lambda x: gamma ** x,\n scale_mode=scale_mode,\n name=name,\n )\n" ]
[ [ "tensorflow.cast", "tensorflow.name_scope", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.abs", "tensorflow.convert_to_tensor", "tensorflow.floor" ] ]
apprenticeadi/gbs
[ "642d6a3abdc2a698c3bb7bca46c86afd858855f9" ]
[ "loop_hafnian_batch.py" ]
[ "import numpy as np \nimport numba \nfrom _loop_hafnian_subroutines import (\n precompute_binoms,\n nb_ix,\n matched_reps,\n find_kept_edges,\n f_loop,\n f_loop_odd,\n get_submatrices,\n get_submatrix_batch_odd0,\n eigvals\n )\n\[email protected](nopython=True, parallel=True, cache=True)\ndef _calc_loop_hafnian_batch_even(A, D, fixed_edge_reps,\n batch_max, odd_cutoff,\n glynn=True):\n\n oddloop = D[0]\n oddV = A[0,:]\n\n n = A.shape[0]\n N_fixed = 2 * fixed_edge_reps.sum() # number of photons\n\n N_max = N_fixed + 2 * batch_max + odd_cutoff\n\n edge_reps = np.concatenate((np.array([batch_max]), fixed_edge_reps))\n steps = np.prod(edge_reps + 1)\n # precompute binomial coefficients \n max_binom = edge_reps.max() + odd_cutoff\n binoms = precompute_binoms(max_binom)\n\n H_batch = np.zeros(2*batch_max+odd_cutoff+1, dtype=np.complex128)\n for j in numba.prange(steps):\n\n Hnew = np.zeros(2*batch_max+odd_cutoff+1, dtype=np.complex128)\n\n kept_edges = find_kept_edges(j, edge_reps)\n edges_sum = kept_edges.sum()\n\n binom_prod = 1.\n for i in range(1, n//2):\n binom_prod *= binoms[edge_reps[i], kept_edges[i]]\n \n if glynn:\n delta = 2 * kept_edges - edge_reps\n else:\n delta = kept_edges\n\n AX_S, XD_S, D_S, oddVX_S = get_submatrices(delta, A, D, oddV)\n\n E = eigvals(AX_S) # O(n^3) step\n\n f_even = f_loop(E, AX_S, XD_S, D_S, N_max)\n f_odd = f_loop_odd(E, AX_S, XD_S, D_S, N_max, oddloop, oddVX_S)\n\n for N_det in range(2*kept_edges[0], 2*batch_max+odd_cutoff+1):\n N = N_fixed + N_det\n plus_minus = (-1.) ** (N // 2 - edges_sum)\n\n n_det_binom_prod = binoms[N_det//2, kept_edges[0]] * binom_prod\n\n if N_det % 2 == 0:\n Hnew[N_det] += n_det_binom_prod * plus_minus * f_even[N//2]\n else:\n Hnew[N_det] += n_det_binom_prod * plus_minus * f_odd[N]\n\n H_batch += Hnew\n\n if glynn:\n for j in range(H_batch.shape[0]):\n x = N_fixed + j \n H_batch[j] *= 0.5 ** (x // 2)\n\n return H_batch\n\[email protected](nopython=True, parallel=True, cache=True)\ndef _calc_loop_hafnian_batch_odd(A, D, fixed_edge_reps,\n batch_max, even_cutoff, oddmode,\n glynn=True):\n\n oddloop = D[0]\n oddV = A[0,:]\n\n #when I added the extra edges, I place the edge which goes from the oddmode to \n #to the current mode in the index 1 position of the array\n oddloop0 = D[1]\n oddV0 = A[1,:]\n\n n = A.shape[0]\n N_fixed = 2 * fixed_edge_reps.sum() + 1\n N_max = N_fixed + 2 * batch_max + even_cutoff + 1\n\n edge_reps = np.concatenate((np.array([batch_max, 1]), fixed_edge_reps))\n steps = np.prod(edge_reps + 1)\n # precompute binomial coefficients \n max_binom = edge_reps.max() + even_cutoff\n binoms = precompute_binoms(max_binom)\n\n H_batch = np.zeros(2*batch_max+even_cutoff+2, dtype=np.complex128)\n for j in numba.prange(steps):\n \n Hnew = np.zeros(2*batch_max+even_cutoff+2, dtype=np.complex128)\n\n kept_edges = find_kept_edges(j, edge_reps)\n edges_sum = kept_edges.sum()\n\n binom_prod = 1.\n for i in range(1, n//2):\n binom_prod *= binoms[edge_reps[i], kept_edges[i]]\n \n if glynn:\n delta = 2 * kept_edges - edge_reps\n else:\n delta = kept_edges\n\n AX_S, XD_S, D_S, oddVX_S = get_submatrices(delta, A, D, oddV)\n\n E = eigvals(AX_S) # O(n^3) step\n\n if kept_edges[0] == 0 and kept_edges[1] == 0:\n oddVX_S0 = get_submatrix_batch_odd0(delta, oddV0)\n plus_minus = (-1) ** (N_fixed // 2 - edges_sum)\n f = f_loop_odd(E, AX_S, XD_S, D_S, N_fixed, oddloop0, oddVX_S0)[N_fixed]\n H_batch[0] += binom_prod * plus_minus * f \n\n f_even = f_loop(E, AX_S, XD_S, D_S, N_max)\n f_odd = f_loop_odd(E, AX_S, XD_S, D_S, N_max, oddloop, 
oddVX_S)\n\n for N_det in range(2*kept_edges[0]+1, 2*batch_max+even_cutoff+2):\n N = N_fixed + N_det\n plus_minus = (-1) ** (N // 2 - edges_sum)\n\n n_det_binom_prod = binoms[(N_det-1)//2, kept_edges[0]] * binom_prod\n\n if N % 2 == 0:\n Hnew[N_det] += n_det_binom_prod * plus_minus * f_even[N//2]\n else: \n Hnew[N_det] += n_det_binom_prod * plus_minus * f_odd[N]\n \n H_batch += Hnew\n\n if glynn:\n for j in range(H_batch.shape[0]):\n x = N_fixed + j \n H_batch[j] *= 0.5 ** (x // 2)\n\n return H_batch\n\ndef add_batch_edges_even(fixed_edges):\n if len(fixed_edges) == 0:\n return np.array([0,0], dtype=int)\n n_edges = fixed_edges.shape[0]\n edges = np.zeros(n_edges+2, dtype=int)\n new_edge = max(fixed_edges) + 1\n edges[0] = new_edge\n edges[1:n_edges//2+1] = fixed_edges[:n_edges//2] \n edges[n_edges//2+1] = new_edge\n edges[n_edges//2+2:] = fixed_edges[n_edges//2:] \n return edges\n\ndef add_batch_edges_odd(fixed_edges, oddmode):\n if len(fixed_edges) == 0:\n return np.array([1,oddmode,1,1], dtype=int)\n n_edges = fixed_edges.shape[0]\n edges = np.zeros(n_edges+4, dtype=int)\n new_edge = max(max(fixed_edges), oddmode) + 1\n edges[0] = new_edge \n edges[1] = oddmode\n edges[2:n_edges//2+2] = fixed_edges[:n_edges//2]\n edges[n_edges//2+2] = new_edge\n edges[n_edges//2+3] = new_edge\n edges[n_edges//2+4:] = fixed_edges[n_edges//2:]\n return edges\n\ndef loop_hafnian_batch(A, D, fixed_reps, N_cutoff, glynn=True):\n # checks \n n = A.shape[0]\n assert A.shape[1] == n\n assert D.shape == (n,)\n assert len(fixed_reps) == n - 1 \n\n nz = np.nonzero(list(fixed_reps) + [1])[0]\n Anz = A[np.ix_(nz, nz)]\n Dnz = D[nz]\n\n fixed_reps = np.asarray(fixed_reps)\n fixed_reps_nz = fixed_reps[nz[:-1]]\n\n fixed_edges, fixed_m_reps, oddmode = matched_reps(fixed_reps_nz)\n\n if oddmode is None:\n batch_max = N_cutoff // 2\n odd_cutoff = N_cutoff % 2\n edges = add_batch_edges_even(fixed_edges)\n Ax = Anz[np.ix_(edges, edges)].astype(np.complex128)\n Dx = Dnz[edges].astype(np.complex128)\n return _calc_loop_hafnian_batch_even(Ax, Dx, fixed_m_reps, batch_max, odd_cutoff,\n glynn=glynn)\n else:\n edges = add_batch_edges_odd(fixed_edges, oddmode)\n Ax = Anz[np.ix_(edges, edges)].astype(np.complex128)\n Dx = Dnz[edges].astype(np.complex128)\n batch_max = (N_cutoff-1) // 2\n even_cutoff = 1 - (N_cutoff % 2)\n return _calc_loop_hafnian_batch_odd(Ax, Dx, fixed_m_reps, batch_max, even_cutoff, oddmode,\n glynn=glynn)\n\n# compile and quick test upon importing\nA = np.ones((4,4))\nbatch = loop_hafnian_batch(A, A.diagonal(), [1,1,2], 4, glynn=False)\nassert np.allclose(batch, [10,26,76,232,764])\nbatch = loop_hafnian_batch(A, A.diagonal(), [1,1,2], 4, glynn=True)\nassert np.allclose(batch, [10,26,76,232,764])\nbatch = loop_hafnian_batch(A, A.diagonal(), [1,1,1], 5, glynn=False)\nassert np.allclose(batch, [4,10,26,76,232,764])\nbatch = loop_hafnian_batch(A, A.diagonal(), [1,1,1], 5, glynn=True)\nassert np.allclose(batch, [4,10,26,76,232,764])\n########################################" ]
[ [ "numpy.ones", "numpy.allclose", "numpy.ix_", "numpy.zeros", "numpy.asarray", "numpy.prod", "numpy.array" ] ]
mayou36/probability
[ "f185c852146894af6dc02223020413bf26ecdd5c" ]
[ "tensorflow_probability/python/internal/nest_util.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utilities dealing with nested structures.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n 'broadcast_structure',\n 'expand_as_args',\n 'call_fn',\n]\n\n_is_namedtuple = nest._is_namedtuple # pylint: disable=protected-access\n\n\ndef broadcast_structure(to_structure, from_structure):\n \"\"\"Broadcasts `from_structure` to `to_structure`.\n\n This is useful for downstream usage of `zip` or `tf.nest.map_structure`.\n\n If `from_structure` is a singleton, it is tiled to match the structure of\n `to_structure`. Note that the elements in `from_structure` are not copied if\n this tiling occurs.\n\n Args:\n to_structure: A structure.\n from_structure: A structure.\n\n Returns:\n new_from_structure: Same structure as `to_structure`.\n\n #### Example:\n\n ```python\n a_structure = ['a', 'b', 'c']\n b_structure = broadcast_structure(a_structure, 'd')\n # -> ['d', 'd', 'd']\n c_structure = tf.nest.map_structure(\n lambda a, b: a + b, a_structure, b_structure)\n # -> ['ad', 'bd', 'cd']\n ```\n \"\"\"\n from_parts = tf.nest.flatten(from_structure)\n if len(from_parts) == 1:\n from_structure = tf.nest.map_structure(lambda _: from_parts[0],\n to_structure)\n return from_structure\n\n\ndef _force_leaf(struct):\n # Returns `True` if `struct` should be treated as a leaf, rather than\n # expanded/recursed into.\n return hasattr(struct, '_tfp_nest_expansion_force_leaf')\n\n\ndef _force_expand_as_args(struct):\n return hasattr(struct, '_tfp_nest_expansion_force_args')\n\n\ndef expand_as_args(args):\n \"\"\"Returns `True` if `args` should be expanded as `*args`.\"\"\"\n return ((isinstance(args, collections.Sequence) and\n not _is_namedtuple(args) and not _force_leaf(args)) or\n _force_expand_as_args(args))\n\n\ndef _expand_as_kwargs(args):\n # Returns `True` if `args` should be expanded as `**args`.\n return isinstance(args, collections.Mapping) and not _force_leaf(args)\n\n\ndef _maybe_convertible_to_tensor(struct):\n # Returns `True` if `struct` should be passed to `convert_to_tensor`.\n return not _is_namedtuple(struct) or _force_leaf(struct)\n\n\ndef _get_shallow_structure(struct):\n # Get a shallow version of struct where the children are replaced by\n # 'False'.\n return nest.get_traverse_shallow_structure(lambda s: s is struct, struct)\n\n\ndef _nested_convert_to_tensor(struct, dtype=None, name=None):\n \"\"\"Eagerly converts struct to Tensor, recursing upon failure.\"\"\"\n if dtype is not None or not tf.nest.is_nested(struct):\n return tf.convert_to_tensor(struct, dtype=dtype)\n\n if _maybe_convertible_to_tensor(struct):\n try:\n # Try converting the structure 
wholesale.\n return tf.convert_to_tensor(struct, name=name)\n except (ValueError, TypeError):\n # Unfortunately Eager/Graph mode don't agree on the error type.\n pass\n # Try converting all of its children.\n shallow_struct = _get_shallow_structure(struct)\n return nest.map_structure_up_to(\n shallow_struct, lambda s: _nested_convert_to_tensor(s, name=name), struct)\n\n\ndef convert_args_to_tensor(args, dtype=None, name=None):\n \"\"\"Converts `args` to `Tensor`s.\n\n Use this when it is necessary to convert user-provided arguments that will\n then be passed to user-provided callables.\n\n When `dtype` is `None` this function behaves as follows:\n\n 1A. If the top-level structure is a `list`/`tuple` but not a `namedtuple`,\n then it is left as is and only its elements are converted to `Tensor`s.\n\n 2A. The sub-structures are converted to `Tensor`s eagerly. E.g. if `args` is\n `{'arg': [[1], [2]]}` it is converted to\n `{'arg': tf.constant([[1], [2]])}`. If the conversion fails, it will\n attempt to recurse into its children.\n\n When `dtype` is specified, it acts as both a structural and numeric type\n constraint. `dtype` can be a single `DType`, `None` or a nested collection\n thereof. The conversion rule becomes as follows:\n\n 1B. The return value of this function will have the same structure as `dtype`.\n\n 2B. If the leaf of `dtype` is a concrete `DType`, then the corresponding\n sub-structure in `args` is converted to a `Tensor`.\n\n 3B. If the leaf of `dtype` is `None`, then the corresponding sub-structure is\n converted eagerly as described in the rule 2A above.\n\n Args:\n args: Arguments to convert to `Tensor`s.\n dtype: Optional structure/numeric type constraint.\n name: Optional name-scope to use.\n\n Returns:\n args: Converted `args`.\n\n #### Examples.\n\n This table shows some useful conversion cases. 
`T` means `Tensor`, `NT` means\n `namedtuple` and `CNT` means a `namedtuple` with a `Tensor`-conversion\n function registered.\n\n | args | dtype | output |\n |:------------:|:----------:|:------------------:|\n | `{\"a\": 1}` | `None` | `{\"a\": T(1)}` |\n | `T(1)` | `None` | `T(1)` |\n | `[1]` | `None` | `[T(1)]` |\n | `[1]` | `tf.int32` | `T([1])` |\n | `[[T(1)]]` | `None` | `[T([1])]` |\n | `[[T(1)]]` | `[[None]]` | `[[T(1)]]` |\n | `NT(1, 2)` | `None` | `NT(T(1), T(2))` |\n | `NT(1, 2)` | `tf.int32` | `T([1, 2])` |\n | `CNT(1, 2)` | `None` | `T(...)` |\n | `[[1, [2]]]` | `None` | `[[T(1), T([2])]]` |\n\n \"\"\"\n if dtype is None:\n if expand_as_args(args) or _expand_as_kwargs(args):\n shallow_args = _get_shallow_structure(args)\n return nest.map_structure_up_to(\n shallow_args, lambda s: _nested_convert_to_tensor(s, name=name), args)\n else:\n return _nested_convert_to_tensor(args, name=name)\n else:\n return nest.map_structure_up_to(\n dtype, lambda s, dtype: _nested_convert_to_tensor(s, dtype, name), args,\n dtype)\n\n\ndef call_fn(fn, args):\n \"\"\"Calls `fn` with `args`, possibly expanding `args`.\n\n Use this function when calling a user-provided callable using user-provided\n arguments.\n\n The expansion rules are as follows:\n\n `fn(*args)` if `args` is a `list` or a `tuple`, but not a `namedtuple`.\n `fn(**args)` if `args` is a `dict`.\n `fn(args)` otherwise.\n\n Args:\n fn: A callable that takes either `args` as an argument(s).\n args: Arguments to `fn`.\n\n Returns:\n result: Return value of `fn`.\n \"\"\"\n\n if expand_as_args(args):\n return fn(*args)\n elif _expand_as_kwargs(args):\n return fn(**args)\n else:\n return fn(args)\n\n\ndef convert_to_nested_tensor(value, dtype=None, dtype_hint=None,\n allow_packing=False,\n name=None):\n \"\"\"Converts the given `value` to a (structure of) `Tensor`.\n\n This function converts Python objects of various types to a (structure of)\n `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and\n Python scalars.\n\n Args:\n value: An object whose structure matches that of `dtype ` and for which each\n leaf has a registered `Tensor` conversion function.\n dtype: Optional structure of dtypes defining the structure of outputs and\n the `dtype` argument for nested calls to `convert_to_tensor`. If not\n nested, will be broadcasted to match the structure of `dtype_hint`.\n dtype_hint: Optional structure of dtypes defining the structure of outputs\n and the `dtype_hint` argument for nested calls to `convert_to_tensor`. If\n not nested, will be broadcasted to match the structure of `dtype`.\n allow_packing: Python `bool`, default `False`. If `True`, allow\n `convert_to_nested_tensor` to stack nested lists of Tensors along the\n leading dimension. Otherwise, raise.\n name: Optional name to use if a new `Tensor` is created. 
If inputs are\n structured, elements are named accoring to '{name}/{path}.{to}.{elem}'.\n\n Returns:\n tensor: A (structure of) `Tensor` based on `value`.\n \"\"\"\n dtype_is_nested = nest.is_nested(dtype)\n hint_is_nested = nest.is_nested(dtype_hint)\n # If only one of dtype/dtype_hint is nested, broadcast the atom to match.\n if dtype_is_nested and hint_is_nested:\n nest.assert_same_structure(dtype, dtype_hint)\n elif dtype_is_nested:\n dtype_hint = broadcast_structure(dtype, dtype_hint)\n elif hint_is_nested:\n dtype = broadcast_structure(dtype_hint, dtype)\n\n def convert_fn(path, value, dtype, dtype_hint, name=None):\n if not allow_packing and nest.is_nested(value) and any(\n # Treat arrays like Tensors for full parity in JAX backend.\n tf.is_tensor(x) or isinstance(x, np.ndarray)\n for x in nest.flatten(value)):\n raise NotImplementedError(('Cannot convert a structure of tensors to a '\n 'single tensor. Saw {} at path {}.'\n ).format(value, path))\n return tf.convert_to_tensor(value, dtype, dtype_hint, name=name)\n\n ### The following branches only affect naming.\n # For unstructured calls, just use the provided name.\n if not nest.is_nested(dtype):\n return convert_fn((), value, dtype, dtype_hint, name=name)\n # For structured calls where name is provided, include a scope and name\n # members according to \"{path}.{to}.{element}\".\n elif name is not None:\n with tf.name_scope(name):\n convert_with_name = lambda path, *args: convert_fn( # pylint: disable=g-long-lambda\n path, *args, name='.'.join(map(str, path)))\n return nest.map_structure_with_tuple_paths_up_to(\n dtype, convert_with_name, value, dtype, dtype_hint, check_types=False)\n # For structured calls without name, skip the scope and don't pass a\n # struct-path to convert-to-tensor.\n else:\n return nest.map_structure_with_tuple_paths_up_to(\n dtype, convert_fn, value, dtype, dtype_hint, check_types=False)\n" ]
[ [ "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.is_tensor", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.python.util.nest.flatten", "tensorflow.compat.v2.nest.flatten", "tensorflow.python.util.nest.get_traverse_shallow_structure", "tensorflow.python.util.nest.is_nested", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.util.nest.map_structure_with_tuple_paths_up_to", "tensorflow.compat.v2.nest.is_nested", "tensorflow.compat.v2.nest.map_structure" ] ]
sacherjj/python-ivi
[ "6dd1ba93d65dc30a652a3a1b34c66921d94315e8" ]
[ "ivi/ivi.py" ]
[ "\"\"\"\n\nPython Interchangeable Virtual Instrument Library\n\nCopyright (c) 2012-2017 Alex Forencich\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\"\"\"\n\n# import libraries\nimport inspect\nimport numpy as np\nimport re\nfrom functools import partial\n\n# try importing drivers\n# python-vxi11 for LAN instruments\ntry:\n import vxi11\nexcept ImportError:\n pass\n\n# python-usbtmc for USBTMC instrument support\ntry:\n import usbtmc\nexcept ImportError:\n pass\n\n# linuxgpib wrapper for linux-gpib Gpib class\n# for GPIB interfaces\ntry:\n from .interface import linuxgpib\nexcept ImportError:\n pass\n\n# pySerial wrapper for serial instrument support\ntry:\n from .interface import pyserial\nexcept ImportError:\n pass\n\n# pyvisa wrapper for PyVISA library support\ntry:\n from .interface import pyvisa\nexcept ImportError:\n pass\n\n# set to True to try loading PyVISA first before\n# other interface libraries\n_prefer_pyvisa = False\n\ndef get_prefer_pyvisa():\n global _prefer_pyvisa\n return _prefer_pyvisa\n\ndef set_prefer_pyvisa(value=True):\n global _prefer_pyvisa\n _prefer_pyvisa = bool(value)\n\n# version information\nfrom .version import __version__\nversion = __version__\n\n# Exceptions\nclass IviException(Exception): pass\nclass IviDriverException(IviException): pass\nclass FileFormatException(IviDriverException): pass\nclass IdQueryFailedException(IviDriverException): pass\nclass InstrumentStatusExcpetion(IviDriverException): pass\nclass InvalidOptionValueException(IviDriverException): pass\nclass IOException(IviDriverException): pass\nclass IOTimeoutException(IviDriverException): pass\nclass MaxTimeoutExceededException(IviDriverException): pass\nclass NotInitializedException(IviDriverException): pass\nclass OperationNotSupportedException(IviDriverException): pass\nclass OperationPendingException(IviDriverException): pass\nclass OptionMissingException(IviDriverException): pass\nclass OptionStringFormatException(IviDriverException): pass\nclass OutOfRangeException(IviDriverException): pass\nclass ResetFailedException(IviDriverException): pass\nclass ResetNotSupportedException(IviDriverException): pass\nclass SelectorFormatException(IviDriverException): pass\nclass SelectorHierarchyException(IviDriverException): pass\nclass SelectorNameException(IviDriverException): pass\nclass SelectorNameRequiredException(IviDriverException): pass\nclass SelectorRangeException(IviDriverException): pass\nclass SimulationStateException(IviDriverException): pass\nclass 
TriggerNotSoftwareException(IviDriverException): pass\nclass UnexpectedResponseException(IviDriverException): pass\nclass UnknownOptionException(IviDriverException): pass\nclass UnknownPhysicalNameException(IviDriverException): pass\nclass ValueNotSupportedException(IviDriverException): pass\n\n\ndef get_index(l, i):\n \"\"\"Validate index from list or dict of possible values\"\"\"\n if type(l) is dict:\n try:\n return l[i]\n except KeyError:\n if type(i) is int:\n raise SelectorRangeException()\n raise SelectorNameException()\n\n if i in l:\n return l.index(i)\n if type(i) == int:\n if i < 0 or i >= len(l):\n raise SelectorRangeException()\n return i\n raise SelectorNameException()\n\n\ndef get_index_dict(l):\n \"\"\"Construct a dict object for faster index lookups\"\"\"\n d = {}\n for i in range(len(l)):\n d[l[i]] = i\n d[i] = i\n return d\n\n\nclass PropertyCollection(object):\n \"A building block to create hierarchical trees of methods and properties\"\n def __init__(self):\n d = object.__getattribute__(self, '__dict__')\n d.setdefault('_props', dict())\n d.setdefault('_docs', dict())\n d.setdefault('_locked', False)\n \n def _add_property(self, name, fget=None, fset=None, fdel=None, doc=None):\n \"Add a managed property\"\n d = object.__getattribute__(self, '__dict__')\n d['_props'][name] = (fget, fset, fdel)\n d['_docs'][name] = doc\n d[name] = None\n \n def _add_method(self, name, f=None, doc=None):\n \"Add a managed method\"\n d = object.__getattribute__(self, '__dict__')\n d['_docs'][name] = doc\n d[name] = f\n \n def _del_property(self, name):\n \"Remove managed property or method\"\n d = object.__getattribute__(self, '__dict__')\n del d['_props'][name]\n del d['_docs'][name]\n del d[name]\n \n def _lock(self, lock=True):\n \"Set lock state to prevent creation or deletion of unmanaged members\"\n d = object.__getattribute__(self, '__dict__')\n d['_locked'] = lock\n \n def _unlock(self):\n \"Unlock object to allow creation or deletion of unmanaged members, equivalent to _lock(False)\"\n self._lock(False)\n \n def __getattribute__(self, name):\n if name == '__dict__':\n return object.__getattribute__(self, name)\n d = object.__getattribute__(self, '__dict__')\n d.setdefault('_props', dict())\n d.setdefault('_locked', False)\n if name in d['_props']:\n f = d['_props'][name][0]\n if f is None:\n raise AttributeError(\"unreadable attribute\")\n return f()\n return object.__getattribute__(self, name)\n \n def __setattr__(self, name, value):\n d = object.__getattribute__(self, '__dict__')\n d.setdefault('_props', dict())\n d.setdefault('_locked', False)\n if name in d['_props']:\n f = d['_props'][name][1]\n if f is None:\n raise AttributeError(\"can't set attribute\")\n f(value)\n return\n if name not in d and self._locked:\n raise AttributeError(\"locked\")\n object.__setattr__(self, name, value)\n \n def __delattr__(self, name):\n d = object.__getattribute__(self, '__dict__')\n d.setdefault('_props', dict())\n d.setdefault('_locked', False)\n if name in d['_props']:\n f = d['_props'][name][2]\n if f is None:\n raise AttributeError(\"can't delete attribute\")\n f()\n return\n if name not in d and self._locked:\n raise AttributeError(\"locked\")\n object.__delattr__(self, name)\n \n\nclass IndexedPropertyCollection(object):\n \"A building block to create hierarchical trees of methods and properties with an index that is converted to a parameter\"\n def __init__(self):\n self._props = dict()\n self._docs = dict()\n self._indicies = list()\n self._indicies_dict = dict()\n self._objs = 
list()\n \n def _add_property(self, name, fget=None, fset=None, fdel=None, doc=None, props = None, docs = None):\n \"Add a managed property\"\n if props is None:\n props = self._props\n if docs is None:\n docs = self._docs\n l = name.split('.',1)\n n = l[0]\n r = ''\n if len(l) > 1: r = l[1]\n if n not in props:\n props[n] = dict()\n docs[n] = dict()\n if type(props[n]) != dict:\n raise AttributeError(\"property already defined\")\n if len(r) > 0:\n self._add_property(r, fget, fset, fdel, doc, props[n], docs[n])\n else:\n props[n] = (fget, fset, fdel)\n docs[n] = doc\n \n def _add_method(self, name, f=None, doc=None, props = None, docs = None):\n \"Add a managed method\"\n if props is None:\n props = self._props\n if docs is None:\n docs = self._docs\n l = name.split('.',1)\n n = l[0]\n r = ''\n if len(l) > 1: r = l[1]\n if n not in props:\n props[n] = dict()\n docs[n] = dict()\n if type(props[n]) != dict:\n raise AttributeError(\"property already defined\")\n if len(r) > 0:\n self._add_method(r, f, doc, props[n], docs[n])\n else:\n props[n] = f\n docs[n] = doc\n \n def _add_sub_property(self, sub, name, fget=None, fset=None, fdel=None, doc=None):\n \"Add a sub-property (equivalent to _add_property('sub.name', ...))\"\n self._add_property(sub+'.'+name, fget, fset, fdel, doc)\n \n def _add_sub_method(self, sub, name, f=None, doc=None):\n \"Add a sub-method (equivalent to _add_method('sub.name', ...))\"\n self._add_method(sub+'.'+name, f, doc)\n \n def _del_property(self, name):\n \"Delete property\"\n l = name.split('.',1)\n n = l[0]\n r = ''\n if len(l) > 1: r = l[1]\n if len(r) > 0:\n self._del_property(r)\n else:\n del self._props[name]\n del self._docs[name]\n \n def _build_obj(self, props, docs, i):\n \"Build a tree of PropertyCollection objects with the proper index associations\"\n obj = PropertyCollection()\n for n in props:\n itm = props[n]\n doc = docs[n]\n if type(itm) == tuple:\n fget, fset, fdel = itm\n fgeti = fseti = fdeli = None\n if fget is not None: fgeti = partial(fget, i)\n if fset is not None: fseti = partial(fset, i)\n if fdel is not None: fdeli = partial(fdel, i)\n obj._add_property(n, fgeti, fseti, fdeli, doc)\n elif type(itm) == dict:\n o2 = self._build_obj(itm, doc, i)\n obj.__dict__[n] = o2\n elif hasattr(itm, \"__call__\"):\n obj._add_method(n, partial(itm, i), doc)\n obj._lock()\n return obj\n \n def _set_list(self, l):\n \"Set a list of allowable indicies as an associative array\"\n self._indicies = list(l)\n self._indicies_dict = get_index_dict(self._indicies)\n self._objs = list()\n for i in range(len(self._indicies)):\n self._objs.append(self._build_obj(self._props, self._docs, i))\n \n def __getitem__(self, key):\n if type(key) is slice:\n return self._objs[key]\n i = get_index(self._indicies_dict, key)\n return self._objs[i]\n\n def __iter__(self):\n return self._objs.__iter__()\n \n def __len__(self):\n return len(self._indicies)\n \n def count(self):\n return len(self._indicies)\n\n\nclass IviContainer(PropertyCollection):\n def __init__(self, *args, **kwargs):\n super(IviContainer, self).__init__(*args, **kwargs)\n\n def _add_attribute(self, name, attr, doc = None):\n cur_obj = self\n\n # iterate over name\n rest = name\n while len(rest) > 0:\n # split at first dot\n l = rest.split('.',1)\n base = l[0]\n rest = ''\n\n # save the rest\n if len(l) > 1:\n rest = l[1]\n\n # is it an indexed object?\n k = base.find('[')\n if k > 0:\n # if so, stop here and add an indexed property collection\n base = base[:k]\n cur_obj.__dict__.setdefault(base, 
IndexedPropertyCollection())\n cur_obj = cur_obj.__dict__[base]\n base = rest\n rest = ''\n else:\n # if not, add a property collection and keep going\n cur_obj.__dict__.setdefault(base, PropertyCollection())\n cur_obj = cur_obj.__dict__[base]\n\n if type(doc) == Doc:\n doc.name = name\n\n if cur_obj == self:\n if type(attr) == tuple:\n fget, fset, fdel = attr\n PropertyCollection._add_property(self, base, fget, fset, fdel, doc)\n else:\n PropertyCollection._add_method(self, base, attr, doc)\n else:\n if type(attr) == tuple:\n fget, fset, fdel = attr\n cur_obj._add_property(base, fget, fset, fdel, doc)\n else:\n cur_obj._add_method(base, attr, doc)\n\n def _add_method(self, name, f, doc = None):\n self._add_attribute(name, f, doc)\n\n def _add_property(self, name, fget, fset = None, fdel = None, doc = None):\n self._add_attribute(name, (fget, fset, fdel), doc)\n\n\nclass Doc(object):\n \"IVI documentation object\"\n def __init__(self, doc = '', cls = '', grp = '', section = '', name = ''):\n self.doc = trim_doc(doc)\n self.name = name\n self.cls = cls\n self.grp = grp\n self.section = section\n \n def render(self):\n txt = '.. attribute:: ' + self.name + '\\n\\n'\n if self.cls != '':\n txt += ' *IVI class ' + self.cls + \\\n ', capability group ' + self.cls + self.grp + \\\n ', section ' + self.section + '*\\n\\n'\n txt += '\\n'.join(' ' + x for x in self.doc.splitlines())\n txt += '\\n'\n return txt\n \n def __str__(self):\n return self.doc\n\n\nclass TraceY(object):\n \"Y trace object\"\n def __init__(self):\n self.average_count = 1\n self.y_increment = 0\n self.y_origin = 0\n self.y_reference = 0\n self.y_raw = None\n self.y_hole = None\n\n @property\n def y(self):\n y = np.array(self.y_raw)\n yf = y.astype(float)\n if self.y_hole is not None:\n yf[y == self.y_hole] = float('nan')\n return ((yf - self.y_reference) * self.y_increment) + self.y_origin\n\n def __getitem__(self, index):\n y = self.y_raw[index]\n if y == self.y_hole:\n y = float('nan')\n return ((y - self.y_reference) * self.y_increment) + self.y_origin\n\n def __iter__(self):\n return (float('nan') if y == self.y_hole else ((y - self.y_reference) * self.y_increment + self.y_origin) for i, y in enumerate(self.y_raw))\n\n def __len__(self):\n return len(self.y_raw)\n\n def count(self):\n return len(self.y_raw)\n\n\nclass TraceYT(TraceY):\n \"Y-T trace object\"\n def __init__(self):\n super(TraceYT, self).__init__()\n self.x_increment = 0\n self.x_origin = 0\n self.x_reference = 0\n\n @property\n def x(self):\n return ((np.arange(len(self.y_raw)) - self.x_reference) * self.x_increment) + self.x_origin\n\n @property\n def t(self):\n return self.x\n\n def __getitem__(self, index):\n y = self.y_raw[index]\n if y == self.y_hole:\n y = float('nan')\n return (((index - self.x_reference) * self.x_increment) + self.x_origin, ((y - self.y_reference) * self.y_increment) + self.y_origin)\n\n def __iter__(self):\n return ((((i - self.x_reference) * self.x_increment) + self.x_origin, float('nan') if y == self.y_hole else ((y - self.y_reference) * self.y_increment) + self.y_origin) for i, y in enumerate(self.y_raw))\n\n\ndef add_attribute(obj, name, attr, doc = None):\n IviContainer._add_attribute(obj, name, attr, doc)\n\n\ndef add_method(obj, name, f, doc = None):\n add_attribute(obj, name, f, doc)\n\n\ndef add_property(obj, name, fget, fset = None, fdel = None, doc = None):\n add_attribute(obj, name, (fget, fset, fdel), doc)\n\n\ndef add_group_capability(obj, cap):\n obj.__dict__.setdefault('_identity_group_capabilities', list())\n 
obj._identity_group_capabilities.insert(0, cap)\n\n\ndef build_ieee_block(data):\n \"Build IEEE block\"\n # IEEE block binary data is prefixed with #lnnnnnnnn\n # where l is length of n and n is the\n # length of the data\n # ex: #800002000 prefixes 2000 data bytes\n return str('#8%08d' % len(data)).encode('utf-8') + data\n\n \ndef decode_ieee_block(data):\n \"Decode IEEE block\"\n # IEEE block binary data is prefixed with #lnnnnnnnn\n # where l is length of n and n is the\n # length of the data\n # ex: #800002000 prefixes 2000 data bytes\n if len(data) == 0:\n return b''\n \n ind = 0\n c = '#'.encode('utf-8')\n while data[ind:ind+1] != c:\n ind += 1\n \n ind += 1\n l = int(data[ind:ind+1])\n ind += 1\n \n if (l > 0):\n num = int(data[ind:ind+l].decode('utf-8'))\n ind += l\n \n return data[ind:ind+num]\n else:\n return data[ind:]\n\n\ndef get_sig(sig):\n \"Parse various signal inputs into x and y components\"\n if type(sig) == tuple and len(sig) == 2:\n # tuple of two lists or arrays\n x, y = sig\n x = np.array(x)\n y = np.array(y)\n elif type(sig) == list and type(sig[0]) == tuple and len(sig[0]) == 2:\n # list of tuples\n x, y = zip(*sig)\n x = np.array(x)\n y = np.array(y)\n elif (type(sig) == np.ndarray or type(sig) == np.matrix) and len(sig.shape) == 2 and sig.shape[0] == 2:\n # 2D array, hieght 2\n x = np.array(sig[0])\n y = np.array(sig[1])\n elif (type(sig) == np.ndarray or type(sig) == np.matrix) and len(sig.shape) == 2 and sig.shape[1] == 2:\n # 2D array, width 2\n x = np.array(sig[:,0])\n y = np.array(sig[:,1])\n else:\n raise Exception('Unknown argument')\n \n if len(x) != len(y):\n raise Exception('Signals must be the same length!')\n \n return x, y\n\n\ndef rms(y):\n \"Calculate the RMS value of the signal\"\n return np.linalg.norm(y) / np.sqrt(y.size)\n\n\ndef trim_doc(docstring):\n if not docstring:\n return ''\n docstring = str(docstring)\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = 10000\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < 10000:\n for line in lines[1:]:\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed)\n\ndef doc(obj=None, itm=None, docs=None, prefix=None):\n \"\"\"Python IVI documentation generator\"\"\"\n st = \"\"\n \n # add a dot to prefix when needed\n if prefix is None or len(prefix) == 0:\n prefix = ''\n elif not prefix[-1] == '.':\n prefix += '.'\n \n # if something passed in docs, iterate over it\n if docs is not None:\n for n in sorted(docs.keys()):\n d = docs[n]\n if type(d) == dict:\n # recurse into node\n st += doc(docs=d, prefix=prefix+n)\n else:\n # print leaf (method or property)\n st += prefix + n + \"\\n\"\n \n return st\n \n if itm is not None:\n # split off first component before the dot\n l = itm.split('.',1)\n n = l[0]\n r = ''\n \n # remove brackets\n k = n.find('[')\n if k > 0:\n n = n[:k]\n \n # if there is more left, need to recurse\n if len(l) > 1:\n r = l[1]\n \n # hand off to parent\n if type(obj) == dict and n in obj:\n return doc(obj[n], r, prefix=prefix+n)\n \n elif n in obj.__dict__:\n return 
doc(obj.__dict__[n], r, prefix=prefix+n)\n \n elif hasattr(obj, '_docs') and n in obj._docs:\n d = obj._docs[n]\n if type(d) == dict:\n return doc(d, r, prefix=prefix+n)\n \n else:\n \n d = None\n \n # return documentation if present\n if type(obj) == dict and n in obj:\n d = obj[n]\n \n elif hasattr(obj, '_docs') and n in obj._docs:\n d = obj._docs[n]\n\n elif hasattr(obj, n) and hasattr(getattr(obj, n), '_docs'):\n return doc(getattr(obj, n))\n\n if type(d) == Doc:\n return d\n elif type(d) == str:\n return trim_doc(d)\n \n return \"error\"\n \n \n if hasattr(obj, '__dict__'):\n # if obj has __dict__, iterate over it\n for n in sorted(obj.__dict__.keys()):\n o = obj.__dict__[n]\n \n # add brackets for indexed property collections\n extra = ''\n if type(o) == IndexedPropertyCollection:\n extra = '[]'\n \n if n == '_docs':\n # process documentation dict\n st += doc(docs=o, prefix=prefix)\n elif hasattr(o, '_docs'):\n # process object that contains a documentation dict\n st += doc(o, prefix=prefix+n)\n \n # if we got something, return it\n if len(st) > 0:\n return st\n \n return \"error\"\n\ndef help(obj=None, itm=None, complete=False, indent=0):\n \"\"\"Python IVI help system\"\"\"\n if complete:\n l = doc(obj).split('\\n')\n l = sorted(filter(None, l))\n for m in l:\n d = doc(obj, m)\n \n if type(d) == Doc:\n print(d.render())\n if type(d) == str:\n print((indent * ' ') + '.. attribute:: ' + m + '\\n')\n #print('-'*len(m)+'\\n')\n d = '\\n'.join(((indent + 3) * ' ') + x for x in d.splitlines())\n print(d)\n print('\\n')\n elif obj is not None:\n if itm is not None and type(itm) is not str:\n print(doc(itm))\n else:\n print(doc(obj, itm))\n else:\n print(trim_doc(\"\"\"\n Using Python IVI help\n ---------------------\n \n Use the help method to get documentation on IVI methods and properties. The\n IVI help system is a little different from the built-in Python help system.\n Here are some examples on how to use it correctly:\n\n This help method can be called with no parameters:\n\n import ivi\n instr = ivi.Driver()\n instr.help()\n\n This will print a list of all of the available methods and properties,\n like so:\n\n close\n initialized\n initialize\n driver_operation.cache\n driver_operation.clear_interchange_warnings\n driver_operation.driver_setup\n ...\n\n The higher level groups can also be passed to the help method:\n\n import ivi\n instr = ivi.Driver()\n instr.help(instr.identity)\n\n This will output everything inside of the sub group:\n\n get_supported_instrument_models\n get_group_capabilities\n specification_major_version\n ...\n\n Finally, individual methods and properties can be passed as strings:\n\n import ivi\n instr = ivi.Driver()\n instr.help(\"identity.supported_instrument_models\")\n\n This will result in the complete documentation:\n\n Returns a comma-separated list of names of instrument models with which\n the IVI specific driver is compatible. 
The string has no white space\n ...\n \"\"\"))\n\n\ndef list_resources():\n res = []\n\n if 'vxi11' in globals():\n # search for VXI11 devices\n try:\n res.extend(vxi11.list_resources())\n except:\n pass\n\n if 'usbtmc' in globals():\n # search for USBTMC devices\n try:\n res.extend(usbtmc.list_resources())\n except:\n pass\n\n return res\n\n\nclass DriverOperation(IviContainer):\n \"Inherent IVI methods for driver operation\"\n \n def __init__(self, *args, **kwargs):\n super(DriverOperation, self).__init__(*args, **kwargs)\n \n self._driver_operation_cache = True\n self._driver_operation_driver_setup = \"\"\n self._driver_operation_interchange_check = False\n self._driver_operation_logical_name = \"\"\n self._driver_operation_query_instrument_status = False\n self._driver_operation_range_check = True\n self._driver_operation_record_coercions = False\n self._driver_operation_io_resource_descriptor = \"\"\n self._driver_operation_simulate = False\n \n self._driver_operation_interchange_warnings = list()\n self._driver_operation_coercion_records = list()\n \n self._add_property('driver_operation.cache',\n self._get_driver_operation_cache,\n self._set_driver_operation_cache,\n None,\n \"\"\"\n If True, the specific driver caches the value of attributes, and the IVI\n specific driver keeps track of the current instrument settings so that it\n can avoid sending redundant commands to the instrument. If False, the\n specific driver does not cache the value of attributes.\n \n The default value is True. When the user opens an instrument session\n through an IVI class driver or uses a logical name to initialize a\n specific driver, the user can override this value by specifying a value in\n the IVI configuration store. The Initialize function allows the user to\n override both the default value and the value that the user specifies in\n the IVI configuration store.\n \"\"\")\n self._add_property('driver_operation.driver_setup',\n self._get_driver_operation_driver_setup,\n None,\n None,\n \"\"\"\n Returns the driver setup string that the user specified in the IVI\n configuration store when the instrument driver session was initialized or\n passes in the OptionString parameter of the Initialize function. Refer to\n Section 6.14, Initialize, for the restrictions on the format of the driver\n setup string.\n \n The string that this attribute returns does not have a predefined maximum\n length.\n \"\"\")\n self._add_property('driver_operation.interchange_check',\n self._get_driver_operation_interchange_check,\n self._set_driver_operation_interchange_check,\n None,\n \"\"\"\n If True, the specific driver performs interchangeability checking. If the\n Interchange Check attribute is enabled, the specific driver maintains a\n record of each interchangeability warning that it encounters. The user\n calls the Get Next Interchange Warning function to extract and delete the\n oldest interchangeability warning from the list. Refer to Section 6.11,\n Get Next Interchange Warning, Section 6.2, Clear Interchange Warnings,\n and Section 6.18, Reset Interchange Check, for more information. If False,\n the specific driver does not perform interchangeability checking.\n \n If the user opens an instrument session through an IVI class driver and\n the Interchange Check attribute is enabled, the IVI class driver may\n perform additional interchangeability checking. 
The IVI\n class driver\n maintains a list of the interchangeability warnings that it encounters.\n The user can retrieve both class driver interchangeability warnings and\n specific driver interchangeability warnings by calling the Get Next\n Interchange Warning function on the class driver session.\n \n If the IVI specific driver does not implement interchangeability checking,\n the specific driver returns the Value Not Supported error when the user\n attempts to set the Interchange Check attribute to True. If the specific\n driver does implement interchangeability checking and the user opens an\n instrument session through an IVI class driver, the IVI class driver\n accepts True as a valid value for the Interchange Check attribute even if\n the class driver does not implement interchangeability checking\n capabilities of its own.\n \n The default value is False. If the user opens an instrument session\n through an IVI class driver or initializes an IVI specific driver with a\n logical name, the user can override this value in the IVI configuration\n store. The Initialize function allows the user to override both the\n default value and the value that the user specifies in the IVI\n configuration store.\n \"\"\")\n self._add_property('driver_operation.logical_name',\n self._get_driver_operation_logical_name,\n None,\n None,\n \"\"\"\n Returns the IVI logical name that the user passed to the Initialize\n function. If the user initialized the IVI specific driver directly and did\n not pass a logical name, then this attribute returns an empty string.\n Refer to IVI-3.5: Configuration Server Specification for restrictions on\n the format of IVI logical names.\n \n The string that this attribute returns contains a maximum of 256\n characters including the NULL character.\n \"\"\")\n self._add_property('driver_operation.query_instrument_status',\n self._get_driver_operation_query_instrument_status,\n self._set_driver_operation_query_instrument_status,\n None,\n \"\"\"\n If True, the IVI specific driver queries the instrument status at the end\n of each user operation. If False, the IVI specific driver does not query\n the instrument status at the end of each user operation. Querying the\n instrument status is very useful for debugging. After validating the\n program, the user can set this attribute to False to disable status\n checking and maximize performance. The user specifies this value for the\n entire IVI driver session.\n \n The default value is False. When the user opens an instrument session\n through an IVI class driver or uses a logical name to initialize an IVI\n specific driver, the user can override this value by specifying a value in\n the IVI configuration store. The Initialize function allows the user to\n override both the default value and the value that the user specifies in\n the IVI configuration store.\n \"\"\")\n self._add_property('driver_operation.range_check',\n self._get_driver_operation_range_check,\n self._set_driver_operation_range_check,\n None,\n \"\"\"\n If True, the IVI specific driver validates attribute values and function\n parameters. If False, the IVI specific driver does not validate attribute\n values and function parameters.\n \n If range check is enabled, the specific driver validates the parameter\n values that users pass to driver functions. Validating attribute values\n and function parameters is useful for debugging. After validating the\n program, the user can set this attribute to False to disable range\n checking and maximize performance. 
The default value is True. When the\n user opens an instrument session through an IVI class driver or uses a\n logical name to initialize an IVI specific driver, the user can override\n this value by specifying a value in the IVI configuration store. The\n Initialize function allows the user to override both the default value and\n the value that the user specifies in the IVI configuration store.\n \"\"\")\n self._add_property('driver_operation.record_coercions',\n self._get_driver_operation_record_coercions,\n self._set_driver_operation_record_coercions,\n None,\n \"\"\"\n If True, the IVI specific driver keeps a list of the value coercions it\n makes for ViInt32 and ViReal64 attributes. If False, the IVI specific\n driver does not keep a list of the value coercions it makes for ViInt32 and\n ViReal64 attributes.\n \n If the Record Value Coercions attribute is enabled, the specific driver\n maintains a record of each coercion. The user calls the Get Next Coercion\n Record function to extract and delete the oldest coercion record from the\n list. Refer to Section 6.10, Get Next Coercion Record, for more\n information.\n \n If the IVI specific driver does not implement coercion recording, the\n specific driver returns the Value Not Supported error when the user\n attempts to set the Record Value Coercions attribute to True.\n \n The default value is False. When the user opens an instrument session\n through an IVI class driver or uses a logical name to initialize an IVI\n specific driver, the user can override this value by specifying a value in\n the IVI configuration store. The Initialize function allows the user to\n override both the default value and the value that the user specifies in\n the IVI configuration store.\n \"\"\")\n self._add_property('driver_operation.io_resource_descriptor',\n self._get_driver_operation_io_resource_descriptor,\n None,\n None,\n \"\"\"\n Returns the resource descriptor that the user specified for the physical\n device. The user specifies the resource descriptor by editing the IVI\n configuration store or by passing a resource descriptor to the Initialize\n function of the specific driver. Refer to Section 6.14, Initialize, for the\n restrictions on the contents of the resource descriptor string.\n \n The string that this attribute returns contains a maximum of 256 characters\n including the NULL character.\n \"\"\")\n self._add_property('driver_operation.simulate',\n self._get_driver_operation_simulate,\n None,\n None,\n \"\"\"\n If True, the IVI specific driver simulates instrument driver I/O\n operations. If False, the IVI specific driver communicates directly with\n the instrument.\n \n If simulation is enabled, the specific driver functions do not perform\n instrument I/O. For output parameters that represent instrument data, the\n specific driver functions return simulated values.\n \n The default value is False. When the user opens an instrument session\n through an IVI class driver or uses a logical name to initialize an IVI\n specific driver, the user can override this value by specifying a value in\n the IVI configuration store. 
The Initialize function allows the user to\n override both the default value and the value that the user specifies in\n the IVI configuration store.\n \"\"\")\n self._add_method('driver_operation.clear_interchange_warnings',\n self._driver_operation_clear_interchange_warnings,\n \"\"\"\n This function clears the list of interchangeability warnings that the IVI\n specific driver maintains.\n \n When this function is called on an IVI class driver session, the function\n clears the list of interchangeability warnings that the class driver and\n the specific driver maintain.\n \n Refer to the Interchange Check attribute for more information on\n interchangeability checking.\n \"\"\")\n self._add_method('driver_operation.get_next_coercion_record',\n self._driver_operation_get_next_coercion_record,\n \"\"\"\n If the Record Value Coercions attribute is set to True, the IVI specific\n driver keeps a list of all value coercions it makes on integer and\n floating point attributes. This function obtains the coercion information\n associated with the IVI session. It retrieves and clears the oldest\n instance in which the specific driver coerced a value the user specified\n to another value.\n \n The function returns an empty string in the CoercionRecord parameter if no\n coercion records remain for the session.\n \n The coercion record string shall contain the following information:\n \n * The name of the attribute that was coerced. This can be the generic name,\n the COM property name, or the C defined constant.\n * If the attribute applies to a repeated capability, the name of the\n virtual or physical repeated capability identifier.\n * The value that the user specified for the attribute.\n * The value to which the attribute was coerced.\n \n A recommended format for the coercion record string is as follows::\n \n \" Attribute \" + <attribute name> + [\" on <repeated capability> \" +\n <repeated capability identifier>] + \" was coerced from \" +\n <desiredVal> + \" to \" + <coercedVal>\n \n An example coercion record string is as follows::\n \n Attribute TKTDS500_ATTR_VERTICAL_RANGE on channel ch1 was coerced from\n 9.0 to 10.0.\n \n \"\"\")\n self._add_method('driver_operation.get_next_interchange_warning',\n self._driver_operation_get_next_interchange_warning,\n \"\"\"\n If the Interchange Check attribute is set to True, the IVI specific driver\n keeps a list of all interchangeability warnings that it encounters. This\n function returns the interchangeability warnings associated with the IVI\n session. It retrieves and clears the oldest interchangeability warning\n from the list. Interchangeability warnings indicate that using the\n application with a different instrument might cause different behavior.\n \n When this function is called on an IVI class driver session, it may return\n interchangeability warnings generated by the IVI class driver as well as\n interchangeability warnings generated by the IVI specific driver. 
The IVI\n class driver determines the relative order in which the IVI class driver\n warnings are returned in relation to the IVI specific driver warnings.\n \n The function returns an empty string in the InterchangeWarning parameter\n if no interchangeability warnings remain for the session.\n \n Refer to the Interchange Check attribute for more information on\n interchangeability checking.\n \"\"\")\n self._add_method('driver_operation.invalidate_all_attributes',\n self._driver_operation_invalidate_all_attributes,\n \"\"\"\n This function invalidates the cached values of all attributes for the\n session.\n \"\"\")\n self._add_method('driver_operation.reset_interchange_check',\n self._driver_operation_reset_interchange_check,\n \"\"\"\n This function resets the interchangeability checking algorithms of the IVI\n specific driver so that specific driver functions that execute prior to\n calling this function have no effect on whether future calls to the\n specific driver generate interchangeability warnings.\n \n When developing a complex test system that consists of multiple test\n modules, it is generally a good idea to design the test modules so that\n they can run in any order. To do so requires ensuring that each test\n module completely configures the state of each instrument it uses. If a\n particular test module does not completely configure the state of an\n instrument, the state of the instrument depends on the configuration from\n a previously executed test module. If the test modules execute in a\n different order, the behavior of the instrument and therefore the entire\n test module is likely to change. This change in behavior is generally\n instrument specific and represents an interchangeability problem.\n \n Users can use this function to test for such cases. By calling this\n function at the beginning of a test module, users can determine whether\n the test module has dependencies on the operation of previously executed\n test modules. Any interchangeability warnings that occur after the user\n calls this function indicate that the section of the test program that\n executes after this function and prior to the generation of the warning\n does not completely configure the instrument and that the user is likely\n to experience different behavior if the user changes the execution order\n of the test modules or if the user changes instruments.\n \n Note: This function does not clear interchangeability warnings from the\n list of interchangeability warnings. 
To guarantee that the Get Next\n Interchange Warning function returns interchangeability warnings that\n occur only after the program calls this function, the user must clear the list\n of interchangeability warnings by calling the Clear Interchange Warnings\n function.\n \n Refer to the Interchange Check attribute for more information on\n interchangeability checking.\n \"\"\")\n \n \n def _get_driver_operation_cache(self):\n return self._driver_operation_cache\n \n def _set_driver_operation_cache(self, value):\n self._driver_operation_cache = bool(value)\n \n def _get_driver_operation_driver_setup(self):\n return self._driver_operation_driver_setup\n \n def _get_driver_operation_interchange_check(self):\n return self._driver_operation_interchange_check\n \n def _set_driver_operation_interchange_check(self, value):\n self._driver_operation_interchange_check = bool(value)\n \n def _get_driver_operation_logical_name(self):\n return self._driver_operation_logical_name\n \n def _get_driver_operation_query_instrument_status(self):\n return self._driver_operation_query_instrument_status\n \n def _set_driver_operation_query_instrument_status(self, value):\n self._driver_operation_query_instrument_status = bool(value)\n \n def _get_driver_operation_range_check(self):\n return self._driver_operation_range_check\n \n def _set_driver_operation_range_check(self, value):\n self._driver_operation_range_check = bool(value)\n \n def _get_driver_operation_record_coercions(self):\n return self._driver_operation_record_coercions\n \n def _set_driver_operation_record_coercions(self, value):\n self._driver_operation_record_coercions = bool(value)\n \n def _get_driver_operation_io_resource_descriptor(self):\n return self._driver_operation_io_resource_descriptor\n \n def _get_driver_operation_simulate(self):\n return self._driver_operation_simulate\n \n def _set_driver_operation_simulate(self, value):\n value = bool(value)\n if self._driver_operation_simulate and not value:\n raise SimulationStateException()\n self._driver_operation_simulate = value\n \n def _driver_operation_clear_interchange_warnings(self):\n self._driver_operation_interchange_warnings = list()\n \n def _driver_operation_get_next_coercion_record(self):\n if len(self._driver_operation_coercion_records) > 0:\n return self._driver_operation_coercion_records.pop()\n return \"\"\n \n def _driver_operation_get_next_interchange_warning(self):\n if len(self._driver_operation_interchange_warnings) > 0:\n return self._driver_operation_interchange_warnings.pop()\n return \"\"\n \n def _driver_operation_invalidate_all_attributes(self):\n pass\n\n def _driver_operation_reset_interchange_check(self):\n pass\n\n\nclass DriverIdentity(IviContainer):\n \"Inherent IVI methods for identification\"\n\n def __init__(self, *args, **kwargs):\n super(DriverIdentity, self).__init__(*args, **kwargs)\n \n self._identity_description = \"Base IVI Driver\"\n self._identity_identifier = \"\"\n self._identity_revision = \"\"\n self._identity_vendor = \"\"\n self._identity_instrument_manufacturer = \"Cannot query from instrument\"\n self._identity_instrument_model = \"Cannot query from instrument\"\n self._identity_instrument_firmware_revision = \"Cannot query from instrument\"\n self._identity_specification_major_version = 0\n self._identity_specification_minor_version = 0\n self._identity_supported_instrument_models = list()\n self.__dict__.setdefault('_identity_group_capabilities', list())\n \n self._add_property('identity.description',\n self._get_identity_description,\n 
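# no fset or fdel: identity.description is exposed as a read-only property\n 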
None,\n None,\n \"\"\"\n Returns a brief description of the IVI software component.\n \n The string that this attribute returns has no maximum size.\n \"\"\")\n self._add_property('identity.identifier',\n self._get_identity_identifier,\n None,\n None,\n \"\"\"\n Returns the case-sensitive unique identifier of the IVI software\n component. The string that this attribute returns contains a maximum of 32\n characters including the NULL character.\n \"\"\")\n self._add_property('identity.revision',\n self._get_identity_revision,\n None,\n None,\n \"\"\"\n Returns version information about the IVI software component. Refer to\n Section 3.1.2.2, Additional Compliance Rules for Revision String\n Attributes, for additional rules regarding this attribute.\n \n The string that this attribute returns has no maximum size.\n \"\"\")\n self._add_property('identity.vendor',\n self._get_identity_vendor,\n None,\n None,\n \"\"\"\n Returns the name of the vendor that supplies the IVI software component.\n \n The string that this attribute returns has no maximum size.\n \"\"\")\n self._add_property('identity.instrument_manufacturer',\n self._get_identity_instrument_manufacturer,\n None,\n None,\n \"\"\"\n Returns the name of the manufacturer of the instrument. The IVI specific\n driver returns the value it queries from the instrument as the value of\n this attribute or a string indicating that it cannot query the instrument\n identity.\n \n In some cases, it is not possible for the specific driver to query the\n manufacturer of the instrument. This can occur when the Simulate attribute\n is set to True or if the instrument is not capable of returning the\n manufacturer. For these cases, the specific driver returns defined strings\n for this attribute. If the Simulate attribute is set to True, the specific\n driver returns \"Not available while simulating\" as the value of this\n attribute. If the instrument is not capable of returning the manufacturer\n and the Simulate attribute is set to False, the specific driver returns\n \"Cannot query from instrument\" as the value of this attribute.\n \n The string that this attribute returns does not have a predefined maximum\n length.\n \"\"\")\n self._add_property('identity.instrument_model',\n self._get_identity_instrument_model,\n None,\n None,\n \"\"\"\n Returns the model number or name of the physical instrument. The IVI\n specific driver returns the value it queries from the instrument or a\n string indicating that it cannot query the instrument identity.\n \n In some cases, it is not possible for the specific driver to query the\n model of the instrument. This can occur when the Simulate attribute is\n set to True or if the instrument is not capable of returning the model.\n For these cases, the specific driver returns defined strings for this\n attribute. If the Simulate attribute is set to True, the specific driver\n returns \"Not available while simulating\" as the value of this attribute.\n If the instrument is not capable of returning the model and the Simulate\n attribute is set to False, the specific driver returns \"Cannot query\n from instrument\" as the value of this attribute.\n \n The string that this attribute returns does not have a predefined maximum\n length.\n \"\"\")\n self._add_property('identity.instrument_firmware_revision',\n self._get_identity_instrument_firmware_revision,\n None,\n None,\n \"\"\"\n Returns an instrument specific string that contains the firmware\n revision information of the physical instrument. 
The IVI specific driver\n returns the value it queries from the instrument as the value of this\n attribute or a string indicating that it cannot query the instrument\n identity.\n \n In some cases, it is not possible for the specific driver to query the\n firmware revision of the instrument. This can occur when the Simulate\n attribute is set to True or if the instrument is not capable of returning\n the firmware revision. For these cases, the specific driver returns\n defined strings for this attribute. If the Simulate attribute is set to\n True, the specific driver returns \"Not available while simulating\" as the\n value of this attribute. If the instrument is not capable of returning the\n firmware version and the Simulate attribute is set to False, the specific\n driver returns \"Cannot query from instrument\" as the value of this\n attribute.\n \n The string that this attribute returns does not have a predefined maximum\n length.\n \"\"\")\n self._add_property('identity.specification_major_version',\n self._get_identity_specification_major_version,\n None,\n None,\n \"\"\"\n Returns the major version number of the class specification in accordance\n with which the IVI software component was developed. The value is a\n positive integer value.\n \n If the software component is not compliant with a class specification, the\n software component returns zero as the value of this attribute.\n \"\"\")\n self._add_property('identity.specification_minor_version',\n self._get_identity_specification_minor_version,\n None,\n None,\n \"\"\"\n Returns the minor version number of the class specification in accordance\n with which the IVI software component was developed. The value is a\n positive integer value.\n \n If the software component is not compliant with a class specification, the\n software component returns zero as the value of this attribute.\n \"\"\")\n self._add_property('identity.supported_instrument_models',\n self._get_identity_supported_instrument_models,\n None,\n None,\n \"\"\"\n Returns a comma-separated list of names of instrument models with which\n the IVI specific driver is compatible. The string has no white space\n except possibly embedded in the instrument model names. An example of a\n string that this attribute might return is \"TKTDS3012,TKTDS3014,TKTDS3016\".\n \n It is not necessary for the string to include the abbreviation for the\n manufacturer if it is the same for all models. In the example above, it is\n valid for the attribute to return the string \"TDS3012,TDS3014,TDS3016\".\n \n The string that this attribute returns does not have a predefined maximum\n length.\n \"\"\")\n self._add_property('identity.group_capabilities',\n self._get_identity_group_capabilities,\n None,\n None,\n \"\"\"\n Returns a comma-separated list that identifies the class capability groups\n that the IVI specific driver implements. The items in the list are\n capability group names that the IVI class specifications define. The\n string has no white space except for white space that might be embedded in\n a capability group name.\n \n If the IVI specific driver does not comply with an IVI class specification,\n the specific driver returns an empty string as the value of this attribute.\n \n The string that this attribute returns does not have a predefined maximum\n length.\n \"\"\")\n self._add_method('identity.get_group_capabilities',\n self._identity_get_group_capabilities,\n \"\"\"\n Returns a list of names of class capability groups that the IVI specific\n driver implements. 
The items in the list are capability group names that\n the IVI class specifications define. The list is returned as a list of\n strings.\n \n If the IVI specific driver does not comply with an IVI class specification,\n the specific driver returns an array with zero elements.\n \"\"\")\n self._add_method('identity.get_supported_instrument_models',\n self._identity_get_supported_instrument_models,\n \"\"\"\n Returns a list of names of instrument models with which the IVI specific\n driver is compatible. The list is returned as a list of strings. For\n example, this attribute might return the strings \"TKTDS3012\", \"TKTDS3014\",\n and \"TKTDS3016\" .\n \n It is not necessary for the string to include the abbreviation for the\n manufacturer if it is the same for all models. In the example above, it is\n valid for the attribute to return the strings \"TDS3012\", \"TDS3014\", and\n \"TDS3016\".\n \"\"\")\n \n \n def _add_group_capability(self, name):\n self.__dict__.setdefault('_identity_group_capabilities', list())\n self._identity_group_capabilities.insert(0, name)\n \n def _get_identity_description(self):\n return self._identity_description\n \n def _get_identity_identifier(self):\n return self._identity_identifier\n \n def _get_identity_revision(self):\n return self._identity_revision\n \n def _get_identity_vendor(self):\n return self._identity_vendor\n \n def _get_identity_instrument_manufacturer(self):\n return self._identity_instrument_manufacturer\n \n def _get_identity_instrument_model(self):\n return self._identity_instrument_model\n \n def _get_identity_instrument_firmware_revision(self):\n return self._identity_instrument_firmware_revision\n \n def _get_identity_specification_major_version(self):\n return self._identity_specification_major_version\n \n def _get_identity_specification_minor_version(self):\n return self._identity_specification_minor_version\n \n def _get_identity_supported_instrument_models(self):\n return \",\".join(self._identity_supported_instrument_models)\n \n def _get_identity_group_capabilities(self):\n return \",\".join(self._identity_group_capabilities)\n \n def _identity_get_group_capabilities(self):\n return self._identity_group_capabilities\n \n def _identity_get_supported_instrument_models(self):\n return self._identity_supported_instrument_models\n\n\nclass DriverUtility(IviContainer):\n \"Inherent IVI utility methods\"\n\n def __init__(self, *args, **kwargs):\n super(DriverUtility, self).__init__(*args, **kwargs)\n \n self._add_method('utility.disable',\n self._utility_disable,\n \"\"\"\n The Disable operation places the instrument in a quiescent state as\n quickly as possible. In a quiescent state, an instrument has no or minimal\n effect on the external system to which it is connected. The Disable\n operation might be similar to the Reset operation in that it places the\n instrument in a known state. However, the Disable operation does not\n perform the other operations that the Reset operation performs such as\n configuring the instrument options on which the IVI specific driver\n depends. For some instruments, the disable function may do nothing.\n \n The IVI class specifications define the exact behavior of this function\n for each instrument class. 
Refer to the IVI class specifications for more\n information on the behavior of this function.\n \"\"\")\n self._add_method('utility.error_query',\n self._utility_error_query,\n \"\"\"\n Queries the instrument and returns instrument specific error information.\n \n Generally, the user calls this function after another function in the IVI\n driver returns the Instrument Status error. The IVI specific driver\n returns the Instrument Status error when the instrument indicates that it\n encountered an error and its error queue is not empty. Error Query\n extracts an error out of the instrument's error queue.\n \n For instruments that have status registers but no error queue, the IVI\n specific driver emulates an error queue in software.\n \n The method returns a tuple containing the error code and error message.\n \"\"\")\n self._add_method('utility.lock_object',\n self._utility_lock_object,\n \"\"\"\n This function obtains a multithread lock for this instance of the driver.\n Before it does so, Lock Session waits until all other execution threads\n have released their locks or for the length of time specified by the\n maximum time parameter, whichever comes first. The type of lock obtained\n depends upon the parameters passed to the specific driver constructor.\n \n The user can use Lock Session with IVI specific drivers to protect a\n section of code that requires exclusive access to the instrument. This\n occurs when the user takes multiple actions that affect the instrument\n and the user wants to ensure that other execution threads do not disturb\n the instrument state until all the actions execute. For example, if the\n user sets various instrument attributes and then triggers a measurement,\n the user must ensure no other execution thread modifies the attribute\n values until the user finishes taking the measurement. \n \n It is important to note that this lock is not related to I/O locks such as\n the VISA resource locking mechanism.\n \n The user can safely make nested calls to Lock Session within the same\n thread. To completely unlock the session, the user must balance each call\n to Lock Session with a call to Unlock Session. Calls to Lock Session must\n always obtain the same lock that is used internally by the IVI driver to\n guard individual method calls.\n \"\"\")\n self._add_method('utility.reset',\n self._utility_reset,\n \"\"\"\n This function performs the following actions:\n \n * Places the instrument in a known state. In an IEEE 488.2 instrument, the\n Reset function sends the command string ``*RST`` to the instrument.\n * Configures instrument options on which the IVI specific driver depends.\n A specific driver might enable or disable headers or enable binary mode\n for waveform transfers.\n \n The user can either call the Reset function separately or specify that it\n be called from the Initialize function. The Initialize function performs\n additional operations after performing the reset operation to place the\n instrument in a state more suitable for interchangeable programming. 
To\n reset the device and perform these additional operations, call the Reset\n With Defaults function instead of the Reset function.\n \"\"\")\n self._add_method('utility.reset_with_defaults',\n self._utility_reset_with_defaults,\n \"\"\"\n The Reset With Defaults function performs the same operations that the\n Reset function performs and then performs the following additional\n operations in the specified order:\n \n * Disables the class extension capability groups that the IVI specific\n driver implements.\n * If the class specification with which the IVI specific driver is\n compliant defines initial values for attributes, this function sets\n those attributes to the initial values that the class specification\n defines.\n * Configures the initial settings for the specific driver and instrument\n based on the information retrieved from the IVI configuration store when\n the instrument driver session was initialized.\n \n Notice that the Initialize function also performs these functions. To\n place the instrument and the IVI specific driver in the exact same state\n that they attain when the user calls the Initialize function, the user\n must first call the Close function and then the Initialize function.\n \"\"\")\n self._add_method('utility.self_test',\n self._utility_self_test,\n \"\"\"\n Causes the instrument to perform a self test. Self Test waits for the\n instrument to complete the test. It then queries the instrument for the\n results of the self test and returns the results to the user.\n \n If the instrument passes the self test, this function returns the tuple::\n \n (0, 'Self test passed')\n \n Otherwise, the function returns a tuple of the result code and message.\n \"\"\")\n self._add_method('utility.unlock_object',\n self._utility_unlock_object,\n \"\"\"\n This function releases a lock that the Lock Session function acquires.\n \n Refer to Lock Session for additional information on IVI session locks.\n \"\"\")\n \n \n def _utility_disable(self):\n pass\n \n def _utility_error_query(self):\n error_code = 0\n error_message = \"No error\"\n return (error_code, error_message)\n \n def _utility_lock_object(self):\n pass\n \n def _utility_reset(self):\n pass\n \n def _utility_reset_with_defaults(self):\n self._utility_reset()\n \n def _utility_self_test(self):\n code = 0\n message = \"Self test passed\"\n return (code, message)\n \n def _utility_unlock_object(self):\n pass\n\n\nclass Driver(DriverOperation, DriverIdentity, DriverUtility):\n \"Inherent IVI methods for all instruments\"\n\n def __init__(self, resource = None, id_query = False, reset = False, *args, **kwargs):\n # process out args for initialize\n kw = {}\n for k in ('range_check', 'query_instr_status', 'cache', 'simulate', 'record_coercions',\n 'interchange_check', 'driver_setup', 'prefer_pyvisa'):\n if k in kwargs:\n kw[k] = kwargs.pop(k)\n \n self._interface = None\n self._initialized = False\n self.__dict__.setdefault('_instrument_id', '')\n self._cache_valid = dict()\n \n super(Driver, self).__init__(*args, **kwargs)\n \n self._add_method('initialize',\n self._initialize,\n \"\"\"\n The user must call the Initialize function prior to calling other IVI\n driver functions that access the instrument. The Initialize function is\n called automatically by the constructor if a resource string is passed as\n the first argument to the constructor. 
\n \n If simulation is disabled when the user calls the Initialize function, the\n function performs the following actions:\n \n * Opens and configures an I/O session to the instrument.\n * If the user passes True for the IdQuery parameter, the function queries\n the instrument for its ID and verifies that the IVI specific driver\n supports the particular instrument model. If the instrument cannot\n return its ID, the specific driver returns the ID Query Not Supported\n warning.\n * If the user passes True for the Reset parameter, the function places the\n instrument in a known state. In an IEEE 488.2 instrument, the function\n sends the command string \"*RST\" to the instrument. If the instrument\n cannot perform a reset, the IVI specific driver returns the Reset Not\n Supported warning. \n * Configures instrument options on which the IVI specific driver depends.\n For example, a specific driver might enable or disable headers or enable\n binary mode for waveform transfers.\n * Performs the following operations in the given order:\n 1. Disables the class extension capability groups that the IVI\n specific driver does not implement.\n 2. If the class specification with which the IVI specific driver is\n compliant defines initial values for attributes, this function sets\n the attributes to the values that the class specification defines.\n 3. If the ResourceName parameter is a logical name, the IVI specific\n driver configures the initial settings for the specific driver and\n instrument based on the configuration of the logical name in the IVI \n configuration store.\n \n If simulation is enabled when the user calls the Initialize function, the\n function performs the following actions:\n \n * If the user passes True for the IdQuery parameter and the instrument\n cannot return its ID, the IVI specific driver returns the ID Query Not\n Supported warning.\n * If the user passes True for the Reset parameter and the instrument\n cannot perform a reset, the IVI specific driver returns the Reset Not\n Supported warning.\n * If the ResourceName parameter is a logical name, the IVI specific driver\n configures the initial settings for the specific driver based on the\n configuration of the logical name in the IVI configuration store.\n \n Some instrument driver operations require or take into account information\n from the IVI configuration store. Examples of such information are virtual\n repeated capability name mappings and the value of certain inherent\n attributes. An IVI driver shall retrieve all the information for a session\n from the IVI configuration store during the Initialization function. The\n IVI driver shall not read any information from the IVI configuration store\n for a session after the Initialization function completes. Refer to\n Section 3.2.3, Instantiating the Right Configuration Store From Software\n Modules, of IVI-3.5: Configuration Server Specification for details on how\n to correctly instantiate the configuration store.\n \n The ResourceName parameter must contain either a logical name that is\n defined in the IVI configuration store or an instrument specific string\n that identifies the I/O address of the instrument, such as a VISA resource\n descriptor string. Refer to IVI-3.5: Configuration Server Specification\n for restrictions on the format of IVI logical names. Refer to the\n VXIplug&play specifications for the grammar of VISA resource descriptor\n strings. 
\n \n Example resource strings::\n \n 'TCPIP::10.0.0.1::INSTR'\n 'TCPIP0::10.0.0.1::INSTR'\n 'TCPIP::10.0.0.1::gpib,5::INSTR'\n 'TCPIP0::10.0.0.1::gpib,5::INSTR'\n 'TCPIP0::10.0.0.1::usb0::INSTR'\n 'TCPIP0::10.0.0.1::usb0[1234::5678::MYSERIAL::0]::INSTR'\n 'USB::1234::5678::INSTR'\n 'USB::1234::5678::SERIAL::INSTR'\n 'USB0::0x1234::0x5678::INSTR'\n 'USB0::0x1234::0x5678::SERIAL::INSTR'\n 'GPIB::10::INSTR'\n 'GPIB0::10::INSTR'\n 'ASRL1::INSTR'\n 'ASRL::COM1,9600,8n1::INSTR'\n 'ASRL::/dev/ttyUSB0,9600::INSTR'\n 'ASRL::/dev/ttyUSB0,9600,8n1::INSTR'\n \n The user can use additional parameters to specify the initial values of\n certain IVI inherent attributes for the session. The following table lists\n the inherent attributes that the user can set through these named\n parameters. The user does not have to specify all or any of the\n attributes. If the user does not specify the initial value of an inherent\n attribute, the initial value of the attribute depends on the value of the\n ResourceName parameter:\n \n * If the ResourceName parameter contains an IVI logical name, the IVI\n specific driver configures the initial settings based on the\n configuration of the logical name in the IVI configuration store.\n * If the ResourceName parameter contains a resource descriptor string that\n identifies the I/O address of the instrument, the IVI specific driver\n sets inherent attributes to their default initial values. The following\n table shows the default initial value for each attribute.\n \n The following table lists the IVI inherent attributes that the user can\n set, their default initial values, and the name that represents each\n attribute. These options are passed to the initialize function or the\n constructor as key-value pairs. \n \n +-------------------------+----------------------+---------------------+\n | Attribute | Default Initial Value | Options String Name |\n +=========================+======================+=====================+\n | Range Check | True | range_check |\n +-------------------------+----------------------+---------------------+\n | Query Instrument Status | False | query_instr_status |\n +-------------------------+----------------------+---------------------+\n | Cache | True | cache |\n +-------------------------+----------------------+---------------------+\n | Simulate | False | simulate |\n +-------------------------+----------------------+---------------------+\n | Record Value Coercions | False | record_coercions |\n +-------------------------+----------------------+---------------------+\n | Interchange Check | False | interchange_check |\n +-------------------------+----------------------+---------------------+\n | Driver Setup | '' | driver_setup |\n +-------------------------+----------------------+---------------------+\n | Prefer PyVISA | False | prefer_pyvisa |\n +-------------------------+----------------------+---------------------+\n \n Each IVI specific driver defines its own meaning and valid values for the\n Driver Setup attribute. Many specific drivers ignore the value of the\n Driver Setup attribute. Other specific drivers use the Driver Setup string\n to configure instrument specific features at initialization. 
For example,\n if a specific driver supports a family of instrument models, the driver\n can use the Driver Setup attribute to allow the user to specify a\n particular instrument model to simulate.\n \n If the user attempts to initialize the instrument a second time without\n first calling the Close function, the Initialize function returns the\n Already Initialized error.\n \"\"\")\n self._add_property('initialized',\n self._get_initialized,\n None,\n None,\n \"\"\"\n Returns a value that indicates whether the IVI specific driver is in the\n initialized state. After the specific driver is instantiated and before\n the Initialize function successfully executes, this attribute returns\n False. After the Initialize function successfully executes and prior to\n the execution of the Close function, this attribute returns True. After\n the Close function executes, this attribute returns False. \n \n The Initialized attribute is one of the few IVI specific driver attributes\n that can be accessed while the specific driver is not in the initialized\n state. All the attributes of an IVI specific driver that can be accessed\n while the specific driver is not in the initialized state are listed below.\n \n * Component Class Spec Major Version\n * Component Class Spec Minor Version\n * Component Description\n * Component Prefix\n * Component Identifier\n * Component Revision\n * Component Vendor\n * Initialized\n * Supported Instrument Models\n \"\"\")\n self._add_method('close',\n self._close,\n \"\"\"\n When the user finishes using a Python IVI driver, the user should call\n either the Close method or __del__. Note that __del__ will call close\n automatically. \n \n This function also does the following:\n \n * Prevents the user from calling other functions in the driver that\n access the instrument until the user calls the Initialize function\n again.\n * May deallocate internal resources used by the IVI session.\n \"\"\")\n\n # inherit prefer_pyvisa from global setting\n self._prefer_pyvisa = _prefer_pyvisa\n\n # call initialize if resource string or other args present\n self._initialized_from_constructor = False\n if resource is not None or len(kw) > 0:\n self._initialized_from_constructor = True\n self.initialize(resource, id_query, reset, **kw)\n\n def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):\n \"Opens an I/O session to the instrument.\"\n\n # decode options\n for op in keywargs:\n val = keywargs[op]\n if op == 'range_check':\n self._driver_operation_range_check = bool(val)\n elif op == 'query_instr_status':\n self._driver_operation_query_instrument_status = bool(val)\n elif op == 'cache':\n self._driver_operation_cache = bool(val)\n elif op == 'simulate':\n self._driver_operation_simulate = bool(val)\n elif op == 'record_coercions':\n self._driver_operation_record_coercions = bool(val)\n elif op == 'interchange_check':\n self._driver_operation_interchange_check = bool(val)\n elif op == 'driver_setup':\n self._driver_operation_driver_setup = val\n elif op == 'prefer_pyvisa':\n self._prefer_pyvisa = bool(val)\n else:\n raise UnknownOptionException('Invalid option')\n\n # process resource\n if self._driver_operation_simulate:\n print(\"Simulating; ignoring resource\")\n elif resource is None:\n raise IOException('No resource specified!')\n elif type(resource) == str:\n # parse VISA resource string\n # valid resource strings:\n # TCPIP::10.0.0.1::INSTR\n # TCPIP0::10.0.0.1::INSTR\n # TCPIP::10.0.0.1::gpib,5::INSTR\n # TCPIP0::10.0.0.1::gpib,5::INSTR\n # 
TCPIP0::10.0.0.1::usb0::INSTR\n # TCPIP0::10.0.0.1::usb0[1234::5678::MYSERIAL::0]::INSTR\n # USB::1234::5678::INSTR\n # USB::1234::5678::SERIAL::INSTR\n # USB0::0x1234::0x5678::INSTR\n # USB0::0x1234::0x5678::SERIAL::INSTR\n # USB0::0x1234::0x5678::SERIAL::0::INSTR\n # GPIB::10::INSTR\n # GPIB0::10::INSTR\n # ASRL1::INSTR\n # ASRL::COM1,9600,8n1::INSTR\n # ASRL::/dev/ttyUSB0,9600::INSTR\n # ASRL::/dev/ttyUSB0,9600,8n1::INSTR\n m = re.match('^(?P<prefix>(?P<type>TCPIP|USB|GPIB|ASRL)\\d*)(::(?P<arg1>[^\\s:]+))?(::(?P<arg2>[^\\s:]+(\\[.+\\])?))?(::(?P<arg3>[^\\s:]+))?(::(?P<arg4>[^\\s:]+))?(::(?P<suffix>INSTR))$', resource, re.I)\n if m is None:\n if 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n else:\n raise IOException('Invalid resource string')\n else:\n res_type = m.group('type').upper()\n res_prefix = m.group('prefix')\n res_arg1 = m.group('arg1')\n res_arg2 = m.group('arg2')\n res_arg3 = m.group('arg3')\n res_suffix = m.group('suffix')\n\n if res_type == 'TCPIP':\n # TCP connection\n if self._prefer_pyvisa and 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n elif 'vxi11' in globals():\n # connect with VXI-11\n self._interface = vxi11.Instrument(resource)\n elif 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n else:\n raise IOException('Cannot use resource type %s' % res_type)\n elif res_type == 'USB':\n # USB connection\n if self._prefer_pyvisa and 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n elif 'usbtmc' in globals():\n # connect with USBTMC\n self._interface = usbtmc.Instrument(resource)\n elif 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n else:\n raise IOException('Cannot use resource type %s' % res_type)\n elif res_type == 'GPIB':\n # GPIB connection\n if self._prefer_pyvisa and 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n elif 'linuxgpib' in globals():\n # connect with linux-gpib\n self._interface = linuxgpib.LinuxGpibInstrument(resource)\n elif 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n else:\n raise IOException('Cannot use resource type %s' % res_type)\n elif res_type == 'ASRL':\n # Serial connection\n if self._prefer_pyvisa and 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n elif 'pyserial' in globals():\n # connect with PySerial\n self._interface = pyserial.SerialInstrument(resource)\n elif 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n else:\n raise IOException('Cannot use resource type %s' % res_type)\n\n elif 'pyvisa' in globals():\n # connect with PyVISA\n self._interface = pyvisa.PyVisaInstrument(resource)\n else:\n raise IOException('Unknown resource type %s' % res_type)\n\n self._driver_operation_io_resource_descriptor = resource\n\n elif 'vxi11' in globals() and resource.__class__ == vxi11.Instrument:\n # Got a vxi11 instrument, can use it as is\n self._interface = resource\n elif 'usbtmc' in globals() and resource.__class__ == usbtmc.Instrument:\n # Got a usbtmc instrument, can use it as is\n self._interface = resource\n elif set(['read_raw', 'write_raw']).issubset(set(resource.__class__.__dict__)):\n # has read_raw and write_raw, so should be a usable interface\n 
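# (any object providing these two methods can be passed in place of a resource string)\n 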
self._interface = resource\n else:\n # don't have a usable resource\n raise IOException('Invalid resource')\n\n self.driver_operation.invalidate_all_attributes()\n\n self._initialized = True\n\n\n def _close(self):\n \"Closes an IVI session\"\n if self._interface:\n try:\n self._interface.close()\n except:\n pass\n\n self._interface = None\n self._initialized = False\n\n\n def _get_initialized(self):\n \"Returns the initialization state of the driver\"\n return self._initialized\n \n def _get_cache_tag(self, tag=None, skip=1):\n if tag is None:\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n tag = stack[start][3] \n \n if tag[0:4] == \"_get\": tag = tag[4:]\n if tag[0:4] == \"_set\": tag = tag[4:]\n if tag[0] == \"_\": tag = tag[1:]\n \n return tag\n\n def _get_cache_valid(self, tag=None, index=-1, skip_disable=False):\n if not skip_disable and not self._driver_operation_cache:\n return False\n tag = self._get_cache_tag(tag, 2)\n if index >= 0:\n tag = tag + '_%d' % index\n try:\n return self._cache_valid[tag]\n except KeyError:\n self._cache_valid[tag] = False\n return False\n\n def _set_cache_valid(self, valid=True, tag=None, index=-1):\n tag = self._get_cache_tag(tag, 2)\n if index >= 0:\n tag = tag + '_%d' % index\n self._cache_valid[tag] = valid\n\n def _driver_operation_invalidate_all_attributes(self):\n self._cache_valid = dict()\n\n def _write_raw(self, data):\n \"Write binary data to instrument\"\n if self._driver_operation_simulate:\n print(\"[simulating] Call to write_raw\")\n return\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n self._interface.write_raw(data)\n \n def _read_raw(self, num=-1):\n \"Read binary data from instrument\"\n if self._driver_operation_simulate:\n print(\"[simulating] Call to read_raw\")\n return b''\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n return self._interface.read_raw(num)\n \n def _ask_raw(self, data, num=-1):\n \"Write then read binary data\"\n if self._driver_operation_simulate:\n print(\"[simulating] Call to ask_raw\")\n return b''\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n try:\n return self._interface.ask_raw(data, num)\n except AttributeError:\n # if interface does not implement ask_raw, emulate it\n self._write_raw(data)\n return self._read_raw(num)\n \n def _write(self, data, encoding = 'utf-8'):\n \"Write string to instrument\"\n if self._driver_operation_simulate:\n print(\"[simulating] Write (%s) '%s'\" % (encoding, data))\n return\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n try:\n self._interface.write(data, encoding)\n except AttributeError:\n if type(data) is tuple or type(data) is list:\n # recursive call for a list of commands\n for data_i in data:\n self._write(data_i, encoding)\n return\n\n self._write_raw(str(data).encode(encoding))\n \n def _read(self, num=-1, encoding = 'utf-8'):\n \"Read string from instrument\"\n if self._driver_operation_simulate:\n print(\"[simulating] Read (%s)\" % encoding)\n return ''\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n try:\n return self._interface.read(num, encoding)\n except AttributeError:\n return self._read_raw(num).decode(encoding).rstrip('\\r\\n')\n \n def _ask(self, data, num=-1, encoding = 'utf-8'):\n \"Write then read string\"\n if self._driver_operation_simulate:\n print(\"[simulating] Ask (%s) '%s'\" % (encoding, 
data))\n return ''\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n try:\n return self._interface.ask(data, num, encoding)\n except AttributeError:\n # if interface does not implement ask, emulate it\n if type(data) is tuple or type(data) is list:\n # recursive call for a list of commands\n val = list()\n for data_i in data:\n val.append(self._ask(data_i, num, encoding))\n return val\n\n self._write(data, encoding)\n return self._read(num, encoding)\n \n def _ask_for_values(self, msg, delim=',', converter=float, array=True):\n '''\n write then read a list or array of data\n \n Parameters\n --------------\n msg : str\n message to write to instrument\n delim : str\n delimiter\n converter : type\n a datatype used to typecast the elements in the returned list\n array: bool\n convert the output to a numpy array \n \n '''\n s = self._ask(msg)\n s_split = s.split(delim)\n # materialize the map so a plain list is returned on Python 3\n out = list(map(converter, s_split))\n if array:\n out = np.array(out)\n return out\n \n def _read_stb(self):\n \"Read status byte\"\n if self._driver_operation_simulate:\n print(\"[simulating] Read status\")\n return 0\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n try:\n return self._interface.read_stb()\n except (AttributeError, NotImplementedError):\n return int(self._ask(\"*STB?\"))\n \n def _trigger(self):\n \"Device trigger\"\n if self._driver_operation_simulate:\n print(\"[simulating] Trigger\")\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n try:\n self._interface.trigger()\n except (AttributeError, NotImplementedError):\n self._write(\"*TRG\")\n \n def _clear(self):\n \"Device clear\"\n if self._driver_operation_simulate:\n print(\"[simulating] Clear\")\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n try:\n return self._interface.clear()\n except (AttributeError, NotImplementedError):\n self._write(\"*CLS\")\n \n def _remote(self):\n \"Device set remote\"\n if self._driver_operation_simulate:\n print(\"[simulating] Remote\")\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n return self._interface.remote()\n \n def _local(self):\n \"Device set local\"\n if self._driver_operation_simulate:\n print(\"[simulating] Local\")\n if not self._initialized or self._interface is None:\n raise NotInitializedException()\n return self._interface.local()\n \n def _read_ieee_block(self):\n \"Read IEEE block\"\n # IEEE block binary data is prefixed with #lnnnnnnnn\n # where l is length of n and n is the\n # length of the data\n # ex: #800002000 prefixes 2000 data bytes\n\n ch = self._read_raw(1)\n\n if len(ch) == 0:\n return b''\n\n while ch != b'#':\n ch = self._read_raw(1)\n\n l = int(self._read_raw(1))\n if l > 0:\n num = int(self._read_raw(l))\n raw_data = self._read_raw(num)\n else:\n raw_data = self._read_raw()\n\n return raw_data\n \n def _ask_for_ieee_block(self, data, encoding = 'utf-8'):\n \"Write string then read IEEE block\"\n self._write(data, encoding)\n return self._read_ieee_block()\n\n def _write_ieee_block(self, data, prefix = None, encoding = 'utf-8'):\n \"Write IEEE block\"\n # IEEE block binary data is prefixed with #lnnnnnnnn\n # where l is length of n and n is the\n # length of the data\n # ex: #800002000 prefixes 2000 data bytes\n \n block = b''\n \n if type(prefix) == str:\n block = prefix.encode(encoding)\n elif type(prefix) == bytes:\n block = prefix\n \n block = block + build_ieee_block(data)\n \n 
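# block now holds the complete message, e.g. prefix + b'#800002000' + 2000 data bytes\n 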
self._write_raw(block)\n \n def doc(self, obj=None, itm=None, docs=None, prefix=None):\n \"\"\"Python IVI documentation generator\"\"\"\n \n # need an obj, if none specified, use self\n if obj is None:\n obj = self\n \n # if first arg is a string, put in itm and use self for obj\n if type(obj) == str:\n itm = obj\n obj = self\n \n return doc(obj, itm, docs, prefix)\n \n def help(self, itm=None, complete=False, indent=0):\n \"\"\"Python IVI help system\"\"\"\n return help(self, itm, complete, indent)\n \n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.sqrt" ] ]
pantheon5100/DeACL
[ "32cf8182f2ef271fab7453bc5cc1ddea6dfa3c22" ]
[ "solo/methods/mocov2_distillation_AT_dual_bn.py" ]
[ "# Copyright 2021 solo-learn development team.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\n# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport argparse\nfrom typing import Any, Dict, List, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom solo.losses.moco import moco_loss_func\n# from solo.methods.base import BaseDistillationMethod\nfrom solo.methods.base_for_AT_dual_bn import BaseDistillationATMethodDualBN\nfrom solo.utils.momentum import initialize_momentum_params\nfrom solo.utils.misc import gather\n\nfrom torchvision import models\nfrom solo.utils.metrics import accuracy_at_k, weighted_mean\n\n\nclass MoCoV2KDATDualBN(BaseDistillationATMethodDualBN):\n queue: torch.Tensor\n\n def __init__(\n self,\n proj_output_dim: int,\n proj_hidden_dim: int,\n temperature: float,\n queue_size: int,\n **kwargs\n ):\n \"\"\"Implements MoCo V2+ (https://arxiv.org/abs/2011.10566).\n\n Args:\n proj_output_dim (int): number of dimensions of projected features.\n proj_hidden_dim (int): number of neurons of the hidden layers of the projector.\n temperature (float): temperature for the softmax in the contrastive loss.\n queue_size (int): number of samples to keep in the queue.\n \"\"\"\n\n super().__init__(**kwargs)\n\n self.temperature = temperature\n self.queue_size = queue_size\n\n # projector\n self.projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_output_dim),\n )\n\n # Make StudentNetwork have same weight with Teacher\n self.projector.load_state_dict(self.projector_state_dict)\n\n\n # momentum projector\n self.momentum_projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_output_dim),\n )\n # initialize_momentum_params(self.projector, self.momentum_projector)\n self.momentum_projector.load_state_dict(self.projector_state_dict)\n\n # create the queue\n self.register_buffer(\"queue\", torch.randn(2, proj_output_dim, queue_size))\n self.queue = nn.functional.normalize(self.queue, dim=1)\n self.register_buffer(\"queue_ptr\", torch.zeros(1, dtype=torch.long))\n\n # To remove both projector\n self.projector = nn.Identity()\n self.momentum_projector = nn.Identity()\n\n @staticmethod\n def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parent_parser = super(MoCoV2KDATDualBN, MoCoV2KDATDualBN).add_model_specific_args(parent_parser)\n parser = parent_parser.add_argument_group(\"mocov2_kd_at\")\n\n # projector\n 
parser.add_argument(\"--proj_output_dim\", type=int, default=128)\n parser.add_argument(\"--proj_hidden_dim\", type=int, default=2048)\n\n # parameters\n parser.add_argument(\"--temperature\", type=float, default=0.1)\n\n # queue settings\n parser.add_argument(\"--queue_size\", default=65536, type=int)\n\n # parser.add_argument(\"--limit_val_batches\", type=float, default=0.2)\n\n return parent_parser\n\n @property\n def learnable_params(self) -> List[dict]:\n \"\"\"Adds projector parameters together with parent's learnable parameters.\n\n Returns:\n List[dict]: list of learnable parameters.\n \"\"\"\n\n extra_learnable_params = [{\"params\": self.projector.parameters()}]\n return super().learnable_params + extra_learnable_params\n\n @property\n def momentum_pairs(self) -> List[Tuple[Any, Any]]:\n \"\"\"Adds (projector, momentum_projector) to the parent's momentum pairs.\n\n Returns:\n List[Tuple[Any, Any]]: list of momentum pairs.\n \"\"\"\n\n extra_momentum_pairs = [(self.projector, self.momentum_projector)]\n return super().momentum_pairs + extra_momentum_pairs\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, keys: torch.Tensor):\n \"\"\"Adds new samples and removes old samples from the queue in a fifo manner.\n\n Args:\n keys (torch.Tensor): output features of the momentum backbone.\n \"\"\"\n\n batch_size = keys.shape[1]\n ptr = int(self.queue_ptr) # type: ignore\n assert self.queue_size % batch_size == 0 # for simplicity\n\n # replace the keys at ptr (dequeue and enqueue)\n keys = keys.permute(0, 2, 1)\n self.queue[:, :, ptr : ptr + batch_size] = keys\n ptr = (ptr + batch_size) % self.queue_size # move pointer\n self.queue_ptr[0] = ptr # type: ignore\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:\n \"\"\"Performs the forward pass of the online backbone and projector.\n\n Args:\n X (torch.Tensor): a batch of images in the tensor format.\n\n Returns:\n Dict[str, Any]: a dict containing the outputs of the parent and the projected features.\n \"\"\"\n\n out = super().forward(X, *args, **kwargs)\n z = F.normalize(self.projector(out[\"feats\"]), dim=-1)\n return {**out, \"z\": z}\n\n def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:\n \"\"\"\n Training step for MoCo reusing BaseMomentumMethod training step.\n\n Args:\n batch (Sequence[Any]): a batch of data in the\n format of [img_indexes, [X], Y], where [X] is a list of size self.num_large_crops\n containing batches of images.\n batch_idx (int): index of the batch.\n\n Returns:\n torch.Tensor: total loss composed of MOCO loss and classification loss.\n\n \"\"\"\n\n\n self.momentum_backbone.eval()\n\n # import ipdb; ipdb.set_trace()\n image_tau1, image_weak = batch[1]\n targets = batch[2]\n\n ############################################################################\n # Adversarial Training (DUAL BN)\n ############################################################################\n\n away_target = self.momentum_projector(self.momentum_backbone(image_weak))\n AE_generation_image = image_weak\n image_AE = self.generate_training_AE(AE_generation_image, away_target)\n\n student_logtis_clean = self.backbone(image_weak, \"normal\")\n student_logits_AE = self.backbone(image_AE, \"pgd\")\n\n adv_loss = -F.cosine_similarity(student_logits_AE, away_target).mean()\n adv_loss += -3*F.cosine_similarity(student_logits_AE, student_logtis_clean).mean()\n\n ############################################################################\n # Adversarial Training (DUAL BN)\n 
############################################################################\n\n\n\n\n\n\n\n\n ############################################################################\n # Adversarial Training (CAT)\n ############################################################################\n\n # away_target = self.momentum_projector(self.momentum_backbone(image_weak))\n # AE_generation_image = image_weak\n # image_AE = self.generate_training_AE(AE_generation_image, away_target)\n\n # image_CAT = torch.cat([image_weak, image_AE])\n # logits_all = self.backbone(image_CAT)\n # bs = image_weak.size(0)\n # student_logtis_clean = logits_all[:bs]\n # student_logits_AE = logits_all[bs:]\n\n # adv_loss = -F.cosine_similarity(student_logits_AE, away_target).mean()\n # adv_loss += -3*F.cosine_similarity(student_logits_AE, student_logtis_clean).mean()\n\n ############################################################################\n # Adversarial Training (CAT)\n ############################################################################\n\n\n\n\n\n\n ############################################################################\n # Adversarial Training (CODE Experiment)\n ############################################################################\n\n # # NOTE: Check Wether to Make Student Start Same Weight with Teacher\n # # CODE Definition. 0: Weak augmentation 1: Tau1 augmentation\n # # CS_Teacher$1_Student-AE_CS_Teacher$2_Student$3, so case 1 is 110\n # # 1. CS_TeacherTau1_Student-AE_CS_TeacherTau1_StudentWeak\n # # 110\n # # with torch.no_grad():\n # # logits_TeacherTau1 = self.projector(self.momentum_backbone(image_tau1))\n\n # # image_AE = self.generate_training_AE(image_weak, logits_TeacherTau1)\n # # student_logit = self.projector(self.backbone(image_AE))\n # # adv_loss = -F.cosine_similarity(student_logit, logits_TeacherTau1).mean()\n\n # # 2. 
CS_TeacherTau1_Student-AE_CS_TeacherTau1_StudentTau1\n # # 111\n # # with torch.no_grad():\n # # logits_TeacherTau1 = self.projector(self.momentum_backbone(image_tau1))\n\n # # image_AE = self.generate_training_AE(image_tau1, logits_TeacherTau1)\n # # student_logit = self.projector(self.backbone(image_AE))\n # # adv_loss = -F.cosine_similarity(student_logit, logits_TeacherTau1).mean()\n\n # # CODE Based experiment\n # # 110\n # # 111\n # # experiment_code = [1, 0, 1]\n # # experiment_code = [1, 0, 0]\n\n # experiment_code = [0, 0, 0]\n # # experiment_code = [0, 1, 1]\n # # experiment_code = [0, 0, 1]\n # # experiment_code = [0, 1, 0]\n\n # with torch.no_grad():\n # # away target\n # if experiment_code[1] == 0:\n # # away_target = self.projector(self.momentum_backbone(image_weak))\n # away_target = self.momentum_projector(self.momentum_backbone(image_weak))\n\n # elif experiment_code[1] == 1:\n # # away_target = self.projector(self.momentum_backbone(image_tau1))\n # away_target = self.momentum_projector(self.momentum_backbone(image_tau1))\n\n\n # # Loss learning target\n # if experiment_code[0] == experiment_code[1]:\n # learning_target = away_target\n # elif experiment_code[0] == 0:\n # # learning_target = self.projector(self.momentum_backbone(image_weak))\n # learning_target = self.momentum_projector(self.momentum_backbone(image_weak))\n\n # elif experiment_code[0] == 1:\n # # learning_target = self.projector(self.momentum_backbone(image_tau1))\n # learning_target = self.momentum_projector(self.momentum_backbone(image_tau1))\n\n\n # if experiment_code[2] == 0:\n # AE_generation_image = image_weak\n # elif experiment_code[2] == 1:\n # AE_generation_image = image_tau1\n\n # image_AE = self.generate_training_AE(AE_generation_image, away_target)\n # student_logit = self.projector(self.backbone(image_AE))\n # adv_loss = -F.cosine_similarity(student_logit, learning_target).mean()\n\n ############################################################################\n # Adversarial Training (CODE Experiment)\n ############################################################################\n\n\n\n\n\n ############################################################################\n # Online clean classifier training\n ############################################################################\n\n # Bug Fix: train classifier using evaluation mode\n self.backbone.eval()\n outs_image_weak = self._base_shared_step(image_weak, targets, bn_name=\"pgd\")\n self.backbone.train()\n\n metrics = {\n \"train_class_loss\": outs_image_weak[\"loss\"],\n \"train_acc1\": outs_image_weak[\"acc1\"],\n \"train_acc5\": outs_image_weak[\"acc5\"],\n }\n class_loss_clean = outs_image_weak[\"loss\"]\n self.log_dict(metrics, on_epoch=True)\n ############################################################################\n # Online clean classifier training\n ############################################################################\n\n\n\n\n ############################################################################\n # Online adv classifier training\n ############################################################################\n\n # Bug Fix: train classifier using evaluation mode\n # logits = self.classifier_adv(student_logits_AE)\n\n self.backbone.eval()\n AE_feats = self.backbone(image_AE, \"pgd\")\n logits = self.classifier_adv(AE_feats.detach())\n self.backbone.train()\n class_loss_adv = F.cross_entropy(logits, targets, ignore_index=-1)\n\n # handle when the number of classes is smaller than 5\n top_k_max = min(5, 
logits.size(1))\n acc1, acc5 = accuracy_at_k(logits, targets, top_k=(1, top_k_max))\n\n metrics = {\n \"train_class_loss_adv_classifier\": class_loss_adv,\n \"train_acc1_adv_classifier\": acc1,\n \"train_acc5_adv_classifier\": acc5,\n }\n class_loss_clean = outs_image_weak[\"loss\"]\n self.log_dict(metrics, on_epoch=True)\n ############################################################################\n # Online adv classifier training\n ############################################################################\n\n\n\n self.log(\"adv_loss\", adv_loss, on_epoch=True, sync_dist=True)\n\n return adv_loss + class_loss_adv + class_loss_clean\n\n\n\n def generate_training_AE(self, image: torch.Tensor, away_target: torch.Tensor):\n \"\"\"\n images_org: weak aug\n away_target: from teacher\n \"\"\"\n\n self.epsilon = 8/255.\n self.num_steps = 5\n self.step_size = 2/255.\n\n x_cl = image.clone().detach()\n\n # if self.rand:\n x_cl = x_cl + torch.zeros_like(image).uniform_(-self.epsilon, self.epsilon)\n\n # f_ori_proj = self.model(images_org).detach()\n # Change the attack process of model to eval\n self.backbone.eval()\n\n for i in range(self.num_steps):\n x_cl.requires_grad_()\n with torch.enable_grad():\n f_proj = self.projector(self.backbone(x_cl, \"pgd\"))\n\n # loss_contrast = -F.cosine_similarity(f_proj, f_ori_proj, dim=1).mean()\n loss_contrast = -F.cosine_similarity(f_proj, away_target, dim=1).sum() *256\n loss = loss_contrast\n\n # import ipdb ;ipdb.set_trace()\n grad_x_cl = torch.autograd.grad(loss, x_cl)[0]\n # grad_x_cl = torch.autograd.grad(loss, x_cl, grad_outputs=torch.ones_like(loss))[0]\n x_cl = x_cl.detach() + self.step_size * torch.sign(grad_x_cl.detach())\n x_cl = torch.min(torch.max(x_cl, image - self.epsilon), image + self.epsilon)\n x_cl = torch.clamp(x_cl, 0, 1)\n\n self.backbone.train()\n\n return x_cl\n\n" ]
[ [ "torch.max", "torch.autograd.grad", "torch.nn.Linear", "torch.randn", "torch.nn.functional.normalize", "torch.zeros_like", "torch.no_grad", "torch.enable_grad", "torch.nn.functional.cross_entropy", "torch.nn.Identity", "torch.zeros", "torch.nn.ReLU", "torch.clamp", "torch.nn.functional.cosine_similarity" ] ]
abdouaziz/wolof-translation
[ "505324f8a7c5a91a42e2c775495fc3bdebc8f761" ]
[ "src/t5.py" ]
[ "import torch\nimport torch.nn as nn \nimport argparse\nimport numpy as np\nfrom torch.utils.data import DataLoader , Dataset\nimport pandas as pd \nfrom tqdm import tqdm \nfrom transformers import ( \n BertTokenizer,\n AdamW , \n get_linear_schedule_with_warmup ,\n T5Tokenizer,\n T5ForConditionalGeneration)\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser(description=\"Pretrained Machine Translation French to Wolof\")\n \n parser.add_argument(\n \"--train_file\", type=str, default=None, help=\"A csv file containing the training data.\"\n )\n \n\n parser.add_argument(\n \"--max_source_length\",\n type=int,\n default=150,\n help=\"The maximum total input sequence length after \"\n \"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\n \"--max_target_length\",\n type=int,\n default=150,\n help=\"The maximum total sequence length for target text after \"\n \"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.\"\n \n )\n\n parser.add_argument(\n \"--number_epochs\",\n type=int,\n default=3,\n help=\"Total number of training steps to perform the model .\",\n ) \n\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=3e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n\n parser.add_argument(\n \"--epsilone\",\n type=float,\n default=1e-8,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n ) \n\n parser.add_argument(\n \"--train_batch_size\",\n type=int,\n default=1,\n help=\"Batch size (per device) for the training dataloader.\",\n )\n\n parser.add_argument(\n \"--model_name\",\n type=str,\n default=\"t5-base\",\n help=\"Pretrained model name.\",\n )\n\n parser.add_argument(\n \"--task_prefix\",\n type=str,\n default=\"translate French to Wolof: \",\n help=\"The task prefix for the translation.\",\n )\n \n \n args = parser.parse_args()\n\n \n if args.train_file is not None:\n extension = args.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n \n \n return args\n \n \n\n\nclass NMTDataset(Dataset):\n \"\"\"\n Dataset Class \n \n \"\"\"\n def __init__(self, frenchs, wolofs , tokenizer , max_len_source , max_len_target):\n \"\"\"\n Parameters :\n ------------\n frenchs : \n wolofs : \n tokenizer:\n max_len_sourrce:\n max_len_target: \n \"\"\"\n self.frenchs = frenchs\n self.wolofs = wolofs\n self.tokenizer = tokenizer\n self.max_len_source = max_len_source\n self.max_len_target = max_len_target\n \n \n def __len__(self):\n return len(self.frenchs)\n \n def __getitem__(self, item):\n \n french = str(self.frenchs[item])\n wolof = str(self.wolofs[item])\n\n french_encoding = self.tokenizer(\n french,\n add_special_tokens=True,\n max_length=self.max_len_source,\n pad_to_max_length=True,\n return_attention_mask=True,\n return_tensors='pt') \n\n labels = self.tokenizer(\n wolof,\n add_special_tokens=True,\n max_length=self.max_len_target,\n return_token_type_ids=True,\n pad_to_max_length=True,\n return_attention_mask=True\n ).input_ids\n \n \n \n labels = [\n [(label if label != self.tokenizer.pad_token_id else -100) for label in labels]]\n\n labels = torch.tensor(labels)\n\n return {\n 'input_ids': french_encoding['input_ids'].flatten(),\n 'attention_mask':french_encoding['attention_mask'].flatten(),\n 'labels': labels.flatten()\n }\n\n\n\n\n\ndef 
NMTDataloader(df, batch_size, tokenizer, max_len_source, max_len_target):\n\n    dataset = NMTDataset(df.french.values, df.wolof.values, tokenizer, max_len_source, max_len_target)\n    dataloader = DataLoader(dataset, batch_size, num_workers=4)\n    return dataloader\n\n\ndef yield_optimizer(model, lr=3e-5, eps=1e-8):\n    \"\"\"\n    Returns optimizer for specific parameters\n    \"\"\"\n    param_optimizer = list(model.named_parameters())\n    no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n\n    optimizer_parameters = [\n        {\n            \"params\": [\n                p for n, p in param_optimizer if not any(nd in n for nd in no_decay)\n            ],\n            \"weight_decay\": 0.001,\n        },\n        {\n            \"params\": [\n                p for n, p in param_optimizer if any(nd in n for nd in no_decay)\n            ],\n            \"weight_decay\": 0.0,\n        },\n    ]\n    return AdamW(optimizer_parameters, lr=lr, eps=eps)\n\n\ndef train_epoch(model, data_loader, optimizer, device, scheduler):\n    model.train()\n    losses = []\n\n    for step, d in tqdm(enumerate(data_loader), total=len(data_loader)):\n\n        input_ids = d['input_ids'].to(device)\n        attention_mask = d['attention_mask'].to(device)\n        labels = d['labels'].to(device)\n\n        outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)\n        loss = outputs.loss\n\n        losses.append(loss.item())\n\n        loss.backward()\n        optimizer.step()\n        scheduler.step()\n        optimizer.zero_grad()\n\n        if (step + 1) % 10 == 0:\n            print('Step: {} | loss: {}'.format(step + 1, np.mean(losses)))\n\n\ndef train():\n\n    args = parse_args()\n\n    tokenizer = T5Tokenizer.from_pretrained(args.model_name)\n\n    model = T5ForConditionalGeneration.from_pretrained(args.model_name)\n    model.to(device)\n\n    df = pd.read_csv(args.train_file)\n    df[\"french\"] = df[\"french\"].apply(lambda x: args.task_prefix + x)\n\n    train_data_loader = NMTDataloader(df, args.train_batch_size, tokenizer, args.max_source_length, args.max_target_length)\n\n    # len(train_data_loader) already counts batches per epoch\n    nb_train_steps = len(train_data_loader) * args.number_epochs\n    optimizer = yield_optimizer(model, lr=args.learning_rate, eps=args.epsilone)\n    scheduler = get_linear_schedule_with_warmup(\n        optimizer,\n        num_warmup_steps=0,\n        num_training_steps=nb_train_steps)\n\n    for epoch in range(args.number_epochs):\n\n        print(f'Epoch {epoch + 1}')\n\n        train_epoch(model, train_data_loader, optimizer, device, scheduler)\n\n    model.save_pretrained(\"../model/bert2bert\")\n    return model\n\n\nif __name__ == '__main__':\n    train()" ]
[ [ "torch.utils.data.DataLoader", "pandas.read_csv", "torch.tensor", "torch.cuda.is_available", "numpy.mean" ] ]
TWJianNuo/detectron2
[ "091bc43e85b8f7cefdccebf8d85afb7cfff2a3f0" ]
[ "kitti2cityscapesScripts/preparation/createPanopticImgs.py" ]
[ "#!/usr/bin/python\n#\n# Converts the *instanceIds.png annotations of the Cityscapes dataset\n# to COCO-style panoptic segmentation format (http://cocodataset.org/#format-data).\n# The convertion is working for 'fine' set of the annotations.\n#\n# By default with this tool uses IDs specified in labels.py. You can use flag\n# --use-train-id to get train ids for categories. 'ignoreInEval' categories are\n# removed during the conversion.\n#\n# In panoptic segmentation format image_id is used to match predictions and ground truth.\n# For cityscapes image_id has form <city>_123456_123456 and corresponds to the prefix\n# of cityscapes image files.\n#\n\n# python imports\nfrom __future__ import print_function, absolute_import, division, unicode_literals\nimport os\nimport glob\nimport sys\nimport argparse\nimport json\nimport numpy as np\n\n# Image processing\nfrom PIL import Image\n\n# cityscapes imports\nfrom kitti2cityscapesScripts.helpers.csHelpers import printError\nfrom kitti2cityscapesScripts.helpers.labels import id2label, labels\n\n\n# The main method\ndef convert2panoptic(cityscapesPath=None, outputFolder=None, useTrainId=False, setNames=[\"val\", \"train\", \"test\"]):\n # Where to look for Cityscapes\n if cityscapesPath is None:\n if 'CITYSCAPES_DATASET' in os.environ:\n cityscapesPath = os.environ['CITYSCAPES_DATASET']\n else:\n cityscapesPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..')\n cityscapesPath = os.path.join(cityscapesPath, \"gtFine\")\n\n if outputFolder is None:\n outputFolder = cityscapesPath\n\n categories = []\n for label in labels:\n if label.ignoreInEval:\n continue\n categories.append({'id': int(label.trainId) if useTrainId else int(label.id),\n 'name': label.name,\n 'color': label.color,\n 'supercategory': label.category,\n 'isthing': 1 if label.hasInstances else 0})\n\n for setName in setNames:\n # how to search for all ground truth\n searchFine = os.path.join(cityscapesPath, setName, \"*\", \"*_instanceIds.png\")\n # search files\n filesFine = glob.glob(searchFine)\n filesFine.sort()\n\n files = filesFine\n # quit if we did not find anything\n if not files:\n printError(\n \"Did not find any files for {} set using matching pattern {}. 
Please consult the README.\".format(setName, searchFine)\n )\n # a bit verbose\n print(\"Converting {} annotation files for {} set.\".format(len(files), setName))\n\n trainIfSuffix = \"_trainId\" if useTrainId else \"\"\n outputBaseFile = \"cityscapes_panoptic_{}{}\".format(setName, trainIfSuffix)\n outFile = os.path.join(outputFolder, \"{}.json\".format(outputBaseFile))\n print(\"Json file with the annotations in panoptic format will be saved in {}\".format(outFile))\n panopticFolder = os.path.join(outputFolder, outputBaseFile)\n if not os.path.isdir(panopticFolder):\n print(\"Creating folder {} for panoptic segmentation PNGs\".format(panopticFolder))\n os.mkdir(panopticFolder)\n print(\"Corresponding segmentations in .png format will be saved in {}\".format(panopticFolder))\n\n images = []\n annotations = []\n for progress, f in enumerate(files):\n\n originalFormat = np.array(Image.open(f))\n\n fileName = os.path.basename(f)\n imageId = fileName.replace(\"_gtFine_instanceIds.png\", \"\")\n inputFileName = fileName.replace(\"_instanceIds.png\", \"_leftImg8bit.png\")\n outputFileName = fileName.replace(\"_instanceIds.png\", \"_panoptic.png\")\n # image entry, id for image is its filename without extension\n images.append({\"id\": imageId,\n \"width\": int(originalFormat.shape[1]),\n \"height\": int(originalFormat.shape[0]),\n \"file_name\": inputFileName})\n\n pan_format = np.zeros(\n (originalFormat.shape[0], originalFormat.shape[1], 3), dtype=np.uint8\n )\n\n segmentIds = np.unique(originalFormat)\n segmInfo = []\n for segmentId in segmentIds:\n if segmentId < 1000:\n semanticId = segmentId\n isCrowd = 1\n else:\n semanticId = segmentId // 1000\n isCrowd = 0\n labelInfo = id2label[semanticId]\n categoryId = labelInfo.trainId if useTrainId else labelInfo.id\n if labelInfo.ignoreInEval:\n continue\n if not labelInfo.hasInstances:\n isCrowd = 0\n\n mask = originalFormat == segmentId\n color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]\n pan_format[mask] = color\n\n area = np.sum(mask) # segment area computation\n\n # bbox computation for a segment\n hor = np.sum(mask, axis=0)\n hor_idx = np.nonzero(hor)[0]\n x = hor_idx[0]\n width = hor_idx[-1] - x + 1\n vert = np.sum(mask, axis=1)\n vert_idx = np.nonzero(vert)[0]\n y = vert_idx[0]\n height = vert_idx[-1] - y + 1\n bbox = [int(x), int(y), int(width), int(height)]\n\n segmInfo.append({\"id\": int(segmentId),\n \"category_id\": int(categoryId),\n \"area\": int(area),\n \"bbox\": bbox,\n \"iscrowd\": isCrowd})\n\n annotations.append({'image_id': imageId,\n 'file_name': outputFileName,\n \"segments_info\": segmInfo})\n\n Image.fromarray(pan_format).save(os.path.join(panopticFolder, outputFileName))\n\n print(\"\\rProgress: {:>3.2f} %\".format((progress + 1) * 100 / len(files)), end=' ')\n sys.stdout.flush()\n\n print(\"\\nSaving the json file {}\".format(outFile))\n d = {'images': images,\n 'annotations': annotations,\n 'categories': categories}\n with open(outFile, 'w') as f:\n json.dump(d, f, sort_keys=True, indent=4)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset-folder\",\n dest=\"cityscapesPath\",\n help=\"path to the Cityscapes dataset 'gtFine' folder\",\n default=None,\n type=str)\n parser.add_argument(\"--output-folder\",\n dest=\"outputFolder\",\n help=\"path to the output folder.\",\n default=None,\n type=str)\n parser.add_argument(\"--use-train-id\", action=\"store_true\", dest=\"useTrainId\")\n parser.add_argument(\"--set-names\",\n dest=\"setNames\",\n help=\"set names to 
which apply the function to\",\n nargs='+',\n default=[\"val\", \"train\", \"test\"],\n type=str)\n args = parser.parse_args()\n\n convert2panoptic(args.cityscapesPath, args.outputFolder, args.useTrainId, args.setNames)\n\n\n# call the main\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.sum", "numpy.nonzero", "numpy.unique", "numpy.zeros" ] ]
dankernel/mnist-qnn
[ "a8b9bfc0689625ee593d990be70e8b375f233d26" ]
[ "qnn_utils.py" ]
[ "\nimport os\nimport numpy as np\nfrom termcolor import colored\n\ndef ndarray_to_bin(ndarray, out_path: str):\n \"\"\"\n ndarray to bin file\n (4byte) dim\n (4byte) shape x dim\n\n :param ndarray: target numpy ndarrat\n :param str out_path: output path\n :return: None\n \"\"\"\n\n with open(out_path, 'wb') as file:\n dim = len(ndarray.shape)\n print('dim :', dim)\n file.write(dim.to_bytes(4, byteorder='little', signed=True))\n for s in range(dim):\n size = ndarray.shape[s]\n print('size :', size)\n file.write(size.to_bytes(4, byteorder='little', signed=True))\n file.write(ndarray.tobytes())\n \n\ndef print_debug_hex(input_array):\n \"\"\"\n Print HEX\n\n :param np.ndarray array: input array\n :return: None\n \"\"\"\n array = input_array.view(dtype=np.uint8)\n\n terminal_rows, terminal_columns = map(int, os.popen('stty size', 'r').read().split())\n\n print_hex_rows = min(array.shape[0] - 1, terminal_rows - 5)\n print_hex_columns = min(array.shape[1] - 1, (terminal_columns - 16) // 3)\n\n if input_array.dtype == np.int8 or input_array.dtype == np.uint8:\n print_hex_columns -= (print_hex_columns + 1) % 2\n elif input_array.dtype == np.int16 or input_array.dtype == np.uint16:\n print_hex_columns -= (print_hex_columns + 1) % 4\n\n is_ellipsis_rows = array.shape[0] - 1 > terminal_rows - 5\n is_ellipsis_columns = array.shape[1] - 1 > (terminal_columns - 16) // 3\n\n if __debug__:\n print('print_hex_rows :', print_hex_rows)\n print('print_hex_columns :', print_hex_columns)\n print('is_ellipsis_rows :', is_ellipsis_rows)\n print('is_ellipsis_columns :', is_ellipsis_columns)\n\n msgs = []\n # ..........0.........1....\n # ..........01234567890123\n msgs.append(' dd dddd ') # 0\n msgs.append(' ┌────┐') # 1\n msgs.append(' dd │ xx │') # 2\n msgs.append(' dddd └────┘') # 3\n\n # columns(X-axis) extend\n for i in range(len(msgs)):\n for j in range(print_hex_columns):\n if i == 0:\n msgs[i] = msgs[i][:7] + ' dd' + msgs[i][7:]\n elif i == 1 or i == 3:\n msgs[i] = msgs[i][:7] + '───' + msgs[i][7:]\n else:\n msgs[i] = msgs[i][:7] + ' xx' + msgs[i][7:]\n\n # rows(Y-axis) extend\n for i in range(print_hex_rows):\n msgs.insert(2, msgs[2])\n\n for i in range(len(msgs)):\n # dddd -> {:4} \n msgs[i] = msgs[i].replace('dddd', colored('{:4}', 'green'))\n\n # xx -> {:02X}\n # xx xx -> {:02X} {:02X}\n if input_array.dtype == np.int8 or input_array.dtype == np.uint8:\n temp = colored('{:02X} ', 'green') + colored('{:02X}', 'red')\n msgs[i] = msgs[i].replace('xx xx', temp)\n elif input_array.dtype == np.int16 or input_array.dtype == np.uint16:\n temp = colored('{:02X} {:02X} ', 'green') + colored('{:02X} {:02X}', 'red')\n msgs[i] = msgs[i].replace('xx xx xx xx', temp)\n\n # dd -> {:02}\n msgs[i] = msgs[i].replace('dd', '{:02}')\n\n # print all\n ellipsis_line = -3\n for i in range(len(msgs)):\n if i == 0:\n # columns index\n temp = list(range(print_hex_columns + 1))\n tepm = temp.append(array.shape[1])\n print(msgs[i].format(*temp))\n elif i == len(msgs) - 1:\n # rows index\n print(msgs[i].format(array.shape[0]))\n else:\n # data\n if is_ellipsis_columns:\n if len(msgs) + ellipsis_line - 1 == i:\n # ellipsis line ('..')\n msgs[i] = msgs[i].replace('{:02X}', '..')\n tepm = temp.insert(0, i - 2) # index\n print(msgs[i].format(*temp))\n elif len(msgs) + ellipsis_line - 2 < i:\n # afterword (-n)\n temp = list(array[i - print_hex_rows - 3]) # Hex datas\n tepm = temp.insert(0, i - print_hex_rows - 3) # index\n print(msgs[i].format(*temp))\n else:\n # general data (+n)\n temp = list(array[i-2]) # Hex datas\n tepm = 
temp.insert(0, i - 2) # index\n print(msgs[i].format(*temp))\n else:\n temp = list(array[i-2])\n tepm = temp.insert(0, i - 2)\n print(msgs[i].format(*temp))\n\n return\n\ndef test():\n\n # test mode\n TEST_MODE = 'NPY' # INT8 / INT16 / NPY\n \n # Init or Load\n if TEST_MODE == 'INT8':\n array = np.random.randint(0xFF, size=(500, 3000), dtype=np.uint8)\n elif TEST_MODE == 'INT16':\n array = np.random.randint(0xFFFF, size=(500, 3000), dtype=np.uint16)\n elif TEST_MODE == 'NPY':\n array = np.load('bin/FC1.npy')\n\n # Test\n print(array)\n print_debug_hex(array)\n pass\n\nif __name__ == '__main__':\n test()\n\n" ]
[ [ "numpy.load", "numpy.random.randint" ] ]
sunilmallya/gym-duckietown
[ "d915bbe0317ee355f82a7b22d3314fbab8563187", "d915bbe0317ee355f82a7b22d3314fbab8563187" ]
[ "pytorch_rl/main.py", "standalone.py" ]
[ "import copy\nimport glob\nimport os\nimport time\nimport operator\nfrom functools import reduce\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom arguments import get_args\nfrom vec_env.dummy_vec_env import DummyVecEnv\nfrom vec_env.subproc_vec_env import SubprocVecEnv\nfrom envs import make_env\nfrom kfac import KFACOptimizer\nfrom model import CNNPolicy, MLPPolicy\nfrom storage import RolloutStorage\nfrom visualize import visdom_plot\n\nargs = get_args()\n\nassert args.algo in ['a2c', 'ppo', 'acktr']\nif args.recurrent_policy:\n assert args.algo in ['a2c', 'ppo'], \\\n 'Recurrent policy is not implemented for ACKTR'\n\nnum_updates = int(args.num_frames) // args.num_steps // args.num_processes\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\ntry:\n os.makedirs(args.log_dir)\nexcept OSError:\n files = glob.glob(os.path.join(args.log_dir, '*.monitor.csv'))\n for f in files:\n os.remove(f)\n\ndef main():\n os.environ['OMP_NUM_THREADS'] = '1'\n\n if args.vis:\n from visdom import Visdom\n viz = Visdom()\n win = None\n\n envs = [make_env(args.env_name, args.seed, i, args.log_dir, args.start_container)\n for i in range(args.num_processes)]\n\n if args.num_processes > 1:\n envs = SubprocVecEnv(envs)\n else:\n envs = DummyVecEnv(envs)\n\n obs_shape = envs.observation_space.shape\n obs_shape = (obs_shape[0] * args.num_stack, *obs_shape[1:])\n obs_numel = reduce(operator.mul, obs_shape, 1)\n\n if len(obs_shape) == 3 and obs_numel > 1024:\n actor_critic = CNNPolicy(obs_shape[0], envs.action_space, args.recurrent_policy)\n else:\n assert not args.recurrent_policy, \\\n \"Recurrent policy is not implemented for the MLP controller\"\n actor_critic = MLPPolicy(obs_numel, envs.action_space)\n\n modelSize = 0\n for p in actor_critic.parameters():\n pSize = reduce(operator.mul, p.size(), 1)\n modelSize += pSize\n print(str(actor_critic))\n print('Total model size: %d' % modelSize)\n\n if envs.action_space.__class__.__name__ == \"Discrete\":\n action_shape = 1\n else:\n action_shape = envs.action_space.shape[0]\n\n if args.cuda:\n actor_critic.cuda()\n\n if args.algo == 'a2c':\n optimizer = optim.RMSprop(actor_critic.parameters(), args.lr, eps=args.eps, alpha=args.alpha)\n elif args.algo == 'ppo':\n optimizer = optim.Adam(actor_critic.parameters(), args.lr, eps=args.eps)\n elif args.algo == 'acktr':\n optimizer = KFACOptimizer(actor_critic)\n\n rollouts = RolloutStorage(args.num_steps, args.num_processes, obs_shape, envs.action_space, actor_critic.state_size)\n current_obs = torch.zeros(args.num_processes, *obs_shape)\n\n def update_current_obs(obs):\n shape_dim0 = envs.observation_space.shape[0]\n obs = torch.from_numpy(obs).float()\n if args.num_stack > 1:\n current_obs[:, :-shape_dim0] = current_obs[:, shape_dim0:]\n current_obs[:, -shape_dim0:] = obs\n\n obs = envs.reset()\n update_current_obs(obs)\n\n rollouts.observations[0].copy_(current_obs)\n\n # These variables are used to compute average rewards for all processes.\n episode_rewards = torch.zeros([args.num_processes, 1])\n final_rewards = torch.zeros([args.num_processes, 1])\n reward_avg = 0\n\n if args.cuda:\n current_obs = current_obs.cuda()\n rollouts.cuda()\n\n start = time.time()\n for j in range(num_updates):\n for step in range(args.num_steps):\n # Sample actions\n value, action, action_log_prob, states = actor_critic.act(\n Variable(rollouts.observations[step]),\n 
Variable(rollouts.states[step]),\n Variable(rollouts.masks[step])\n )\n cpu_actions = action.data.squeeze(1).cpu().numpy()\n\n # Observation, reward and next obs\n obs, reward, done, info = envs.step(cpu_actions)\n\n # Maxime: clip the reward within [0,1] for more reliable training\n # This code deals poorly with large reward values\n reward = np.clip(reward, a_min=0, a_max=None) / 400\n\n reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float()\n episode_rewards += reward\n\n # If done then clean the history of observations.\n masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])\n final_rewards *= masks\n final_rewards += (1 - masks) * episode_rewards\n episode_rewards *= masks\n\n if args.cuda:\n masks = masks.cuda()\n\n if current_obs.dim() == 4:\n current_obs *= masks.unsqueeze(2).unsqueeze(2)\n else:\n current_obs *= masks\n\n update_current_obs(obs)\n rollouts.insert(step, current_obs, states.data, action.data, action_log_prob.data, value.data, reward, masks)\n\n next_value = actor_critic(\n Variable(rollouts.observations[-1]),\n Variable(rollouts.states[-1]),\n Variable(rollouts.masks[-1])\n )[0].data\n\n rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)\n\n if args.algo in ['a2c', 'acktr']:\n values, action_log_probs, dist_entropy, states = actor_critic.evaluate_actions(Variable(rollouts.observations[:-1].view(-1, *obs_shape)),\n Variable(rollouts.states[0].view(-1, actor_critic.state_size)),\n Variable(rollouts.masks[:-1].view(-1, 1)),\n Variable(rollouts.actions.view(-1, action_shape)))\n\n values = values.view(args.num_steps, args.num_processes, 1)\n action_log_probs = action_log_probs.view(args.num_steps, args.num_processes, 1)\n\n advantages = Variable(rollouts.returns[:-1]) - values\n value_loss = advantages.pow(2).mean()\n\n action_loss = -(Variable(advantages.data) * action_log_probs).mean()\n\n if args.algo == 'acktr' and optimizer.steps % optimizer.Ts == 0:\n # Sampled fisher, see Martens 2014\n actor_critic.zero_grad()\n pg_fisher_loss = -action_log_probs.mean()\n\n value_noise = Variable(torch.randn(values.size()))\n if args.cuda:\n value_noise = value_noise.cuda()\n\n sample_values = values + value_noise\n vf_fisher_loss = -(values - Variable(sample_values.data)).pow(2).mean()\n\n fisher_loss = pg_fisher_loss + vf_fisher_loss\n optimizer.acc_stats = True\n fisher_loss.backward(retain_graph=True)\n optimizer.acc_stats = False\n\n optimizer.zero_grad()\n (value_loss * args.value_loss_coef + action_loss - dist_entropy * args.entropy_coef).backward()\n\n if args.algo == 'a2c':\n nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm)\n\n optimizer.step()\n\n elif args.algo == 'ppo':\n advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)\n\n for e in range(args.ppo_epoch):\n if args.recurrent_policy:\n data_generator = rollouts.recurrent_generator(advantages,\n args.num_mini_batch)\n else:\n data_generator = rollouts.feed_forward_generator(advantages,\n args.num_mini_batch)\n\n for sample in data_generator:\n observations_batch, states_batch, actions_batch, \\\n return_batch, masks_batch, old_action_log_probs_batch, \\\n adv_targ = sample\n\n # Reshape to do in a single forward pass for all steps\n values, action_log_probs, dist_entropy, states = actor_critic.evaluate_actions(Variable(observations_batch),\n Variable(states_batch),\n Variable(masks_batch),\n Variable(actions_batch))\n\n adv_targ = Variable(adv_targ)\n 
ratio = torch.exp(action_log_probs - Variable(old_action_log_probs_batch))\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - args.clip_param, 1.0 + args.clip_param) * adv_targ\n action_loss = -torch.min(surr1, surr2).mean() # PPO's pessimistic surrogate (L^CLIP)\n\n value_loss = (Variable(return_batch) - values).pow(2).mean()\n\n optimizer.zero_grad()\n (value_loss + action_loss - dist_entropy * args.entropy_coef).backward()\n nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm)\n optimizer.step()\n\n rollouts.after_update()\n\n if j % args.save_interval == 0 and args.save_dir != \"\":\n save_path = os.path.join(args.save_dir, args.algo)\n try:\n os.makedirs(save_path)\n except OSError:\n pass\n\n # A really ugly way to save a model to CPU\n save_model = actor_critic\n if args.cuda:\n save_model = copy.deepcopy(actor_critic).cpu()\n\n save_model = [save_model,\n hasattr(envs, 'ob_rms') and envs.ob_rms or None]\n\n torch.save(save_model, os.path.join(save_path, args.env_name + \".pt\"))\n\n if j % args.log_interval == 0:\n reward_avg = 0.99 * reward_avg + 0.01 * final_rewards.mean()\n end = time.time()\n total_num_steps = (j + 1) * args.num_processes * args.num_steps\n\n print(\n \"Updates {}, num timesteps {}, FPS {}, running avg reward {:.3f}, entropy {:.5f}, value loss {:.5f}, policy loss {:.5f}\".\n format(\n j,\n total_num_steps,\n int(total_num_steps / (end - start)),\n reward_avg,\n dist_entropy.data[0],\n value_loss.data[0],\n action_loss.data[0]\n )\n )\n\n \"\"\"\n print(\"Updates {}, num timesteps {}, FPS {}, mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, entropy {:.5f}, value loss {:.5f}, policy loss {:.5f}\".\n format(\n j,\n total_num_steps,\n int(total_num_steps / (end - start)),\n final_rewards.mean(),\n final_rewards.median(),\n final_rewards.min(),\n final_rewards.max(), dist_entropy.data[0],\n value_loss.data[0], action_loss.data[0])\n )\n \"\"\"\n\n if args.vis and j % args.vis_interval == 0:\n try:\n # Sometimes monitor doesn't properly flush the outputs\n win = visdom_plot(viz, win, args.log_dir, args.env_name, args.algo)\n except IOError:\n pass\n\nif __name__ == \"__main__\":\n main()\n", "#!/usr/bin/env python3\n\nimport sys\nimport argparse\n\nimport pyglet\nimport numpy as np\n\nimport gym\nimport gym_duckietown\nfrom gym_duckietown.envs import SimpleSimEnv\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--env-name', default='SimpleSim-v0')\nparser.add_argument('--map-name', default='udem1')\nparser.add_argument('--draw-curve', action='store_true', help='draw the lane following curve')\nparser.add_argument('--no-random', action='store_true', help='disable domain randomization')\n\nargs = parser.parse_args()\n\nif args.env_name == 'SimpleSim-v0':\n env = SimpleSimEnv(\n map_name = args.map_name,\n draw_curve = args.draw_curve,\n domain_rand = not args.no_random\n )\nelse:\n env = gym.make(args.env_name)\n\nenv.reset()\nenv.render()\n\ndef save_numpy_img(file_name, img):\n img = np.ascontiguousarray(img)\n img = (img * 255).astype(np.uint8)\n img = np.flip(img, 0)\n\n from skimage import io\n io.imsave(file_name, img)\n\nlastImgNo = 0\ndef save_img(img):\n global lastImgNo\n save_numpy_img('img_%03d.png' % lastImgNo, img)\n lastImgNo += 1\n\[email protected]\ndef on_key_press(symbol, modifiers):\n from pyglet.window import key\n\n action = None\n if symbol == key.LEFT:\n print('left')\n action = np.array([0.00, 0.40])\n elif symbol == key.RIGHT:\n print('right')\n action = np.array([0.40, 0.00])\n elif 
symbol == key.UP:\n print('forward')\n action = np.array([0.40, 0.40])\n elif symbol == key.BACKSPACE or symbol == key.SLASH:\n print('RESET')\n action = None\n env.reset()\n env.render()\n elif symbol == key.SPACE:\n action = np.array([0, 0])\n elif symbol == key.ESCAPE:\n env.close()\n sys.exit(0)\n else:\n return\n\n if action is not None:\n print('stepping')\n obs, reward, done, info = env.step(action)\n print('step_count = %s, reward=%.3f' % (env.step_count, reward))\n\n env.render()\n\n #save_img(obs)\n\n if done:\n print('done!')\n env.reset()\n env.render()\n\n# Enter main event loop\npyglet.app.run()\n\nenv.close()\n" ]
[ [ "torch.FloatTensor", "torch.min", "torch.cuda.manual_seed", "numpy.stack", "torch.manual_seed", "torch.autograd.Variable", "torch.from_numpy", "numpy.clip", "torch.zeros", "torch.clamp" ], [ "numpy.array", "numpy.ascontiguousarray", "numpy.flip" ] ]
ntropy-network/ntropy-sdk
[ "7fa1c1e90be64f27f5f5034f804b1eb04e78ad78" ]
[ "tests/test_benchmark.py" ]
[ "import sys\nimport tempfile\nimport pytest\nimport csv\nimport pandas as pd\n\nfrom tests import API_KEY\n\nfrom ntropy_sdk import SDK\nfrom ntropy_sdk.benchmark import main\n\n\nTRANSACTIONS = [\n {\n \"\": \"0\",\n \"account_id\": \"6039c4ac1c63e9c7\",\n \"description\": \"AMAZON WEB SERVICES AWS.AMAZON.CO WA Ref5543286P25S Crd15\",\n \"date\": \"2021-12-12\",\n \"amount\": \"2687\",\n \"entry_type\": \"debit\",\n \"iso_currency_code\": \"USD\",\n \"labels\": \"cloud computing - infrastructure\",\n \"source\": \"foo\",\n },\n {\n \"\": \"1\",\n \"account_id\": \"601343505fd633\",\n \"date\": \"2021-12-12\",\n \"description\": \"TARGET T- 5800 20th St 11/30/19 17:32\",\n \"amount\": \"22.5\",\n \"entry_type\": \"debit\",\n \"iso_currency_code\": \"USD\",\n \"labels\": \"goods - department stores\",\n \"source\": \"foo\",\n },\n]\n\n\[email protected]\ndef data_set_file():\n with tempfile.NamedTemporaryFile() as f:\n pd.DataFrame(TRANSACTIONS).to_csv(f)\n\n yield f.name\n\n\[email protected]\ndef sdk():\n return SDK(API_KEY)\n\n\ndef test_enrich_dataframe(sdk, data_set_file):\n with open(data_set_file) as f:\n df = pd.read_csv(f)\n df[\"iso_currency_code\"] = \"USD\"\n df[\"account_holder_id\"] = \"1\"\n df[\"account_holder_type\"] = \"business\"\n del df[\"labels\"]\n\n sdk.enrich_dataframe(df)\n\n\ndef test_command_line(data_set_file):\n with tempfile.NamedTemporaryFile() as output_file:\n sys.argv = [\n \"ntropy-benchmark\",\n \"--api-key\",\n API_KEY,\n \"--api-url\",\n \"https://api.ntropy.network\",\n \"--in-csv-file\",\n data_set_file,\n \"--out-csv-file\",\n output_file.name,\n \"--hardcoded-field\",\n '{\"account_holder_type\": \"business\", \"iso_currency_code\":\"USD\", \"account_holder_id\": \"1\"}',\n \"--poll-interval\",\n \"1\",\n \"--ground-truth-label-field\",\n \"labels\",\n \"--field-mapping\",\n '{\"labels\": \"predicted_labels\"}',\n \"--max-batch-size\",\n \"200\",\n ]\n\n main()\n\n result = pd.read_csv(output_file)\n\n assert result.shape[0] == len(TRANSACTIONS)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
PDR-benchmark-standardization-committee/LTS-benchmark-tool
[ "007da9bd3fb48996c0b97ad1e61549cb2ebb479e" ]
[ "dataloader.py" ]
[ "# coding: utf-8\nimport os\nimport sys\n\nimport cv2\nimport pandas as pd\nimport numpy as np\nfrom configparser import ConfigParser\nfrom logging import getLogger\n\n\nlogger = getLogger(\"__main__\").getChild(\"dataloader\")\n\n\ndef config(track, base_dname, config_file='config.ini'):\n '''\n Load ground_truth directory ini file\n\n Parameters\n ----------\n track : str \n 'VDR' or 'PDR'\n config_file : str\n Loading configuration file name\n \n Retruns\n -------\n conf : dictionary\n '''\n logger.debug('Loading configuration file: {}'.format(config_file))\n\n if not os.path.exists(config_file):\n raise FileExistsError('{} does not exist'.format(config_file))\n\n # Load ini file\n config_ini = ConfigParser()\n config_ini.optionxform = str\n config_ini.read(config_file, encoding='utf-8')\n conf = dict()\n\n ini_names ={'dir_name':['map_dname', 'ans_dname', 'ref_dname', 'ALIP_dname', 'BLE_dname'],\n 'file_name':['map_image_fname', 'map_size_fname', 'area_fname', \n 'ref_fname', 'ans_fname', 'ALIP_info_fname', 'BLE_info_fname'],\n 'map_color':['map_obstacle_color', 'map_trajectory_color', 'map_ref_color', \n 'map_BLE_color'],\n 'map_makersize':['map_trajectory_size', 'map_ref_size', 'map_BLE_size', 'map_grid']}\n\n for key, values in ini_names.items():\n for v in values:\n item = config_ini[track][v].strip(\"'\")\n if key == 'dir_name':\n item = os.path.join(base_dname, item)\n \n conf[v] = item\n logger.debug('{}: {}'.format(v, item))\n\n logger.debug(\"Configuration file load complete!\")\n return conf\n\ndef map_size(base_dname, map_size_fname):\n '''\n Load map size file\n\n Parameters\n ----------\n base_dname : str\n map_size_fname : str\n \n Returns\n -------\n map_size : ndarray of float\n map size [x, y]\n '''\n map_size_path = os.path.join(base_dname, map_size_fname)\n logger.debug('Loading Map size file : {}'.format(map_size_path))\n \n try:\n map_size_df = pd.read_csv(map_size_path, names=['x[m]', 'y[m]'])\n except FileNotFoundError:\n logger.debug('{} does not exists'.format(map_size_path))\n return None\n\n map_size = map_size_df.values[0]\n\n logger.debug('Map size load complete! map size: {}'.format(map_size))\n\n return map_size\n\ndef map_image(base_dname, map_image_fname):\n '''\n load map bitmap image\n\n Parameters\n ----------\n base_dname : str\n map_image_fname : str\n \n Returns\n -------\n bitmap : ndarray of int\n bitmap data \n '''\n\n map_image_path = os.path.join(base_dname, map_image_fname)\n logger.debug('Loading map image : {}'.format(map_image_path))\n\n map_img = cv2.imread(map_image_path, cv2.IMREAD_GRAYSCALE)\n \n # Value 1 is obstacle \n bitmap = np.where(map_img==255, 0, 1)\n logger.debug('map image load complete! image shape:{}'.format(bitmap.shape))\n\n return bitmap \n\ndef load_point(base_dname, point_fname):\n '''\n Load point data file\n\n Parameters\n ----------\n base_dname : str\n point_fname : str\n \n Returns\n -------\n point : DataFrame\n columns = ['unixtime', 'x_position_m', 'y_position_m']\n '''\n point_path = os.path.join(base_dname, point_fname)\n logger.debug('Loading point data: {}'.format(point_path))\n\n try:\n point = pd.read_csv(point_path, names=['unixtime', 'x_position_m', 'y_position_m'])\n except FileNotFoundError:\n logger.debug('{} does not exists'.format(point_path))\n return None\n \n logger.debug('Point data load complete! 
columns:{}, shape:{}'.\\\n format(point.columns, point.shape))\n \n return point\n\ndef ALIP_info(base_dname, ALIP_info_fname):\n '''\n Load true ALIP info file\n\n Parameters\n ----------\n base_dname : str\n ALIP_info_fname : str\n \n Returns\n -------\n ALIP_info : DataFrame\n columns = ['ALIP_start', 'ALIP_end']\n '''\n ALIP_info_path = os.path.join(base_dname, ALIP_info_fname)\n logger.debug('Loading ALIP info :{}'.format(ALIP_info_path))\n\n try:\n ALIP_info = pd.read_csv(ALIP_info_path)\n except FileNotFoundError:\n logger.debug('{} does not exist'.format(ALIP_info_path))\n return None\n \n ALIP_info.columns = ['ALIP_start', 'ALIP_end']\n\n logger.debug('ALIP info load complete! columns:{}, shape:{}'.\\\n format(ALIP_info.columns, ALIP_info.shape))\n return ALIP_info\n\ndef area_info(base_dname, area_fname):\n '''\n Area info files\n \n Parameters\n ----------\n base_dname : str\n area_fname : str\n\n Returns\n -------\n area_info : DataFrame\n DataFrame columns = ['area', 'x_position_m', 'y_position_m', 'x_length', 'y_length']\n '''\n area_info_path = os.path.join(base_dname, area_fname)\n logger.debug('Loading Area info file:{}'.format(area_info_path))\n \n try:\n area_info = pd.read_csv(area_info_path)\n except FileNotFoundError:\n logger.debug('{} does not exist'.format(area_info_path))\n return None\n\n area_info.columns = ['area', 'x_position_m', 'y_position_m', 'x_length', 'y_length']\n logger.debug('Area info load complete! columns:{}, shape:{}'.\\\n format(area_info.columns, area_info.shape))\n \n return area_info\n\ndef BLE_info(base_dname, BLE_fname):\n '''\n BLE info files\n \n Parameters\n ----------\n base_dname : str\n BLE_fname : str\n\n Returns\n -------\n BLE_info : DataFrame\n DataFrame columns = ['mac_address', 'orientation', 'x_position_m', 'y_position_m', 'Ptx', 'Lux']\n '''\n BLE_info_path = os.path.join(base_dname, BLE_fname)\n logger.debug('Loading Area info file:{}'.format(BLE_info_path))\n \n try:\n BLE_info = pd.read_csv(BLE_info_path)\n except FileNotFoundError:\n logger.debug('{} does not exist'.format(BLE_info_path))\n return None\n\n logger.debug('BLE info load complete! 
columns:{}'.format(BLE_info.columns))\n\n    return BLE_info\n\ndef area_weights_config(track, config_file='area_weights_config.ini'):\n    '''\n    Load area weights configuration file for E_error_deviation\n\n    Parameters\n    ----------\n    track : str\n        'VDR' or 'PDR'\n    config_file : str\n        area weights configuration file name\n\n    Returns\n    -------\n    area_weights : list of float\n    '''\n    logger.debug('Loading area weights configuration file.')\n    logger.debug('track: {}, config file: {}'.format(track, config_file))\n\n    if not os.path.exists(config_file):\n        logger.error('{} does not exist'.format(config_file))\n        return None\n\n    config_ini = ConfigParser()\n    config_ini.optionxform = str\n    config_ini.read(config_file, encoding='utf-8')\n\n    area_weights = list()\n    for area, weight in config_ini[track].items():\n        logger.debug('{}: {}'.format(area, weight))\n        area_weights.append(float(weight))\n\n    return area_weights\n\ndef drop_ans_duplicated_with_ref(ans_point, ref_point):\n    '''\n    drop rows duplicated with the reference points from the answer points\n\n    Parameters\n    ----------\n    ans_point : DataFrame\n        total ground truth point\n    ref_point : DataFrame\n        reference point which is not for evaluation\n\n    Returns\n    -------\n    ans_ref_nonduplicated : DataFrame\n    '''\n\n    ans_point = ans_point.drop_duplicates()\n    ref_point = ref_point.drop_duplicates()\n\n    ref_unixtime = ref_point['unixtime']\n    ans_duplicated_ref = ans_point[~ans_point['unixtime'].isin(ref_unixtime)]\n\n    return ans_duplicated_ref\n\ndef filter_evaluation_data_ALIP(evaluation_point, ALIP_info, ALIP_flag):\n\n    '''\n    Filter data between ALIP or not\n\n    Parameters\n    ----------\n    evaluation_point: DataFrame\n        DataFrame columns = ['unixtime', 'x_position_m', 'y_position_m']\n    ALIP_info : DataFrame\n        ALIP period time information\n    ALIP_flag : boolean\n        filter point is between ALIP or not\n\n    Returns\n    -------\n    eval_point : DataFrame\n        evaluation point for indicator\n    '''\n    # Check whether unixtime is between start and end time of ALIP_info\n    def is_unixtime_between_ALIP(x):\n        for ALIP_start, ALIP_end in zip(ALIP_info['ALIP_start'].values, ALIP_info['ALIP_end'].values):\n            if ALIP_start <= x <= ALIP_end:\n                return True\n        return False\n\n    if ALIP_flag:\n        # Boolean array\n        is_unixtime_between_ALIPinfo = evaluation_point['unixtime'].apply(lambda x: is_unixtime_between_ALIP(x))\n        eval_point = evaluation_point[is_unixtime_between_ALIPinfo]\n        logger.debug('evaluation point BETWEEN ALIP period is selected')\n\n    else:\n        is_unixtime_out_of_ALIPinfo = [not i for i in evaluation_point['unixtime'].apply(lambda x: is_unixtime_between_ALIP(x))]\n        eval_point = evaluation_point[is_unixtime_out_of_ALIPinfo]\n        logger.debug('evaluation point OUT OF ALIP period is selected')\n\n    return eval_point\n\ndef map_color(map_obstacle_color, map_trajectory_color, map_ref_color, map_BLE_color):\n    '''\n    Load map color\n\n    Parameters\n    ----------\n    map_obstacle_color : str\n    map_trajectory_color : str\n    map_ref_color : str\n    map_BLE_color : str\n\n    Returns\n    -------\n    map_color : list\n    '''\n\n    map_color = [map_obstacle_color, map_trajectory_color, map_ref_color, map_BLE_color]\n\n    return map_color\n\ndef map_makersize(map_trajectory_size, map_ref_size, map_BLE_size, map_grid):\n    '''\n    Load map marker sizes\n\n    Parameters\n    ----------\n    map_trajectory_size : str\n    map_ref_size : str\n    map_BLE_size : str\n    map_grid : str\n\n    Returns\n    -------\n    map_makersize : list\n    '''\n\n    # map_grid arrives from the ini file as the string 'True'/'False'\n    if map_grid == 'True':\n        map_grid = True\n    else:\n        map_grid = False\n\n    
map_makersize = [map_trajectory_size, map_ref_size, map_BLE_size, map_grid]\n\n return map_makersize\n" ]
[ [ "pandas.read_csv", "numpy.where" ] ]
paulroujansky/mne-python
[ "6c36f8806dffe48bd82e461ad6cc8aad782e5f43" ]
[ "tutorials/intro/plot_10_overview.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n.. _tut-overview:\n\nOverview of MEG/EEG analysis with MNE-Python\n============================================\n\nThis tutorial covers the basic EEG/MEG pipeline for event-related analysis:\nloading data, epoching, averaging, plotting, and estimating cortical activity\nfrom sensor data. It introduces the core MNE-Python data structures\n:class:`~mne.io.Raw`, :class:`~mne.Epochs`, :class:`~mne.Evoked`, and\n:class:`~mne.SourceEstimate`, and covers a lot of ground fairly quickly (at the\nexpense of depth). Subsequent tutorials address each of these topics in greater\ndetail.\n\n.. contents:: Page contents\n :local:\n :depth: 1\n\nWe begin by importing the necessary Python modules:\n\"\"\"\n\nimport os\nimport numpy as np\nimport mne\n\n###############################################################################\n# Loading data\n# ^^^^^^^^^^^^\n#\n# MNE-Python data structures are based around the FIF file format from\n# Neuromag, but there are reader functions for :ref:`a wide variety of other\n# data formats <data-formats>`. MNE-Python also has interfaces to a\n# variety of :doc:`publicly available datasets <../../manual/datasets_index>`,\n# which MNE-Python can download and manage for you.\n#\n# We'll start this tutorial by loading one of the example datasets (called\n# \":ref:`sample-dataset`\"), which contains EEG and MEG data from one subject\n# performing an audiovisual experiment, along with structural MRI scans for\n# that subject. The :func:`mne.datasets.sample.data_path` function will\n# automatically download the dataset if it isn't found in one of the expected\n# locations, then return the directory path to the dataset (see the\n# documentation of :func:`~mne.datasets.sample.data_path` for a list of places\n# it checks before downloading). Note also that for this tutorial to run\n# smoothly on our servers, we're using a filtered and downsampled version of\n# the data (:file:`sample_audvis_filt-0-40_raw.fif`), but an unfiltered version\n# (:file:`sample_audvis_raw.fif`) is also included in the sample dataset and\n# could be substituted here when running the tutorial locally.\n\nsample_data_folder = mne.datasets.sample.data_path()\nsample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_filt-0-40_raw.fif')\nraw = mne.io.read_raw_fif(sample_data_raw_file)\n\n###############################################################################\n# By default, :func:`~mne.io.read_raw_fif` displays some information about the\n# file it's loading; for example, here it tells us that there are four\n# \"projection items\" in the file along with the recorded data; those are\n# :term:`SSP projectors <projector>` calculated to remove environmental noise\n# from the MEG signals, plus a projector to mean-reference the EEG channels;\n# these are discussed in the tutorial :ref:`tut-projectors-background`.\n# In addition to the information displayed during loading,\n# you can get a glimpse of the basic details of a :class:`~mne.io.Raw` object\n# by printing it; even more is available by printing its ``info`` attribute\n# (a :class:`dictionary-like object <mne.Info>` that is preserved across\n# :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked`\n# objects). The ``info`` data structure keeps track of channel locations,\n# applied filters, projectors, etc. Notice especially the ``chs`` entry,\n# showing that MNE-Python detects different sensor types and handles each\n# appropriately. 
See :ref:`tut-info-class` for more on the :class:`~mne.Info`\n# class.\n\nprint(raw)\nprint(raw.info)\n\n###############################################################################\n# :class:`~mne.io.Raw` objects also have several built-in plotting methods;\n# here we show the power spectral density (PSD) for each sensor type with\n# :meth:`~mne.io.Raw.plot_psd`, as well as a plot of the raw sensor traces with\n# :meth:`~mne.io.Raw.plot`. In the PSD plot, we'll only plot frequencies below\n# 50 Hz (since our data are low-pass filtered at 40 Hz). In interactive Python\n# sessions, :meth:`~mne.io.Raw.plot` is interactive and allows scrolling,\n# scaling, bad channel marking, annotation, projector toggling, etc.\n\nraw.plot_psd(fmax=50)\nraw.plot(duration=5, n_channels=30)\n\n###############################################################################\n# Preprocessing\n# ^^^^^^^^^^^^^\n#\n# MNE-Python supports a variety of preprocessing approaches and techniques\n# (Maxwell filtering, signal-space projection, independent components analysis,\n# filtering, downsampling, etc.); see the full list of capabilities in the\n# :mod:`mne.preprocessing` and :mod:`mne.filter` submodules. Here we'll clean\n# up our data by performing independent components analysis\n# (:class:`~mne.preprocessing.ICA`); for brevity we'll skip the steps that\n# helped us determine which components best capture the artifacts (see\n# :ref:`tut-artifact-ica` for a detailed walk-through of that process).\n\n# set up and fit the ICA\nica = mne.preprocessing.ICA(n_components=20, random_state=97, max_iter=800)\nica.fit(raw)\nica.exclude = [1, 2]  # details on how we picked these are omitted here\nica.plot_properties(raw, picks=ica.exclude)\n\n###############################################################################\n# Once we're confident about which component(s) we want to remove, we pass them\n# as the ``exclude`` parameter and then apply the ICA to the raw signal. The\n# :meth:`~mne.preprocessing.ICA.apply` method requires the raw data to be\n# loaded into memory (by default it's only read from disk as-needed), so we'll\n# use :meth:`~mne.io.Raw.load_data` first. We'll also make a copy of the\n# :class:`~mne.io.Raw` object so we can compare the signal before and after\n# artifact removal side-by-side:\n\norig_raw = raw.copy()\nraw.load_data()\nica.apply(raw)\n\n# show some frontal channels to clearly illustrate the artifact removal\nchs = ['MEG 0111', 'MEG 0121', 'MEG 0131', 'MEG 0211', 'MEG 0221', 'MEG 0231',\n       'MEG 0311', 'MEG 0321', 'MEG 0331', 'MEG 1511', 'MEG 1521', 'MEG 1531',\n       'EEG 001', 'EEG 002', 'EEG 003', 'EEG 004', 'EEG 005', 'EEG 006',\n       'EEG 007', 'EEG 008']\nchan_idxs = [raw.ch_names.index(ch) for ch in chs]\norig_raw.plot(order=chan_idxs, start=12, duration=4)\nraw.plot(order=chan_idxs, start=12, duration=4)\n\n###############################################################################\n# .. _overview-tut-events-section:\n#\n# Detecting experimental events\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# The sample dataset includes several :term:`\"STIM\" channels <stim channel>`\n# that recorded electrical\n# signals sent from the stimulus delivery computer (as brief DC shifts /\n# squarewave pulses). These pulses (often called \"triggers\") are used in this\n# dataset to mark experimental events: stimulus onset, stimulus type, and\n# participant response (button press). 
The individual STIM channels are\n# combined onto a single channel, in such a way that voltage\n# levels on that channel can be unambiguously decoded as a particular event\n# type. On older Neuromag systems (such as that used to record the sample data)\n# this summation channel was called ``STI 014``, so we can pass that channel\n# name to the :func:`mne.find_events` function to recover the timing and\n# identity of the stimulus events.\n\nevents = mne.find_events(raw, stim_channel='STI 014')\nprint(events[:5]) # show the first 5\n\n###############################################################################\n# The resulting events array is an ordinary 3-column :class:`NumPy array\n# <numpy.ndarray>`, with sample number in the first column and integer event ID\n# in the last column; the middle column is usually ignored. Rather than keeping\n# track of integer event IDs, we can provide an *event dictionary* that maps\n# the integer IDs to experimental conditions or events. In this dataset, the\n# mapping looks like this:\n#\n# .. _sample-data-event-dict-table:\n#\n# +----------+----------------------------------------------------------+\n# | Event ID | Condition |\n# +==========+==========================================================+\n# | 1 | auditory stimulus (tone) to the left ear |\n# +----------+----------------------------------------------------------+\n# | 2 | auditory stimulus (tone) to the right ear |\n# +----------+----------------------------------------------------------+\n# | 3 | visual stimulus (checkerboard) to the left visual field |\n# +----------+----------------------------------------------------------+\n# | 4 | visual stimulus (checkerboard) to the right visual field |\n# +----------+----------------------------------------------------------+\n# | 5 | smiley face (catch trial) |\n# +----------+----------------------------------------------------------+\n# | 32 | subject button press |\n# +----------+----------------------------------------------------------+\n\nevent_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,\n 'visual/right': 4, 'smiley': 5, 'buttonpress': 32}\n\n###############################################################################\n# Event dictionaries like this one are used when extracting epochs from\n# continuous data; the ``/`` character in the dictionary keys allows pooling\n# across conditions by requesting partial condition descriptors (i.e.,\n# requesting ``'auditory'`` will select all epochs with Event IDs 1 and 2;\n# requesting ``'left'`` will select all epochs with Event IDs 1 and 3). An\n# example of this is shown in the next section. There is also a convenient\n# :func:`~mne.viz.plot_events` function for visualizing the distribution of\n# events across the duration of the recording (to make sure event detection\n# worked as expected). 
Here we'll also make use of the :class:`~mne.Info`\n# attribute to get the sampling frequency of the recording (so our x-axis will\n# be in seconds instead of in samples).\n\nfig = mne.viz.plot_events(events, event_id=event_dict, sfreq=raw.info['sfreq'])\nfig.subplots_adjust(right=0.7) # make room for the legend\n\n###############################################################################\n# For paradigms that are not event-related (e.g., analysis of resting-state\n# data), you can extract regularly spaced (possibly overlapping) spans of data\n# by creating events using :func:`mne.make_fixed_length_events` and then\n# proceeding with epoching as described in the next section.\n#\n#\n# Epoching continuous data\n# ^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# The :class:`~mne.io.Raw` object and the events array are the bare minimum\n# needed to create an :class:`~mne.Epochs` object, which we create with the\n# :class:`mne.Epochs` class constructor. Here we'll also specify some data\n# quality constraints: we'll reject any epoch where peak-to-peak signal\n# amplitude is beyond reasonable limits for that channel type. This is done\n# with a *rejection dictionary*; you may include or omit thresholds for any of\n# the channel types present in your data. The values given here are reasonable\n# for this particular dataset, but may need to be adapted for different\n# hardware or recording conditions. For a more automated approach, consider\n# using the `autoreject package`_.\n\nreject_criteria = dict(mag=4000e-15, # 4000 fT\n grad=4000e-13, # 4000 fT/cm\n eeg=150e-6, # 150 μV\n eog=250e-6) # 250 μV\n\n###############################################################################\n# We'll also pass the event dictionary as the ``event_id`` parameter (so we can\n# work with easy-to-pool event labels instead of the integer event IDs), and\n# specify ``tmin`` and ``tmax`` (the time relative to each event at which to\n# start and end each epoch). As mentioned above, by default\n# :class:`~mne.io.Raw` and :class:`~mne.Epochs` data aren't loaded into memory\n# (they're accessed from disk only when needed), but here we'll force loading\n# into memory using the ``preload=True`` parameter so that we can see the\n# results of the rejection criteria being applied:\n\nepochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.2, tmax=0.5,\n reject=reject_criteria, preload=True)\n\n###############################################################################\n# Next we'll pool across left/right stimulus presentations so we can compare\n# auditory versus visual responses. To avoid biasing our signals to the\n# left or right, we'll use :meth:`~mne.Epochs.equalize_event_counts` first to\n# randomly sample epochs from each condition to match the number of epochs\n# present in the condition with the fewest good epochs.\n\nconds_we_care_about = ['auditory/left', 'auditory/right',\n 'visual/left', 'visual/right']\nepochs.equalize_event_counts(conds_we_care_about) # this operates in-place\naud_epochs = epochs['auditory']\nvis_epochs = epochs['visual']\ndel raw, epochs # free up memory\n\n###############################################################################\n# Like :class:`~mne.io.Raw` objects, :class:`~mne.Epochs` objects also have a\n# number of built-in plotting methods. 
One is :meth:`~mne.Epochs.plot_image`,\n# which shows each epoch as one row of an image map, with color representing\n# signal magnitude; the average evoked response and the sensor location are\n# shown below the image:\n\naud_epochs.plot_image(picks=['MEG 1332', 'EEG 021'])\n\n##############################################################################\n# .. note::\n#\n# Both :class:`~mne.io.Raw` and :class:`~mne.Epochs` objects have\n# :meth:`~mne.Epochs.get_data` methods that return the underlying data\n# as a :class:`NumPy array <numpy.ndarray>`. Both methods have a ``picks``\n# parameter for subselecting which channel(s) to return; ``raw.get_data()``\n# has additional parameters for restricting the time domain. The resulting\n# matrices have dimension ``(n_channels, n_times)`` for\n# :class:`~mne.io.Raw` and ``(n_epochs, n_channels, n_times)`` for\n# :class:`~mne.Epochs`.\n#\n#\n# Time-frequency analysis\n# ^^^^^^^^^^^^^^^^^^^^^^^\n#\n# The :mod:`mne.time_frequency` submodule provides implementations of several\n# algorithms to compute time-frequency representations, power spectral density,\n# and cross-spectral density. Here, for example, we'll compute for the auditory\n# epochs the induced power at different frequencies and times, using Morlet\n# wavelets. On this dataset the result is not especially informative (it just\n# shows the evoked \"auditory N100\" response); see :ref:`here\n# <inter-trial-coherence>` for a more extended example on a dataset with richer\n# frequency content.\n\nfrequencies = np.arange(7, 30, 3)\npower = mne.time_frequency.tfr_morlet(aud_epochs, n_cycles=2, return_itc=False,\n freqs=frequencies, decim=3)\npower.plot(['MEG 1332'])\n\n###############################################################################\n# Estimating evoked responses\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# Now that we have our conditions in ``aud_epochs`` and ``vis_epochs``, we can\n# get an estimate of evoked responses to auditory versus visual stimuli by\n# averaging together the epochs in each condition. This is as simple as calling\n# the :meth:`~mne.Epochs.average` method on the :class:`~mne.Epochs` object,\n# and then using a function from the :mod:`mne.viz` module to compare the\n# global field power for each sensor type of the two :class:`~mne.Evoked`\n# objects:\n\naud_evoked = aud_epochs.average()\nvis_evoked = vis_epochs.average()\n\nmne.viz.plot_compare_evokeds(dict(auditory=aud_evoked, visual=vis_evoked),\n show_legend='upper left',\n show_sensors='upper right')\n\n###############################################################################\n# We can also get a more detailed view of each :class:`~mne.Evoked` object\n# using other plotting methods such as :meth:`~mne.Evoked.plot_joint` or\n# :meth:`~mne.Evoked.plot_topomap`. Here we'll examine just the EEG channels,\n# and see the classic auditory evoked N100-P200 pattern over dorso-frontal\n# electrodes, then plot scalp topographies at some additional arbitrary times:\n\n# sphinx_gallery_thumbnail_number = 13\naud_evoked.plot_joint(picks='eeg')\naud_evoked.plot_topomap(times=[0., 0.08, 0.1, 0.12, 0.2], ch_type='eeg')\n\n##############################################################################\n# Evoked objects can also be combined to show contrasts between conditions,\n# using the :func:`mne.combine_evoked` function. A simple difference can be\n# generated by negating one of the :class:`~mne.Evoked` objects passed into the\n# function. 
We'll then plot the difference wave at each sensor using\n# :meth:`~mne.Evoked.plot_topo`:\n\nevoked_diff = mne.combine_evoked([aud_evoked, -vis_evoked], weights='equal')\nevoked_diff.pick_types('mag').plot_topo(color='r', legend=False)\n\n##############################################################################\n# Inverse modeling\n# ^^^^^^^^^^^^^^^^\n#\n# Finally, we can estimate the origins of the evoked activity by projecting the\n# sensor data into this subject's :term:`source space` (a set of points either\n# on the cortical surface or within the cortical volume of that subject, as\n# estimated by structural MRI scans). MNE-Python supports lots of ways of doing\n# this (dynamic statistical parametric mapping, dipole fitting, beamformers,\n# etc.); here we'll use minimum-norm estimation (MNE) to generate a continuous\n# map of activation constrained to the cortical surface. MNE uses a linear\n# :term:`inverse operator` to project EEG+MEG sensor measurements into the\n# source space. The inverse operator is computed from the\n# :term:`forward solution` for this subject and an estimate of :ref:`the\n# covariance of sensor measurements <tut_compute_covariance>`. For this\n# tutorial we'll skip those computational steps and load a pre-computed inverse\n# operator from disk (it's included with the :ref:`sample data\n# <sample-dataset>`). Because this \"inverse problem\" is underdetermined (there\n# is no unique solution), here we further constrain the solution by providing a\n# regularization parameter specifying the relative smoothness of the current\n# estimates in terms of a signal-to-noise ratio (where \"noise\" here is akin to\n# baseline activity level across all of cortex).\n\n# load inverse operator\ninverse_operator_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis-meg-oct-6-meg-inv.fif')\ninv_operator = mne.minimum_norm.read_inverse_operator(inverse_operator_file)\n# set signal-to-noise ratio (SNR) to compute regularization parameter (λ²)\nsnr = 3.\nlambda2 = 1. / snr ** 2\n# generate the source time course (STC)\nstc = mne.minimum_norm.apply_inverse(vis_evoked, inv_operator,\n lambda2=lambda2,\n method='MNE') # or dSPM, sLORETA, eLORETA\n\n##############################################################################\n# Finally, in order to plot the source estimate on the subject's cortical\n# surface we'll also need the path to the sample subject's structural MRI files\n# (the ``subjects_dir``):\n\n# path to subjects' MRI files\nsubjects_dir = os.path.join(sample_data_folder, 'subjects')\n# plot\nstc.plot(initial_time=0.1, hemi='split', views=['lat', 'med'],\n subjects_dir=subjects_dir)\n\n##############################################################################\n# The remaining tutorials have *much more detail* on each of these topics (as\n# well as many other capabilities of MNE-Python not mentioned here:\n# connectivity analysis, encoding/decoding models, lots more visualization\n# options, etc). Read on to learn more!\n#\n#\n# .. LINKS\n#\n# .. _`autoreject package`: http://autoreject.github.io/\n" ]
[ [ "numpy.arange" ] ]
ilanbiala/16720-project
[ "08c9c898549fd42c60a3d5d21192ea6c7662aaa8" ]
[ "ibiala-code/helper.py" ]
[ "\"\"\"\nHomework4.\nHelper functions.\n\nWritten by Chen Kong, 2018.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\nimport submission as sub\n\ndef _epipoles(E):\n U, S, V = np.linalg.svd(E)\n e1 = V[-1, :]\n U, S, V = np.linalg.svd(E.T)\n e2 = V[-1, :]\n return e1, e2\n\ndef displayEpipolarF(I1, I2, F):\n e1, e2 = _epipoles(F)\n\n sy, sx, _ = I2.shape\n\n f, [ax1, ax2] = plt.subplots(1, 2, figsize=(12, 9))\n ax1.imshow(I1)\n ax1.set_title('Select a point in this image')\n ax1.set_axis_off()\n ax2.imshow(I2)\n ax2.set_title('Verify that the corresponding point \\n is on the epipolar line in this image')\n ax2.set_axis_off()\n\n while True:\n plt.sca(ax1)\n inpt = plt.ginput(1, mouse_stop=2)\n if not inpt:\n break\n x, y = inpt[0]\n\n xc = x\n yc = y\n v = np.array([xc, yc, 1])\n l = F.dot(v)\n s = np.sqrt(l[0]**2+l[1]**2)\n\n if s == 0:\n print('ERROR: Zero line vector in displayEpipolar')\n break\n\n l = l/s\n\n if l[0] != 0:\n ye = sy-1\n ys = 0\n xe = -(l[1] * ye + l[2])/l[0]\n xs = -(l[1] * ys + l[2])/l[0]\n else:\n xe = sx-1\n xs = 0\n ye = -(l[0] * xe + l[2])/l[1]\n ys = -(l[0] * xs + l[2])/l[1]\n\n # plt.plot(x,y, '*', 'MarkerSize', 6, 'LineWidth', 2)\n ax1.plot(x, y, '*', MarkerSize=6, linewidth=2)\n ax2.plot([xs, xe], [ys, ye], linewidth=2)\n plt.draw()\n\n\ndef _singularize(F):\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n F = U.dot(np.diag(S).dot(V))\n return F\n\ndef _objective_F(f, pts1, pts2):\n F = _singularize(f.reshape([3, 3]))\n num_points = pts1.shape[0]\n hpts1 = np.concatenate([pts1, np.ones([num_points, 1])], axis=1)\n hpts2 = np.concatenate([pts2, np.ones([num_points, 1])], axis=1)\n Fp1 = F.dot(hpts1.T)\n FTp2 = F.T.dot(hpts2.T)\n\n r = 0\n for fp1, fp2, hp2 in zip(Fp1.T, FTp2.T, hpts2):\n r += (hp2.dot(fp1))**2 * (1/(fp1[0]**2 + fp1[1]**2) + 1/(fp2[0]**2 + fp2[1]**2))\n return r\n\ndef refineF(F, pts1, pts2):\n f = scipy.optimize.fmin_powell(\n lambda x: _objective_F(x, pts1, pts2), F.reshape([-1]),\n maxiter=100000,\n maxfun=10000\n )\n return _singularize(f.reshape([3, 3]))\n\ndef camera2(E):\n U,S,V = np.linalg.svd(E)\n m = S[:2].mean()\n E = U.dot(np.array([[m,0,0], [0,m,0], [0,0,0]])).dot(V)\n U,S,V = np.linalg.svd(E)\n W = np.array([[0,-1,0], [1,0,0], [0,0,1]])\n\n if np.linalg.det(U.dot(W).dot(V))<0:\n W *= -1\n\n M2s = np.zeros([3,4,4])\n M2s[:,:,0] = np.concatenate([U.dot(W).dot(V), U[:,2].reshape([-1, 1])/abs(U[:,2]).max()], axis=1)\n M2s[:,:,1] = np.concatenate([U.dot(W).dot(V), -U[:,2].reshape([-1, 1])/abs(U[:,2]).max()], axis=1)\n M2s[:,:,2] = np.concatenate([U.dot(W.T).dot(V), U[:,2].reshape([-1, 1])/abs(U[:,2]).max()], axis=1)\n M2s[:,:,3] = np.concatenate([U.dot(W.T).dot(V), -U[:,2].reshape([-1, 1])/abs(U[:,2]).max()], axis=1)\n return M2s\n\ndef epipolarMatchGUI(I1, I2, F):\n e1, e2 = _epipoles(F)\n\n sy, sx, _ = I2.shape\n\n f, [ax1, ax2] = plt.subplots(1, 2, figsize=(12, 9))\n ax1.imshow(I1)\n ax1.set_title('Select a point in this image')\n ax1.set_axis_off()\n ax2.imshow(I2)\n ax2.set_title('Verify that the corresponding point \\n is on the epipolar line in this image')\n ax2.set_axis_off()\n\n while True:\n plt.sca(ax1)\n inpt = plt.ginput(1, mouse_stop=2)\n if not inpt:\n break\n x, y = inpt[0]\n\n xc = int(x)\n yc = int(y)\n v = np.array([xc, yc, 1])\n l = F.dot(v)\n s = np.sqrt(l[0]**2+l[1]**2)\n\n if s == 0:\n print('ERROR: Zero line vector in displayEpipolar')\n break\n\n l = l/s\n\n if l[0] != 0:\n ye = sy-1\n ys = 0\n xe = -(l[1] * ye + l[2])/l[0]\n xs = -(l[1] * ys + l[2])/l[0]\n else:\n xe = sx-1\n 
            xs = 0\n            ye = -(l[0] * xe + l[2])/l[1]\n            ys = -(l[0] * xs + l[2])/l[1]\n\n        # plt.plot(x,y, '*', 'MarkerSize', 6, 'LineWidth', 2)\n        ax1.plot(x, y, '*', markersize=6, linewidth=2)\n        ax2.plot([xs, xe], [ys, ye], linewidth=2)\n\n        # draw points\n        x2, y2 = sub.epipolarCorrespondence(I1, I2, F, xc, yc)\n        ax2.plot(x2, y2, 'ro', markersize=8, linewidth=2)\n        plt.draw()\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.ginput", "numpy.ones", "matplotlib.pyplot.draw", "numpy.zeros", "numpy.diag", "matplotlib.pyplot.subplots", "numpy.linalg.svd", "matplotlib.pyplot.sca", "numpy.array" ] ]
joshloyal/fully-differentiable-deep-ndf-tf
[ "b049ca86c7e0065af373c110d8bb8b5721fb25d1" ]
[ "test_ndf.py" ]
[ "import numpy as np\nimport skflow\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier\nimport tensorflow as tf\n\nimport ndf\n\nDEPTH = 4 # Depth of a tree (this includes the leaf probabilities)\nN_LEAF = 2 ** (DEPTH - 1) # Number of leaf nodes\nN_DECISION_NODES = 2 ** (DEPTH - 1) - 1 # These are all nodes but the leaves\nN_BATCH = 3\nN_LABELS = 10\n\n\nclass TestNDF(tf.test.TestCase):\n def test_routing_probability(self):\n rng = np.random.RandomState(1234)\n\n decision_p = rng.beta(\n 2, 2, N_DECISION_NODES * N_BATCH).reshape(\n N_BATCH, N_DECISION_NODES)\n\n decision_p = tf.constant(decision_p)\n with self.test_session():\n routing_proba = ndf.routing_probability(decision_p).eval()\n\n self.assertEquals(routing_proba.shape, (N_BATCH, N_LEAF))\n self.assertTrue(np.all(routing_proba < 1))\n self.assertTrue(np.all(routing_proba > 0))\n\n def test_leaf_probabilities(self):\n rng = np.random.RandomState(1234)\n\n decision_p = rng.beta(\n 2, 2, N_DECISION_NODES * N_BATCH).reshape(\n N_BATCH, N_DECISION_NODES).astype(np.float32)\n decision_p = tf.constant(decision_p)\n\n w_l = rng.uniform(-2, 2, [N_LEAF, N_LABELS]).astype(np.float32)\n w_l = tf.constant(w_l)\n\n with self.test_session():\n leaf_p = tf.nn.softmax(w_l)\n routing_proba = ndf.routing_probability(decision_p)\n py_x_tree = ndf.leaf_probability(routing_proba, leaf_p).eval()\n\n self.assertEqual(py_x_tree.shape, (N_BATCH, N_LABELS))\n self.assertTrue(np.all(routing_proba < 1))\n self.assertTrue(np.all(routing_proba > 0))\n\n\n def test_model_op(self):\n rng = np.random.RandomState(1234)\n X = tf.placeholder('float32', name='X', shape=[None, 5])\n y = tf.placeholder('float32', name='y', shape=[None, 10])\n model = ndf.neural_decision_tree(X, y)\n\n X_data = rng.randn(10, 5)\n y_data = np.eye(10)[rng.choice(np.arange(10), 10)]\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n preds, loss = sess.run(model, feed_dict={X: X_data, y: y_data})\n\n def test_skflow_tree(self):\n iris = datasets.load_iris()\n classifier = skflow.TensorFlowEstimator(\n ndf.neural_decision_tree_classifier,\n n_classes=3,\n optimizer='Adagrad',\n learning_rate=0.1,\n batch_size=100,\n verbose=True)\n classifier.fit(iris.data, iris.target, logdir='./model')\n score = metrics.accuracy_score(iris.target, classifier.predict(iris.data))\n print(score)\n\n def test_skflow_forest(self):\n iris = datasets.load_iris()\n classifier = skflow.TensorFlowEstimator(\n ndf.neural_decision_forest_classifier,\n n_classes=3,\n optimizer='Adagrad',\n learning_rate=0.1,\n verbose=True)\n classifier.fit(iris.data, iris.target, logdir='./model')\n score = metrics.accuracy_score(iris.target, classifier.predict(iris.data))\n print(score)\n print(classifier.predict_proba(iris.data[:5, :]))\n\n def test_dt(self):\n iris = datasets.load_iris()\n classifier = DecisionTreeClassifier(max_depth=4)\n classifier.fit(iris.data, iris.target)\n score = metrics.accuracy_score(iris.target, classifier.predict(iris.data))\n print(score)\n\n def test_flavour(self):\n import pandas as pd\n np.random.seed(123)\n df = pd.read_csv('./data/flavour_noleak.csv')\n indices = np.arange(df.shape[0])\n np.random.shuffle(indices)\n y = df.pop('signal').values[indices[:1000]]\n print(y)\n\n X = df.values[indices[:10000]]\n\n classifier = skflow.TensorFlowEstimator(\n ndf.neural_decision_forest_classifier,\n n_classes=2,\n optimizer='Adagrad',\n learning_rate=0.1,\n verbose=True)\n classifier.fit(X, y, logdir='./model')\n score = 
metrics.accuracy_score(y, classifier.predict(X))\n print(score)\n" ]
[ [ "tensorflow.initialize_all_variables", "numpy.eye", "tensorflow.placeholder", "numpy.random.shuffle", "sklearn.tree.DecisionTreeClassifier", "pandas.read_csv", "numpy.random.seed", "numpy.random.RandomState", "numpy.arange", "numpy.all", "tensorflow.constant", "tensorflow.nn.softmax", "sklearn.datasets.load_iris" ] ]
Holmes-Alan/Photo2Sketch
[ "43a0ca6bb8a8e645b35a2ab23d11ed5efe117e09" ]
[ "network.py" ]
[ "import torch\nfrom torch import nn, einsum\nimport torch.nn.functional as F\nimport torchvision.models as models\n\n\ndef exists(val):\n return val is not None\n\n# classes\n\n\nclass Inverse(nn.Module):\n def __init__(self):\n super(Inverse, self).__init__()\n\n self.E = Encoder()\n self.D = Img_decoder_v3()\n\n def forward(self, x):\n feat = self.E(x.repeat(1, 3, 1, 1))\n out = self.D(feat)\n\n return out\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n\n self.conv = nn.Sequential(\n nn.Conv2d(3, 64, 3, 1, 1),\n nn.InstanceNorm2d(64),\n nn.LeakyReLU(0.2),\n nn.Conv2d(64, 128, 3, 1, 1),\n nn.InstanceNorm2d(128),\n nn.LeakyReLU(0.2),\n nn.MaxPool2d(2, 2, 0),\n nn.Conv2d(128, 256, 3, 1, 1),\n nn.InstanceNorm2d(256),\n nn.LeakyReLU(0.2),\n nn.MaxPool2d(2, 2, 0),\n nn.Conv2d(256, 512, 3, 1, 1),\n nn.InstanceNorm2d(512),\n nn.LeakyReLU(0.2),\n nn.MaxPool2d(2, 2, 0),\n )\n\n self.cont_attn = nn.MultiheadAttention(embed_dim=512, num_heads=8)\n\n def forward(self, HR):\n # LR = F.interpolate(LR, scale_factor=8, mode='nearest')\n feat = self.conv(HR)\n feat1 = F.interpolate(feat, size=(32, 32), mode='bilinear')\n feat1 = feat1.view(HR.shape[0], 512, -1)\n feat1, _ = self.cont_attn(feat1.permute(2, 0, 1), feat1.permute(2, 0, 1), feat1.permute(2, 0, 1))\n feat1 = feat1.view(HR.shape[0], 512, 32, 32)\n feat1 = F.interpolate(feat1, size=(feat.shape[2], feat.shape[3]), mode='bilinear')\n feat = feat + feat1\n\n return feat\n\n\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, input_channel, output_channel, upsample=True):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(input_channel, output_channel, kernel_size=3, padding=0)\n self.conv2 = nn.Conv2d(output_channel, output_channel, kernel_size=3, padding=0)\n self.conv_shortcut = nn.Conv2d(input_channel, output_channel, kernel_size=1, bias=False)\n self.relu = nn.LeakyReLU(0.2)\n self.norm = nn.InstanceNorm2d(output_channel)\n self.upsample = upsample\n self.reflecPad1 = nn.ReflectionPad2d((1, 1, 1, 1))\n self.reflecPad2 = nn.ReflectionPad2d((1, 1, 1, 1))\n\n def forward(self, x):\n if self.upsample:\n x = F.interpolate(x, mode='bilinear', scale_factor=2)\n x_s = self.conv_shortcut(x)\n\n x = self.conv1(self.reflecPad1(x))\n x = self.relu(x)\n x = self.norm(x)\n x = self.conv2(self.reflecPad2(x))\n x = self.relu(x)\n x = self.norm(x)\n\n return x_s + x\n\n\nclass Img_decoder_v3(nn.Module):\n def __init__(self):\n super(Img_decoder_v3, self).__init__()\n\n self.slice4 = ResidualBlock(512, 256)\n self.slice3 = ResidualBlock(256, 128)\n self.slice2 = ResidualBlock(128, 64)\n self.slice1 = ResidualBlock(64, 64, upsample=False)\n # self.slice0 = nn.Conv2d(64, 1, 3, 1, 1)\n self.map = nn.Conv2d(64, 5, 3, 1, 1)\n self.confidence = nn.Conv2d(64, 5, 3, 1, 1)\n self.soft = nn.Softmax(dim=1)\n\n def forward(self, feat):\n # reconstruction\n h = self.slice4(feat)\n h = self.slice3(h)\n h = self.slice2(h)\n h = self.slice1(h)\n score = self.confidence(h)\n score = self.soft(score)\n out = self.map(h) * score\n out = torch.sum(out, dim=1).unsqueeze(1)\n # out = out * self.std.type_as(feat) + self.mean.type_as(feat)\n\n return out\n\n\nclass Img_decoder_v3_m(nn.Module):\n def __init__(self):\n super(Img_decoder_v3_m, self).__init__()\n\n self.slice4 = ResidualBlock(512, 256)\n self.slice3 = ResidualBlock(256, 128)\n self.slice2 = ResidualBlock(128, 64)\n self.slice1 = ResidualBlock(64, 64, upsample=False)\n # self.slice0 = nn.Conv2d(64, 1, 3, 1, 1)\n self.map = nn.Conv2d(64, 5, 3, 1, 1)\n 
self.confidence = nn.Conv2d(64, 5, 3, 1, 1)\n self.soft = nn.Softmax(dim=1)\n\n def forward(self, feat, t):\n # reconstruction\n h = self.slice4(feat)\n h = self.slice3(h)\n h = self.slice2(h)\n h = self.slice1(h)\n score = self.confidence(h)\n out = torch.cat((score, self.map(h)), dim=2)\n sorted, indices = torch.sort(out, dim=1)\n result = sorted[:, t:t+1, score.shape[2]:2*score.shape[2], :]\n\n return result\n\n\nfrom collections import namedtuple\nvgg_outputs = namedtuple(\"VggOutputs\", ['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1'])\nclass Vgg19(nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg19, self).__init__()\n vgg_pretrained_features = models.vgg19(pretrained=True).features\n self.slice1 = nn.Sequential()\n self.slice2 = nn.Sequential()\n self.slice3 = nn.Sequential()\n self.slice4 = nn.Sequential()\n for x in range(2):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(2, 7):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(7, 12):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(12, 21):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n self.register_buffer('mean', mean)\n self.register_buffer('std', std)\n\n def forward(self, x):\n x = (x - self.mean.type_as(x)) / self.std.type_as(x)\n h = self.slice1(x)\n h_relu1_1 = h\n h = self.slice2(h)\n h_relu2_1 = h\n h = self.slice3(h)\n h_relu3_1 = h\n h = self.slice4(h)\n h_relu4_1 = h\n\n out = vgg_outputs(h_relu1_1, h_relu2_1, h_relu3_1, h_relu4_1)\n # step 1\n # m, std = calc_mean_std(h_relu1_1)\n # style_1 = torch.cat((m, std), dim=1)\n # # step 2\n # m, std = calc_mean_std(h_relu2_1)\n # style_2 = torch.cat((m, std), dim=1)\n # # step 3\n # m, std = calc_mean_std(h_relu3_1)\n # style_3 = torch.cat((m, std), dim=1)\n # # step 4\n # m, std = calc_mean_std(h_relu4_1)\n # style_4 = torch.cat((m, std), dim=1)\n #\n # code = torch.cat((style_1, style_2, style_3, style_4), dim=1).squeeze(2).squeeze(2)\n\n return out\n\n\nclass discriminator_v2(nn.Module):\n def __init__(self, device, num_channels, base_filter):\n super(discriminator_v2, self).__init__()\n # self.norm = nn.BatchNorm2d(num_channels*2)\n self.input_conv = nn.Conv2d(num_channels, base_filter, 4, 2, 1)#512*256\n self.norm0 = nn.InstanceNorm2d(base_filter)\n self.conv1 = nn.Conv2d(base_filter, base_filter * 2, 4, 2, 1)\n self.norm1 = nn.InstanceNorm2d(base_filter * 2)\n self.conv2 = nn.Conv2d(base_filter * 2, base_filter * 4, 4, 2, 1)\n self.norm2 = nn.InstanceNorm2d(base_filter * 4)\n self.conv3 = nn.Conv2d(base_filter * 4, base_filter * 8, 4, 2, 1)\n self.norm3 = nn.InstanceNorm2d(base_filter * 8)\n self.cont_attn = nn.MultiheadAttention(embed_dim=base_filter * 8, num_heads=8)\n self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n self.weight = nn.Conv1d(base_filter * 8, 1, 3, 1, 1)\n\n self.down = nn.UpsamplingBilinear2d(scale_factor=0.5)\n\n mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)\n std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)\n self.register_buffer('mean', mean)\n self.register_buffer('std', std)\n\n for m in self.modules():\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.zero_()\n elif 
classname.find('ConvTranspose2d') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.zero_()\n\n def encode(self, x):\n feat1 = self.act(self.norm0(self.input_conv(x)))\n feat1 = self.act(self.norm1(self.conv1(feat1)))\n feat2 = self.act(self.norm2(self.conv2(feat1)))\n feat3 = self.act(self.norm3(self.conv3(feat2)))\n feat3 = feat3.view(feat1.shape[0], 512, -1)\n feat3, _ = self.cont_attn(feat3.permute(2, 0, 1), feat3.permute(2, 0, 1), feat3.permute(2, 0, 1))\n feat3 = feat3.view(feat1.shape[0], 512, -1)\n out3 = self.weight(feat3).view(feat1.shape[0], -1)\n\n feat = torch.cat((feat1.view(feat1.shape[0], -1), feat2.view(feat2.shape[0], -1), feat3.view(feat3.shape[0], -1)), 1)\n return feat, out3\n\n def forward(self, x):\n # mean = mean.expand_as(x)\n # std = std.expand_as(std)\n # x = torch.cat((x, y), dim=1)\n feat1, prob1 = self.encode(x)\n x = self.down(x)\n feat2, prob2 = self.encode(x)\n x = self.down(x)\n feat3, prob3 = self.encode(x)\n\n feat_out = torch.cat((feat1, feat2, feat3), 1)\n prob_out = torch.cat((prob1, prob2, prob3), 1)\n\n return feat_out, prob_out\n\n" ]
[ [ "torch.sum", "torch.nn.MaxPool2d", "torch.nn.MultiheadAttention", "torch.Tensor", "torch.sort", "torch.nn.Softmax", "torch.nn.Conv1d", "torch.nn.Conv2d", "torch.nn.ReflectionPad2d", "torch.nn.InstanceNorm2d", "torch.nn.Sequential", "torch.nn.UpsamplingBilinear2d", "torch.cat", "torch.nn.functional.interpolate", "torch.nn.LeakyReLU" ] ]