Schema (each record below lists these fields, in this order): repo_name (string, 6-130 chars), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence), possible_versions (list)
kiranvad/fdasrsf_python
[ "e45efa35f53eb04ddfef3dbfbfaf893084413755" ]
[ "test/test_all.py" ]
[ "import unittest\nimport numpy as np\nimport fdasrsf as fs \n\nclass TestFDASRSF(unittest.TestCase): \n \n # Returns True or False. \n def test_reparm(self): \n M = 101\n q1 = np.sin(np.linspace(0,2*np.pi,M))\n timet = np.linspace(0,1,M)\n gam = fs.optimum_reparam(q1, timet, q1) \n self.assertAlmostEqual(sum(gam-timet),0)\n \n def test_rlbgs(self):\n M = 101\n q1 = np.sin(np.linspace(0,2*np.pi,M))\n timet = np.linspace(0,1,M)\n gam = fs.optimum_reparam(q1, timet, q1, method=\"RBFGS\") \n self.assertAlmostEqual(sum(gam-timet),0)\n \n def test_f_to_srvf(self):\n M = 101\n f1 = np.sin(np.linspace(0,2*np.pi,M))\n timet = np.linspace(0,1,M)\n q1 = fs.f_to_srsf(f1,timet)\n f1a = fs.srsf_to_f(q1,timet)\n self.assertAlmostEqual(sum(f1-f1a),0,4)\n \n def test_elastic_distance(self):\n M = 101\n f1 = np.sin(np.linspace(0,2*np.pi,M))\n timet = np.linspace(0,1,M)\n da, dp = fs.elastic_distance(f1, f1, timet)\n self.assertLessEqual(da, 1e-10)\n self.assertLessEqual(dp, 1e-6)\n \nif __name__ == '__main__': \n unittest.main() " ]
[ [ "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
utsekaj42/chaospy
[ "0fb23cbb58eb987c3ca912e2a20b83ebab0514d0" ]
[ "chaospy/distributions/operators/negative.py" ]
[ "\"\"\"\nNegative of a distribution.\n\nExample usage\n-------------\n\nInvert sign of a distribution::\n\n >>> distribution = -chaospy.Uniform()\n >>> distribution\n Negative(Uniform())\n >>> distribution.sample(5).round(4)\n array([-0.3464, -0.885 , -0.0497, -0.5178, -0.1275])\n >>> distribution.fwd([-0.3, -0.2, -0.1])\n array([0.7, 0.8, 0.9])\n >>> distribution.inv(distribution.fwd([-0.3, -0.2, -0.1]))\n array([-0.3, -0.2, -0.1])\n >>> distribution.pdf([-0.3, -0.2, -0.1])\n array([1., 1., 1.])\n >>> distribution.mom([1, 2, 3]).round(4)\n array([-0.5 , 0.3333, -0.25 ])\n >>> distribution.ttr([1, 2, 3]).round(4)\n array([[-0.5 , -0.5 , -0.5 ],\n [ 0.0833, 0.0667, 0.0643]])\n\n\"\"\"\nimport numpy\n\nfrom ..baseclass import Distribution, OperatorDistribution\n\n\nclass Negative(OperatorDistribution):\n \"\"\"Negative of a distribution.\"\"\"\n\n _operator = lambda self, left, right: -left\n\n def __init__(self, dist):\n \"\"\"\n Constructor.\n\n Args:\n dist (Distribution) : distribution.\n \"\"\"\n super(Negative, self).__init__(\n left=dist,\n right=0,\n )\n self._repr_args = [dist]\n\n def _lower(self, idx, left, right, cache):\n del right\n return -left._get_upper(idx, cache)\n\n def _upper(self, idx, left, right, cache):\n del right\n return -left._get_lower(idx, cache)\n\n def _pdf(self, xloc, idx, left, right, cache):\n del right\n return left._get_pdf(-xloc, idx, cache)\n\n def _cdf(self, xloc, idx, left, right, cache):\n del right\n return 1-left._get_fwd(-xloc, idx, cache)\n\n def _ppf(self, uloc, idx, left, right, cache):\n del right\n return -left._get_inv(1-uloc, idx, cache)\n\n def _mom(self, kloc, left, right, cache):\n \"\"\"Statistical moments.\"\"\"\n del right\n del cache\n return (-1)**numpy.sum(kloc)*left._get_mom(kloc)\n\n def _ttr(self, kloc, idx, left, right, cache):\n \"\"\"Three terms recurrence coefficients.\"\"\"\n del right\n del cache\n alpha, beta = left._get_ttr(kloc, idx)\n return -alpha, beta\n\n\ndef neg(left):\n \"\"\"\n Negative of a distribution.\n\n Args:\n dist (Distribution) : distribution.\n \"\"\"\n return Negative(left)\n" ]
[ [ "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RCastroAB/struc2vec
[ "822a1691e279a7ce47918b0f210e7c50efe52782" ]
[ "src/struc2vec.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport random,sys,logging\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\nfrom multiprocessing import Manager\nfrom time import time\nfrom collections import deque\n\nfrom utils import *\nfrom algorithms import *\nfrom algorithms_distances import *\nimport graph\n\n\nclass Graph():\n\tdef __init__(self, g, is_directed, workers, untilLayer = None):\n\n\t\tlogging.info(\" - Converting graph to dict...\")\n\t\tself.G = g.gToDict()\n\t\tlogging.info(\"Graph converted.\")\n\n\t\tself.num_vertices = g.number_of_nodes()\n\t\tself.num_edges = g.number_of_edges()\n\t\tself.is_directed = is_directed\n\t\tself.workers = workers\n\t\tself.calcUntilLayer = untilLayer\n\t\tlogging.info('Graph - Number of vertices: {}'.format(self.num_vertices))\n\t\tlogging.info('Graph - Number of edges: {}'.format(self.num_edges))\n\n\n\tdef preprocess_neighbors_with_bfs(self):\n\n\t\twith ProcessPoolExecutor(max_workers=self.workers) as executor:\n\t\t\tjob = executor.submit(exec_bfs,self.G,self.workers,self.calcUntilLayer)\n\t\t\t\n\t\t\tjob.result()\n\n\t\treturn\n\n\tdef preprocess_neighbors_with_bfs_compact(self):\n\n\t\twith ProcessPoolExecutor(max_workers=self.workers) as executor:\n\t\t\tjob = executor.submit(exec_bfs_compact,self.G,self.workers,self.calcUntilLayer)\n\t\t\t\n\t\t\tjob.result()\n\n\t\treturn\n\n\tdef preprocess_degree_lists(self):\n\n\t\twith ProcessPoolExecutor(max_workers=self.workers) as executor:\n\t\t\tjob = executor.submit(preprocess_degreeLists)\n\t\t\t\n\t\t\tjob.result()\n\n\t\treturn\n\n\n\tdef create_vectors(self):\n\t\tlogging.info(\"Creating degree vectors...\")\n\t\tdegrees = {}\n\t\tdegrees_sorted = set()\n\t\tG = self.G\n\t\tfor v in list(G.keys()):\n\t\t\tdegree = len(G[v])\n\t\t\tdegrees_sorted.add(degree)\n\t\t\tif(degree not in degrees):\n\t\t\t\tdegrees[degree] = {}\n\t\t\t\tdegrees[degree]['vertices'] = deque() \n\t\t\tdegrees[degree]['vertices'].append(v)\n\t\tdegrees_sorted = np.array(list(degrees_sorted),dtype='int')\n\t\tdegrees_sorted = np.sort(degrees_sorted)\n\n\t\tl = len(degrees_sorted)\n\t\tfor index, degree in enumerate(degrees_sorted):\n\t\t\tif(index > 0):\n\t\t\t\tdegrees[degree]['before'] = degrees_sorted[index - 1]\n\t\t\tif(index < (l - 1)):\n\t\t\t\tdegrees[degree]['after'] = degrees_sorted[index + 1]\n\t\tlogging.info(\"Degree vectors created.\")\n\t\tlogging.info(\"Saving degree vectors...\")\n\t\tsaveVariableOnDisk(degrees,'degrees_vector')\n\n\n\tdef calc_distances_all_vertices(self,compactDegree = False):\n\n\t\tlogging.info(\"Using compactDegree: {}\".format(compactDegree))\n\t\tif(self.calcUntilLayer):\n\t\t\tlogging.info(\"Calculations until layer: {}\".format(self.calcUntilLayer))\n\n\t\tfutures = {}\n\n\t\tcount_calc = 0\n\n\t\tvertices = list(reversed(sorted(self.G.keys())))\n\n\t\tif(compactDegree):\n\t\t logging.info(\"Recovering degreeList from disk...\")\n\t\t degreeList = restoreVariableFromDisk('compactDegreeList')\n\t\telse:\n\t\t logging.info(\"Recovering compactDegreeList from disk...\")\n\t\t degreeList = restoreVariableFromDisk('degreeList')\n\n\t\tparts = self.workers\n\t\tchunks = partition(vertices,parts)\n\n\t\tt0 = time()\n\n\t\twith ProcessPoolExecutor(max_workers = self.workers) as executor:\n\n\t\t\tpart = 1\n\t\t\tfor c in chunks:\n\t\t\t\tlogging.info(\"Executing part {}...\".format(part))\n\t\t\t\tlist_v = []\n\t\t\t\tfor v in c:\n\t\t\t\t\tlist_v.append([vd for vd in list(degreeList.keys()) if vd > v])\n\t\t\t\tjob = executor.submit(calc_distances_all, c, 
list_v, degreeList,part, compactDegree = compactDegree)\n\t\t\t\tfutures[job] = part\n\t\t\t\tpart += 1\n\n\n\t\t\tlogging.info(\"Receiving results...\")\n\n\t\t\tfor job in as_completed(futures):\n\t\t\t\tjob.result()\n\t\t\t\tr = futures[job]\n\t\t\t\tlogging.info(\"Part {} Completed.\".format(r))\n\t\t\n\t\tlogging.info('Distances calculated.')\n\t\tt1 = time()\n\t\tlogging.info('Time : {}m'.format((t1-t0)/60))\n\t\t\n\t\treturn\n\n\n\tdef calc_distances(self, compactDegree = False):\n\n\t\tlogging.info(\"Using compactDegree: {}\".format(compactDegree))\n\t\tif(self.calcUntilLayer):\n\t\t\tlogging.info(\"Calculations until layer: {}\".format(self.calcUntilLayer))\n\n\t\tfutures = {}\n\t\t#distances = {}\n\n\t\tcount_calc = 0\n\n\t\tG = self.G\n\t\tvertices = list(G.keys())\n\n\t\tparts = self.workers\n\t\tchunks = partition(vertices,parts)\n\n\t\twith ProcessPoolExecutor(max_workers = 1) as executor:\n\n\t\t\tlogging.info(\"Split degree List...\")\n\t\t\tpart = 1\n\t\t\tfor c in chunks:\n\t\t\t\tjob = executor.submit(splitDegreeList,part,c,G,compactDegree)\n\t\t\t\tjob.result()\n\t\t\t\tlogging.info(\"degreeList {} completed.\".format(part))\n\t\t\t\tpart += 1\n\n\t\t\n\t\twith ProcessPoolExecutor(max_workers = self.workers) as executor:\n\n\t\t\tpart = 1\n\t\t\tfor c in chunks:\n\t\t\t\tlogging.info(\"Executing part {}...\".format(part))\n\t\t\t\tjob = executor.submit(calc_distances, part, compactDegree = compactDegree)\n\t\t\t\tfutures[job] = part\n\t\t\t\tpart += 1\n\n\t\t\tlogging.info(\"Receiving results...\")\n\t\t\tfor job in as_completed(futures):\n\t\t\t\tjob.result()\n\t\t\t\tr = futures[job]\n\t\t\t\tlogging.info(\"Part {} completed.\".format(r))\n\n\n\t\treturn\n\n\tdef consolide_distances(self):\n\n\t\tdistances = {}\n\n\t\tparts = self.workers\n\t\tfor part in range(1,parts + 1):\n\t\t\td = restoreVariableFromDisk('distances-'+str(part))\n\t\t\tpreprocess_consolides_distances(distances)\n\t\t\tdistances.update(d)\n\n\n\t\tpreprocess_consolides_distances(distances)\n\t\tsaveVariableOnDisk(distances,'distances')\n\n\n\tdef create_distances_network(self):\n\n\t\twith ProcessPoolExecutor(max_workers=1) as executor:\n\t\t\tjob = executor.submit(generate_distances_network,self.workers)\n\n\t\t\tjob.result()\n\n\t\treturn\n\n\tdef preprocess_parameters_random_walk(self):\n\n\t\twith ProcessPoolExecutor(max_workers=1) as executor:\n\t\t\tjob = executor.submit(generate_parameters_random_walk,self.workers)\n\n\t\t\tjob.result()\n\n\t\treturn\n\n\n\tdef simulate_walks(self,num_walks,walk_length):\n\n\t\t# for large graphs, it is serially executed, because of memory use.\n\t\tif(len(self.G) > 500000):\n\n\t\t\twith ProcessPoolExecutor(max_workers=1) as executor:\n\t\t\t\tjob = executor.submit(generate_random_walks_large_graphs,num_walks,walk_length,self.workers,list(self.G.keys()))\n\n\t\t\t\tjob.result()\n\n\t\telse:\n\n\t\t\twith ProcessPoolExecutor(max_workers=1) as executor:\n\t\t\t\tjob = executor.submit(generate_random_walks,num_walks,walk_length,self.workers,list(self.G.keys()))\n\n\t\t\t\tjob.result()\n\n\n\t\treturn\t\n\n\n\n\n\n\t\t\n\n \t\n\n\n" ]
[ [ "numpy.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thePetrMarek/SequenceOfDigitsRecognition
[ "98a24a8da1c5c75096bcefe1fbf1dab8cc67438c" ]
[ "sequence_of_digits/sequence_reshaped_convolution_batchnorm.py" ]
[ "import tensorflow as tf\n\n'''\nRecurrent model for sequence recognition with dropout\n'''\n\n\nclass SequenceReshapedConvolutionBatchnorm:\n def get_name(self):\n return \"sequence_reshaped_convolution_batchnorm\"\n\n def input_placeholders(self):\n inputs_placeholder = tf.placeholder(tf.float32, shape=[None, 28, 140], name=\"inputs\")\n labels_placeholder = tf.placeholder(tf.float32, shape=[None, 5, 10], name=\"labels\")\n keep_prob_placeholder = tf.placeholder(tf.float32)\n is_training_placeholder = tf.placeholder(tf.bool)\n return inputs_placeholder, labels_placeholder, keep_prob_placeholder, is_training_placeholder\n\n def inference(self, input, keep_prob, is_training):\n with tf.name_scope(\"inference\"):\n input = tf.reshape(input, [-1, 28, 140, 1])\n conv1 = self._convolutional(input, [10, 10, 1, 8])\n relu1 = self._relu(conv1)\n batch_norm1 = tf.contrib.layers.batch_norm(relu1, decay=0.9, is_training=is_training)\n dropout1 = tf.nn.dropout(batch_norm1, keep_prob)\n max_pool1 = self._max_pooling(dropout1, [1, 2, 2, 1], [1, 2, 2, 1])\n\n conv2 = self._convolutional(max_pool1, [5, 5, 8, 16])\n relu2 = self._relu(conv2)\n batch_norm2 = tf.contrib.layers.batch_norm(relu2, decay=0.9, is_training=is_training)\n dropout2 = tf.nn.dropout(batch_norm2, keep_prob)\n max_pool2 = self._max_pooling(dropout2, [1, 2, 2, 1], [1, 2, 2, 1])\n\n conv3 = self._convolutional(max_pool2, [2, 2, 16, 32])\n relu3 = self._relu(conv3)\n batch_norm3 = tf.contrib.layers.batch_norm(relu3, decay=0.9, is_training=is_training)\n dropout3 = tf.nn.dropout(batch_norm3, keep_prob)\n max_pool3 = self._max_pooling(dropout3, [1, 2, 2, 1], [1, 2, 2, 1])\n\n reshaped = tf.reshape(max_pool3, [-1, 2304])\n\n logits = []\n gru = tf.contrib.rnn.GRUCell(576)\n state = gru.zero_state(tf.shape(reshaped)[0], tf.float32)\n with tf.variable_scope(\"RNN\"):\n for i in range(5):\n if i > 0: tf.get_variable_scope().reuse_variables()\n output, state = gru(reshaped, state)\n number_logits = self._fully_connected(output, 576, 10)\n logits.append(number_logits)\n return tf.stack(logits, axis=1)\n\n def loss(self, logits, labels):\n with tf.name_scope(\"loss\"):\n labels = tf.to_int64(labels)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits,\n name=\"cross_entropy\")\n mean = tf.reduce_mean(cross_entropy, name=\"cross_entropy_mean\")\n tf.summary.scalar(\"loss\", mean)\n return mean\n\n def training(self, loss, learning_rate):\n with tf.name_scope(\"training\"):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_operation = optimizer.minimize(loss)\n return train_operation\n\n def evaluation(self, logits, labels):\n with tf.name_scope(\"evaluation\"):\n labels = tf.to_int64(labels)\n labels = tf.argmax(labels, 2)\n logits = tf.argmax(logits, 2)\n difference = tf.subtract(labels, logits, name=\"sub\")\n corrects = tf.count_nonzero(difference, axis=1, name=\"count_nonzero\")\n corrects = tf.less_equal(corrects, 0, name=\"is_zero\")\n\n return self.tf_count(corrects, True), corrects, logits\n\n def tf_count(self, t, val):\n elements_equal_to_value = tf.equal(t, val)\n as_ints = tf.cast(elements_equal_to_value, tf.int32)\n count = tf.reduce_sum(as_ints)\n return count\n\n def _fully_connected(self, input, size_in, size_out, name=\"fc\"):\n with tf.name_scope(name):\n w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.1, 
shape=[size_out]), name=\"b\")\n act = tf.matmul(input, w) + b\n return act\n\n def _convolutional(self, input, dimensions, name=\"conv\"):\n with tf.name_scope(name):\n w = tf.Variable(tf.truncated_normal(dimensions, stddev=0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.1, shape=[dimensions[3]]), name=\"b\")\n return tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='SAME') + b\n\n def _max_pooling(self, input, ksize, strides, name=\"max_pooling\"):\n with tf.name_scope(name):\n return tf.nn.max_pool(input, ksize, strides, padding=\"SAME\")\n\n def _relu(self, input, name=\"relu\"):\n with tf.name_scope(name):\n return tf.nn.relu(input)\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.contrib.rnn.GRUCell", "tensorflow.count_nonzero", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.equal", "tensorflow.control_dependencies", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.to_int64", "tensorflow.get_collection", "tensorflow.subtract", "tensorflow.name_scope", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.shape", "tensorflow.less_equal", "tensorflow.placeholder", "tensorflow.contrib.layers.batch_norm", "tensorflow.nn.relu", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
SyedZiaul/freud
[ "04bcf9b3bcf45c14b05872205eb16205b2dbcf60" ]
[ "benchmarks/benchmark_box_ParticleBuffer.py" ]
[ "import numpy as np\nfrom benchmark import Benchmark\nfrom benchmarker import run_benchmarks\n\nimport freud\n\n\nclass BenchmarkParticleBuffer(Benchmark):\n def __init__(self, L, buf, images):\n self.L = L\n self.buffer = buf\n self.images = images\n\n def bench_setup(self, N):\n seed = 0\n np.random.seed(seed)\n self.positions = np.random.uniform(-self.L / 2, self.L / 2, (N, 3))\n self.pbuff = freud.box.PeriodicBuffer()\n\n def bench_run(self, N):\n box = freud.box.Box.cube(self.L)\n self.pbuff.compute(\n (box, self.positions), buffer=self.buffer, images=self.images\n )\n\n\ndef run():\n Ns = [1000, 5000, 10000]\n number = 100\n name = \"freud.box.ParticleBuffer\"\n\n L = 10\n buf = 2\n images = True\n return run_benchmarks(\n name, Ns, number, BenchmarkParticleBuffer, L=L, buf=buf, images=images\n )\n\n\nif __name__ == \"__main__\":\n run()\n" ]
[ [ "numpy.random.uniform", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kchiguichon/dfn
[ "0485ce36ed81683b60a08c98af28792ead1425d3" ]
[ "src/models/neural_models.py" ]
[ "# Copyright 2019 Kenneth Chiguichon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models\nfrom scipy import fftpack, ndimage\nimport matplotlib.pyplot as plt\nimport random\n\nclass DAN(models.Model):\n def __init__(self, \n vocab_size: int, \n embedding_dim: int, \n output_dim: int, \n num_layers: int,\n hidden_dim: int, \n dropout: float = 0.2,\n trainable_embeddings: bool = True, **kwargs):\n super(DAN, self).__init__()\n self.num_layers = num_layers\n self.dropout_prob = dropout\n self.embeddings = tf.Variable(tf.random.normal([vocab_size, embedding_dim]), trainable=trainable_embeddings)\n for i in range(self.num_layers):\n dense_name = 'dense' + str(i+1)\n setattr(self, dense_name, layers.Dense(hidden_dim, activation='tanh', name=dense_name))\n self.classifier = layers.Dense(output_dim)\n\n def call(self, batch_data: tf.Tensor, training=False) -> tf.Tensor:\n sequence_mask = tf.cast(batch_data != 0, dtype=tf.float32)\n logits = tf.nn.embedding_lookup(self.embeddings, batch_data)\n if training:\n # Word dropout\n dropout_mask = tf.cast(tf.random.uniform(batch_data.get_shape()) >= self.dropout_prob, dtype=tf.float32) * sequence_mask\n divisor = tf.expand_dims(tf.reduce_sum(dropout_mask, 1, True), [-1])\n logits = tf.squeeze(tf.divide(\n tf.reduce_sum(logits * tf.expand_dims(dropout_mask, [-1]), 1, True),\n tf.where(divisor == 0, tf.ones_like(divisor), divisor)\n ))\n else:\n if sequence_mask is not None:\n divisor = tf.expand_dims(tf.reduce_sum(sequence_mask, 1, True), [-1])\n sequence_mask = tf.cast(tf.expand_dims(sequence_mask, [-1]), dtype=tf.float32)\n inputs = logits * sequence_mask\n else:\n divisor = tf.expand_dims(tf.reduce_sum(tf.ones_like(logits), 1, True), [-1])\n inputs = logits\n logits = tf.squeeze(tf.divide(tf.reduce_sum(inputs, 1, True), divisor))\n \n for i in range(self.num_layers):\n logits = getattr(self, 'dense' + str(i+1))(logits)\n logits = self.classifier(logits)\n return logits\n\nclass DFN(models.Model):\n def __init__(self, \n vocab_size: int, \n embedding_dim: int, \n output_dim: int, \n num_layers: int,\n hidden_dim: int, \n dropout: float = 0.2,\n trainable_embeddings: bool = True,\n transform_sequences = False, **kwargs):\n super(DFN, self).__init__()\n self.num_layers = num_layers\n self.dropout_prob = dropout\n self.transform_sequences = transform_sequences\n\n def swish(inputs):\n return inputs * tf.math.sigmoid(0.7 * inputs)\n tf.keras.utils.get_custom_objects().update({'swish' : layers.Activation(swish)})\n\n self.embeddings = tf.Variable(tf.random.normal([vocab_size, embedding_dim]), trainable=trainable_embeddings)\n for i in range(self.num_layers):\n dense_name = 'dense' + str(i+1)\n setattr(self, dense_name, layers.Dense(hidden_dim, activation='swish', name=dense_name))\n self.classifier = layers.Dense(output_dim)\n\n def call(self, batch_data: tf.Tensor, training=False) -> tf.Tensor:\n sequence_mask = tf.cast(tf.expand_dims(batch_data != 0, [-1]), 
dtype=tf.float32)\n logits = tf.nn.embedding_lookup(self.embeddings, batch_data) * sequence_mask\n\n if training:\n dropout_mask = tf.cast(tf.random.uniform(batch_data.get_shape()) >= self.dropout_prob, dtype=tf.float32)\n logits *= tf.expand_dims(dropout_mask, -1)\n\n # Convert to fourier space\n x = tf.signal.rfft(logits)\n if self.transform_sequences:\n y = tf.signal.rfft(tf.transpose(logits, [0, 2, 1]))\n else:\n y = tf.transpose(x, [0, 2, 1])\n\n # Aggregate\n x = tf.reduce_sum(x, 1)\n y = tf.reduce_sum(y, 1)\n\n # Reverse transform\n x = tf.signal.irfft(x)\n y = tf.signal.irfft(y)\n\n logits = tf.concat([x, y], -1)\n for i in range(self.num_layers):\n logits = getattr(self, 'dense' + str(i+1))(logits)\n logits = self.classifier(logits)\n return logits\n\nclass GRU(models.Model):\n def __init__(self, \n vocab_size: int, \n embedding_dim: int, \n output_dim: int, \n num_layers: int,\n hidden_dim:int, \n dropout: float = 0.2,\n trainable_embeddings: bool = True, **kwargs):\n super(GRU, self).__init__()\n self.num_layers = num_layers\n self.embeddings = tf.Variable(tf.random.normal([vocab_size, embedding_dim]), trainable=trainable_embeddings)\n for i in range(self.num_layers):\n name = 'gru' + str(i+1)\n if i < num_layers -1:\n setattr(self, name, layers.GRU(hidden_dim, activation='tanh', return_sequences=True, name=name))\n else:\n setattr(self, name, layers.GRU(hidden_dim, activation='tanh', name=name))\n self.classifier = layers.Dense(output_dim)\n\n def call(self, batch_data: tf.Tensor, training=False) -> tf.Tensor:\n sequence_mask = batch_data != 0\n logits = tf.nn.embedding_lookup(self.embeddings, batch_data)\n for i in range(self.num_layers):\n logits = getattr(self, 'gru' + str(i+1))(logits, mask=sequence_mask)\n logits = self.classifier(logits)\n return logits\n" ]
[ [ "tensorflow.concat", "tensorflow.transpose", "tensorflow.keras.layers.Activation", "tensorflow.random.normal", "tensorflow.keras.layers.Dense", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.keras.utils.get_custom_objects", "tensorflow.expand_dims", "tensorflow.ones_like", "tensorflow.math.sigmoid", "tensorflow.keras.layers.GRU", "tensorflow.signal.rfft", "tensorflow.signal.irfft", "tensorflow.nn.embedding_lookup" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
bengisug/multiagent-particle-envs
[ "c87280f18fbaf885932fe6da4d600ab474fd83fe" ]
[ "multiagent/policies/attention/multi_head_attention.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass MultiHeadAttention(torch.nn.Module):\n\n def __init__(self, d_model=16, head_count=4, bias=True):\n super(MultiHeadAttention, self).__init__()\n self.head_count = head_count\n self.d_model = d_model\n self.d_k = d_model // head_count\n self.d_v = d_model // head_count\n self.nonlinear = nn.ReLU()\n assert d_model % head_count == 0, \"d_model: {} should be divisible by head_count: {}\".format(d_model, head_count)\n\n self.q_linear = nn.Linear(self.d_model, self.d_k * self.head_count, bias=bias)\n self.k_linear = nn.Linear(self.d_model, self.d_k * self.head_count, bias=bias)\n self.v_linear = nn.Linear(self.d_model, self.d_v * self.head_count, bias=bias)\n\n self.attention = ScaledDotProduct(self.d_k, head_count)\n\n def forward(self, query, key, value, mask=None):\n if len(query.shape) == 2:\n query = query.unsqueeze(0)\n key = key.unsqueeze(0)\n value = value.unsqueeze(0)\n\n batch_size, node_size, _ = query.size()\n\n query = self.nonlinear(self.q_linear(query))\n query = query.reshape(batch_size, node_size, self.head_count, self.d_k).permute(0, 2, 1, 3).reshape(\n batch_size * self.head_count, node_size, self.d_k)\n # query = query.reshape(node_size, -1, self.head_count, self.d_k).transpose(1, 2).transpose(0, 1).transpose(1, 2)\n\n key = self.nonlinear(self.k_linear(key))\n key = key.reshape(batch_size, node_size, self.head_count, self.d_k).permute(0, 2, 1, 3).reshape(\n batch_size * self.head_count, node_size, self.d_k)\n # key = key.reshape(node_size, -1, self.head_count, self.d_k).transpose(1, 2).transpose(0, 1).transpose(1, 2)\n\n value = self.nonlinear(self.v_linear(value))\n value = value.reshape(batch_size, node_size, self.head_count, self.d_k).permute(0, 2, 1, 3).reshape(\n batch_size * self.head_count, node_size, self.d_k)\n # value = value.reshape(node_size, -1, self.head_count, self.d_k).transpose(1, 2).transpose(0, 1).transpose(1, 2)\n\n x, dist = self.attention(query, key, value, mask)\n x = x.reshape(batch_size, self.head_count, node_size, self.d_k).permute(0, 2, 1, 3).reshape(batch_size,\n node_size,\n self.d_model)\n # x = x.reshape(-1, node_size, self.d_model)\n\n return x, dist\n\n\nclass ScaledDotProduct(torch.nn.Module):\n\n def __init__(self, d_k, head_count):\n super(ScaledDotProduct, self).__init__()\n self.d_k = d_k\n self.head_count = head_count\n\n def forward(self, query, key, value, mask=None, dropout=None):\n x = torch.matmul(query, key.transpose(-2, -1)) / (self.d_k ** 0.5)\n if mask is not None:\n mask = mask.repeat(self.head_count, 1, 1)\n x = x.masked_fill(mask == 0, -1e9)\n x = torch.nn.functional.softmax(x, dim=-1)\n dist = x\n if dropout is not None:\n x = torch.nn.Dropout(dropout)\n\n x = torch.matmul(x, value)\n return x, dist\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.nn.Linear", "torch.matmul", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nastiag67/tools
[ "9989c98acd67ff8fda900d0a2ae3a6d21e2bbd88" ]
[ "tests/models/test_clustering.py" ]
[ "\"\"\"Tests for `clustering` module.\"\"\"\n\nimport pytest\nimport pandas as pd\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.decomposition import PCA\n\nfrom pipelitools.models import clustering as c\n\n\[email protected](scope=\"function\")\ndef df_binary():\n X_train, y_train = make_classification(n_samples=100, n_features=2, n_informative=2,\n n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,\n class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=1)\n X_test, y_test = make_classification(n_samples=50, n_features=2, n_informative=2,\n n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,\n class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=2)\n y_train = pd.Series(y_train)\n y_test = pd.Series(y_test)\n return X_train, y_train, X_test, y_test\n\n\[email protected](scope=\"function\")\ndef df_multiclass():\n X_train, y_train = make_classification(n_samples=100, n_features=2, n_informative=2,\n n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1,\n class_sep=2, flip_y=0, weights=[0.2, 0.3, 0.5], random_state=1)\n X_test, y_test = make_classification(n_samples=50, n_features=2, n_informative=2,\n n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1,\n class_sep=2, flip_y=0, weights=[0.3, 0.3, 0.4], random_state=2)\n y_train = pd.Series(y_train)\n y_test = pd.Series(y_test)\n return X_train, y_train, X_test, y_test\n\n\ndef test_basic(df_binary):\n X_train, y_train, X_test, y_test = df_binary\n ncomp = 2\n pca = PCA(n_components=ncomp, whiten=True)\n pca.fit(X_train) # estimate the parameters of the PCA\n Z = pca.transform(X_train)\n\n c.n_clusters(Z, Kmax=10, n_init=100)\n\n\n\n\n" ]
[ [ "sklearn.decomposition.PCA", "sklearn.datasets.make_classification", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
danielkingai2/allennlp
[ "65a146d20697ac431564ef1bedb18d61011e727e" ]
[ "allennlp/training/tensorboard_writer.py" ]
[ "from typing import Any, Callable, Dict, List, Optional, Set\nimport logging\nimport os\n\nfrom tensorboardX import SummaryWriter\nimport torch\n\nfrom allennlp.common.from_params import FromParams\nfrom allennlp.data.dataloader import TensorDict\nfrom allennlp.nn import util as nn_util\nfrom allennlp.training.optimizers import Optimizer\nfrom allennlp.training import util as training_util\nfrom allennlp.models.model import Model\n\nlogger = logging.getLogger(__name__)\n\n\nclass TensorboardWriter(FromParams):\n \"\"\"\n Class that handles Tensorboard (and other) logging.\n\n # Parameters\n\n serialization_dir : `str`, optional (default = `None`)\n If provided, this is where the Tensorboard logs will be written.\n summary_interval : `int`, optional (default = `100`)\n Most statistics will be written out only every this many batches.\n histogram_interval : `int`, optional (default = `None`)\n If provided, activation histograms will be written out every this many batches.\n If None, activation histograms will not be written out.\n When this parameter is specified, the following additional logging is enabled:\n * Histograms of model parameters\n * The ratio of parameter update norm to parameter norm\n * Histogram of layer activations\n We log histograms of the parameters returned by\n `model.get_parameters_for_histogram_tensorboard_logging`.\n The layer activations are logged for any modules in the `Model` that have\n the attribute `should_log_activations` set to `True`. Logging\n histograms requires a number of GPU-CPU copies during training and is typically\n slow, so we recommend logging histograms relatively infrequently.\n Note: only Modules that return tensors, tuples of tensors or dicts\n with tensors as values currently support activation logging.\n batch_size_interval : `int`, optional, (default = `None`)\n If defined, how often to log the average batch size.\n should_log_parameter_statistics : `bool`, optional (default = `True`)\n Whether to log parameter statistics (mean and standard deviation of parameters and\n gradients).\n should_log_learning_rate : `bool`, optional (default = `False`)\n Whether to log (parameter-specific) learning rate.\n get_batch_num_total : `Callable[[], int]`, optional (default = `None`)\n A thunk that returns the number of batches so far. Most likely this will\n be a closure around an instance variable in your `Trainer` class. 
Because of circular\n dependencies in constructing this object and the `Trainer`, this is typically `None` when\n you construct the object, but it gets set inside the constructor of our `Trainer`.\n \"\"\"\n\n def __init__(\n self,\n serialization_dir: Optional[str] = None,\n summary_interval: int = 100,\n histogram_interval: int = None,\n batch_size_interval: Optional[int] = None,\n should_log_parameter_statistics: bool = True,\n should_log_learning_rate: bool = False,\n get_batch_num_total: Callable[[], int] = None,\n ) -> None:\n if serialization_dir is not None:\n # Create log directories prior to creating SummaryWriter objects\n # in order to avoid race conditions during distributed training.\n train_ser_dir = os.path.join(serialization_dir, \"log\", \"train\")\n os.makedirs(train_ser_dir, exist_ok=True)\n self._train_log = SummaryWriter(train_ser_dir)\n val_ser_dir = os.path.join(serialization_dir, \"log\", \"validation\")\n os.makedirs(val_ser_dir, exist_ok=True)\n self._validation_log = SummaryWriter(val_ser_dir)\n else:\n self._train_log = self._validation_log = None\n\n self._summary_interval = summary_interval\n self._histogram_interval = histogram_interval\n self._batch_size_interval = batch_size_interval\n self._should_log_parameter_statistics = should_log_parameter_statistics\n self._should_log_learning_rate = should_log_learning_rate\n self.get_batch_num_total = get_batch_num_total\n\n self._cumulative_batch_group_size = 0\n self._batches_this_epoch = 0\n self._histogram_parameters: Set[str] = None\n\n @staticmethod\n def _item(value: Any):\n if hasattr(value, \"item\"):\n val = value.item()\n else:\n val = value\n return val\n\n def log_batch(\n self,\n model: Model,\n optimizer: Optimizer,\n batch_grad_norm: Optional[float],\n metrics: Dict[str, float],\n batch_group: List[List[TensorDict]],\n param_updates: Optional[Dict[str, torch.Tensor]],\n ) -> None:\n if self.should_log_this_batch():\n self.log_parameter_and_gradient_statistics(model, batch_grad_norm)\n self.log_learning_rates(model, optimizer)\n\n self.add_train_scalar(\"loss/loss_train\", metrics[\"loss\"])\n self.log_metrics({\"epoch_metrics/\" + k: v for k, v in metrics.items()})\n\n if self.should_log_histograms_this_batch():\n self.log_histograms(model)\n self.log_gradient_updates(model, param_updates)\n\n if self._batch_size_interval:\n # We're assuming here that `log_batch` will get called every batch, and only every\n # batch. 
This is true with our current usage of this code (version 1.0); if that\n # assumption becomes wrong, this code will break.\n batch_group_size = sum(training_util.get_batch_size(batch) for batch in batch_group)\n self._batches_this_epoch += 1\n self._cumulative_batch_group_size += batch_group_size\n\n if (self._batches_this_epoch - 1) % self._batch_size_interval == 0:\n average = self._cumulative_batch_group_size / self._batches_this_epoch\n logger.info(f\"current batch size: {batch_group_size} mean batch size: {average}\")\n self.add_train_scalar(\"current_batch_size\", batch_group_size)\n self.add_train_scalar(\"mean_batch_size\", average)\n\n def reset_epoch(self) -> None:\n self._cumulative_batch_group_size = 0\n self._batches_this_epoch = 0\n\n def should_log_this_batch(self) -> bool:\n return self.get_batch_num_total() % self._summary_interval == 0\n\n def should_log_histograms_this_batch(self) -> bool:\n return (\n self._histogram_interval is not None\n and self.get_batch_num_total() % self._histogram_interval == 0\n )\n\n def add_train_scalar(self, name: str, value: float, timestep: int = None) -> None:\n timestep = timestep or self.get_batch_num_total()\n # get the scalar\n if self._train_log is not None:\n self._train_log.add_scalar(name, self._item(value), timestep)\n\n def add_train_histogram(self, name: str, values: torch.Tensor) -> None:\n if self._train_log is not None:\n if isinstance(values, torch.Tensor):\n values_to_write = values.cpu().data.numpy().flatten()\n self._train_log.add_histogram(name, values_to_write, self.get_batch_num_total())\n\n def add_validation_scalar(self, name: str, value: float, timestep: int = None) -> None:\n timestep = timestep or self.get_batch_num_total()\n if self._validation_log is not None:\n self._validation_log.add_scalar(name, self._item(value), timestep)\n\n def log_parameter_and_gradient_statistics(self, model: Model, batch_grad_norm: float) -> None:\n \"\"\"\n Send the mean and std of all parameters and gradients to tensorboard, as well\n as logging the average gradient norm.\n \"\"\"\n if self._should_log_parameter_statistics:\n # Log parameter values to Tensorboard\n for name, param in model.named_parameters():\n if param.data.numel() > 0:\n self.add_train_scalar(\"parameter_mean/\" + name, param.data.mean())\n if param.data.numel() > 1:\n self.add_train_scalar(\"parameter_std/\" + name, param.data.std())\n if param.grad is not None:\n if param.grad.is_sparse:\n\n grad_data = param.grad.data._values()\n else:\n grad_data = param.grad.data\n\n # skip empty gradients\n if torch.prod(torch.tensor(grad_data.shape)).item() > 0:\n self.add_train_scalar(\"gradient_mean/\" + name, grad_data.mean())\n if grad_data.numel() > 1:\n self.add_train_scalar(\"gradient_std/\" + name, grad_data.std())\n else:\n # no gradient for a parameter with sparse gradients\n logger.info(\"No gradient for %s, skipping tensorboard logging.\", name)\n # norm of gradients\n if batch_grad_norm is not None:\n self.add_train_scalar(\"gradient_norm\", batch_grad_norm)\n\n def log_learning_rates(self, model: Model, optimizer: torch.optim.Optimizer):\n \"\"\"\n Send current parameter specific learning rates to tensorboard\n \"\"\"\n if self._should_log_learning_rate:\n # optimizer stores lr info keyed by parameter tensor\n # we want to log with parameter name\n names = {param: name for name, param in model.named_parameters()}\n for group in optimizer.param_groups:\n if \"lr\" not in group:\n continue\n rate = group[\"lr\"]\n for param in group[\"params\"]:\n # check 
whether params has requires grad or not\n effective_rate = rate * float(param.requires_grad)\n self.add_train_scalar(\"learning_rate/\" + names[param], effective_rate)\n\n def log_histograms(self, model: Model) -> None:\n \"\"\"\n Send histograms of parameters to tensorboard.\n \"\"\"\n if not self._histogram_parameters:\n # Avoiding calling this every batch. If we ever use two separate models with a single\n # writer, this is wrong, but I doubt that will ever happen.\n self._histogram_parameters = set(\n model.get_parameters_for_histogram_tensorboard_logging()\n )\n for name, param in model.named_parameters():\n if name in self._histogram_parameters:\n self.add_train_histogram(\"parameter_histogram/\" + name, param)\n\n def log_gradient_updates(self, model: Model, param_updates: Dict[str, torch.Tensor]) -> None:\n for name, param in model.named_parameters():\n update_norm = torch.norm(param_updates[name].view(-1))\n param_norm = torch.norm(param.view(-1)).cpu()\n self.add_train_scalar(\n \"gradient_update/\" + name,\n update_norm / (param_norm + nn_util.tiny_value_of_dtype(param_norm.dtype)),\n )\n\n def log_metrics(\n self,\n train_metrics: dict,\n val_metrics: dict = None,\n epoch: int = None,\n log_to_console: bool = False,\n ) -> None:\n \"\"\"\n Sends all of the train metrics (and validation metrics, if provided) to tensorboard.\n \"\"\"\n metric_names = set(train_metrics.keys())\n if val_metrics is not None:\n metric_names.update(val_metrics.keys())\n val_metrics = val_metrics or {}\n\n # For logging to the console\n if log_to_console:\n dual_message_template = \"%s | %8.3f | %8.3f\"\n no_val_message_template = \"%s | %8.3f | %8s\"\n no_train_message_template = \"%s | %8s | %8.3f\"\n header_template = \"%s | %-10s\"\n name_length = max(len(x) for x in metric_names)\n logger.info(header_template, \"Training\".rjust(name_length + 13), \"Validation\")\n\n for name in metric_names:\n # Log to tensorboard\n train_metric = train_metrics.get(name)\n if train_metric is not None:\n self.add_train_scalar(name, train_metric, timestep=epoch)\n val_metric = val_metrics.get(name)\n if val_metric is not None:\n self.add_validation_scalar(name, val_metric, timestep=epoch)\n\n # And maybe log to console\n if log_to_console and val_metric is not None and train_metric is not None:\n logger.info(\n dual_message_template, name.ljust(name_length), train_metric, val_metric\n )\n elif log_to_console and val_metric is not None:\n logger.info(no_train_message_template, name.ljust(name_length), \"N/A\", val_metric)\n elif log_to_console and train_metric is not None:\n logger.info(no_val_message_template, name.ljust(name_length), train_metric, \"N/A\")\n\n def enable_activation_logging(self, model: Model) -> None:\n if self._histogram_interval is not None:\n # To log activation histograms to the forward pass, we register\n # a hook on forward to capture the output tensors.\n # This uses a closure to determine whether to log the activations,\n # since we don't want them on every call.\n for _, module in model.named_modules():\n if not getattr(module, \"should_log_activations\", False):\n # skip it\n continue\n\n def hook(module_, inputs, outputs):\n\n log_prefix = \"activation_histogram/{0}\".format(module_.__class__)\n if self.should_log_histograms_this_batch():\n self.log_activation_histogram(outputs, log_prefix)\n\n module.register_forward_hook(hook)\n\n def log_activation_histogram(self, outputs, log_prefix: str) -> None:\n if isinstance(outputs, torch.Tensor):\n log_name = log_prefix\n 
self.add_train_histogram(log_name, outputs)\n elif isinstance(outputs, (list, tuple)):\n for i, output in enumerate(outputs):\n log_name = \"{0}_{1}\".format(log_prefix, i)\n self.add_train_histogram(log_name, output)\n elif isinstance(outputs, dict):\n for k, tensor in outputs.items():\n log_name = \"{0}_{1}\".format(log_prefix, k)\n self.add_train_histogram(log_name, tensor)\n else:\n # skip it\n pass\n\n def close(self) -> None:\n \"\"\"\n Calls the `close` method of the `SummaryWriter` s which makes sure that pending\n scalars are flushed to disk and the tensorboard event files are closed properly.\n \"\"\"\n if self._train_log is not None:\n self._train_log.close()\n if self._validation_log is not None:\n self._validation_log.close()\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TheDutchDevil/pipelines
[ "a5ba3f0fcd98ffd60f98bce964927ab63382d5d7" ]
[ "components/deprecated/tfx/Trainer/component.py" ]
[ "# flake8: noqa TODO\n\nfrom kfp.components import InputPath, OutputPath\n\n\ndef Trainer(\n examples_path: InputPath('Examples'),\n schema_path: InputPath('Schema'),\n\n model_path: OutputPath('Model'),\n\n train_args: {'JsonObject': {'data_type': 'proto:tfx.components.trainer.TrainArgs'}},\n eval_args: {'JsonObject': {'data_type': 'proto:tfx.components.trainer.EvalArgs'}},\n module_file: str = None,\n trainer_fn: str = None,\n custom_config: dict = None,\n\n transform_graph_path: InputPath('TransformGraph') = None,\n base_model_path: InputPath('Model') = None,\n hyperparameters_path: InputPath('HyperParameters') = None,\n):\n \"\"\"\n A TFX component to train a TensorFlow model.\n\n The Trainer component is used to train and eval a model using given inputs and\n a user-supplied estimator. This component includes a custom driver to\n optionally grab previous model to warm start from.\n\n ## Providing an estimator\n The TFX executor will use the estimator provided in the `module_file` file\n to train the model. The Trainer executor will look specifically for the\n `trainer_fn()` function within that file. Before training, the executor will\n call that function expecting the following returned as a dictionary:\n\n - estimator: The\n [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)\n to be used by TensorFlow to train the model.\n - train_spec: The\n [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)\n to be used by the \"train\" part of the TensorFlow `train_and_evaluate()`\n call.\n - eval_spec: The\n [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)\n to be used by the \"eval\" part of the TensorFlow `train_and_evaluate()` call.\n - eval_input_receiver_fn: The\n [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)\n to be used\n by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)\n component when validating the model.\n\n An example of `trainer_fn()` can be found in the [user-supplied\n code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))\n of the TFX Chicago Taxi pipeline example.\n\n\n Args:\n examples: A Channel of 'Examples' type, serving as the source of\n examples that are used in training (required). May be raw or\n transformed.\n transform_graph: An optional Channel of 'TransformGraph' type, serving as\n the input transform graph if present.\n schema: A Channel of 'SchemaPath' type, serving as the schema of training\n and eval data.\n module_file: A path to python module file containing UDF model definition.\n The module_file must implement a function named `trainer_fn` at its\n top level. The function must have the following signature.\n\n def trainer_fn(tf.contrib.training.HParams,\n tensorflow_metadata.proto.v0.schema_pb2) -> Dict:\n ...\n\n where the returned Dict has the following key-values.\n 'estimator': an instance of tf.estimator.Estimator\n 'train_spec': an instance of tf.estimator.TrainSpec\n 'eval_spec': an instance of tf.estimator.EvalSpec\n 'eval_input_receiver_fn': an instance of tfma.export.EvalInputReceiver\n\n Exactly one of 'module_file' or 'trainer_fn' must be supplied.\n trainer_fn: A python path to UDF model definition function. See\n 'module_file' for the required signature of the UDF.\n Exactly one of 'module_file' or 'trainer_fn' must be supplied.\n train_args: A trainer_pb2.TrainArgs instance, containing args used for\n training. 
Current only num_steps is available.\n eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.\n Current only num_steps is available.\n custom_config: A dict which contains the training job parameters to be\n passed to Google Cloud ML Engine. For the full set of parameters\n supported by Google Cloud ML Engine, refer to\n https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job\n Returns:\n model: Optional 'Model' channel for result of exported models.\n Raises:\n ValueError:\n - When both or neither of 'module_file' and 'trainer_fn' is supplied.\n \"\"\"\n from tfx.components.trainer.component import Trainer as component_class\n\n #Generated code\n import json\n import os\n import tensorflow\n from google.protobuf import json_format, message\n from tfx.types import Artifact, channel_utils, artifact_utils\n\n arguments = locals().copy()\n\n component_class_args = {}\n\n for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():\n argument_value_obj = argument_value = arguments.get(name, None)\n if argument_value is None:\n continue\n parameter_type = execution_parameter.type\n if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple\n argument_value_obj = parameter_type()\n json_format.Parse(argument_value, argument_value_obj)\n component_class_args[name] = argument_value_obj\n\n for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():\n artifact_path = arguments[name + '_path']\n if artifact_path:\n artifact = channel_parameter.type()\n artifact.uri = artifact_path + '/' # ?\n if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:\n # Recovering splits\n subdirs = tensorflow.io.gfile.listdir(artifact_path)\n artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))\n component_class_args[name] = channel_utils.as_channel([artifact])\n\n component_class_instance = component_class(**component_class_args)\n\n input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}\n output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}\n exec_properties = component_class_instance.exec_properties\n\n # Generating paths for output artifacts\n for name, artifacts in output_dict.items():\n base_artifact_path = arguments[name + '_path']\n # Are there still cases where output channel has multiple artifacts?\n for idx, artifact in enumerate(artifacts):\n subdir = str(idx + 1) if idx > 0 else ''\n artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'\n\n print('component instance: ' + str(component_class_instance))\n\n #executor = component_class.EXECUTOR_SPEC.executor_class() # Same\n executor = component_class_instance.executor_spec.executor_class()\n executor.Do(\n input_dict=input_dict,\n output_dict=output_dict,\n exec_properties=exec_properties,\n )\n\n\nif __name__ == '__main__':\n import kfp\n kfp.components.func_to_container_op(\n Trainer,\n base_image='tensorflow/tfx:0.21.4',\n output_component_file='component.yaml'\n )\n" ]
[ [ "tensorflow.io.gfile.listdir" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DavidLeoni/prova-qcb
[ "38520ab66b34a145e43ffb0ee808562eae31c325" ]
[ "jupman.py" ]
[ "\n# Library to be included in Jupyter notebooks \n\n__author__ = \"David Leoni\"\n__status__ = \"Development\"\n\nimport sys\nimport unittest\nimport inspect\nimport os\nimport argparse\n\ndef detect_relpath(in_cells):\n \"\"\" Hacky way to find out relative path to jupman.py\n\n in_cells: \"In\" cells of a notebook\n \"\"\"\n import re \n \n for code in in_cells: \n rs = re.findall(r'import\\s+sys\\s*;?\\s*\\nsys\\.path.append\\([\\'\\\"]((\\.\\./)+)[\\'\\\"]\\)\\s*;?\\s*\\nimport\\s+jupman', code)\n \n if rs:\n return rs[0][0] \n return ''\n\n\ndef init(toc=False):\n \"\"\" Injects notebooks with js and css from _static\n \n To be called at the beginning of notebooks, only if you *really* need it.\n Please do read https://jupman.readthedocs.io/en/latest/usage.html#Running-Jupyter\n \n NOTE: on error doesn't raise exception and just prints error message\n \n \"\"\"\n \n # Hacky stuff, because Jupyter only allows to set a per user custom js, we want per project js\n try:\n from IPython.core.display import HTML\n on_rtd = os.environ.get('READTHEDOCS') == 'True'\n\n if on_rtd:\n # on RTD we don't inject anything, files are set in sphinx conf.py\n print(\"\")\n else:\n # NOTE: \n # 1. regardless of the notebook position from which you are importing,\n # in root you get the directory of jupman.py file\n # 2. in Jupyter you *cannot* know reliably the worksheet position \n # see https://github.com/ipython/ipython/issues/10123\n # so it is better to include scripts instead of using relative imports\n\n root = os.path.dirname(os.path.abspath(__file__))\n _static = os.path.join(root, '_static') \n \n css = open(\"%s/css/jupman.css\" % _static, \"r\").read()\n tocjs = open(\"%s/js/toc.js\" % _static, \"r\").read()\n js = open(\"%s/js/jupman.js\" % _static, \"r\").read()\n\n ret = \"<style>\\n\" \n ret += css\n ret += \"\\n </style>\\n\"\n\n ret +=\"\\n\"\n\n ret += \"<script>\\n\"\n ret += \"var JUPMAN_IN_JUPYTER = true;\" \n ret += \"\\n\"\n if toc:\n ret += tocjs\n ret += \"\\n\" \n ret += js\n ret += \"\\n</script>\\n\"\n return HTML(ret)\n except Exception as ex:\n print(ex)\n\n\ndef get_class(meth):\n \"\"\" Return the class of method meth\n \n Taken from here: https://stackoverflow.com/a/25959545\n \"\"\"\n\n if inspect.ismethod(meth):\n for cls in inspect.getmro(meth.__self__.__class__):\n if cls.__dict__.get(meth.__name__) is meth:\n return cls\n meth = meth.__func__ # fallback to __qualname__ parsing\n if inspect.isfunction(meth):\n cls = getattr(inspect.getmodule(meth),\n meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])\n if isinstance(cls, type):\n return cls\n ret = getattr(meth, '__objclass__', None) # handle special descriptor objects\n if ret == None:\n raise ValueError(\"Couldn't find the class of method %s\" % meth)\n return ret\n\ndef run(classOrMethodOrModule): \n \"\"\" Runs test class or method or Module. 
Doesn't show code nor output in html.\n\n todo look at test order here: http://stackoverflow.com/a/18499093 \n \"\"\" \n\n if inspect.isclass(classOrMethodOrModule) and issubclass(classOrMethodOrModule, unittest.TestCase): \n testcase = classOrMethodOrModule\n suite = unittest.TestLoader().loadTestsFromTestCase(testcase)\n elif inspect.isfunction(classOrMethodOrModule):\n meth = classOrMethodOrModule\n suite = unittest.TestSuite()\n testcase = get_class(meth)\n suite.addTest(testcase(meth.__name__))\n elif inspect.ismodule(classOrMethodOrModule):\n module = classOrMethodOrModule\n suite = unittest.TestLoader().loadTestsFromModule(module)\n else:\n raise Exception(\"Accepted parameters are either a TestCase class, a TestCase method or a test module. Found instead: \" + str(classOrMethodOrModule))\n\n unittest.TextTestRunner(verbosity=1,stream=sys.stderr).run( suite )\n\ndef show_run(classOrMethod): \n \"\"\" Runs test class or method. Code is not shown, but output is\n\n @since 0.19\n @deprecated Just use run()\n \"\"\" \n run(classOrMethod)\n\ndef save_py(filename, data):\n \"\"\" Creates a .py file holding pydata assigned to a variable \n \n Example: save_py('my_data.py', ['a','b','c'])\n \n will create a file containing the line:\n \n my_data = ['a','b','c'] \n \n @since 3.3\n \"\"\"\n with open(filename, \"w+\", encoding='utf-8') as expo: \n from pprint import pformat\n s = pformat(data) \n expo.write(filename[:-3])\n expo.write(' = ')\n expo.write(s) \n \n\ndef mem_limit(MB=None):\n \"\"\"Limits the memory this Python process can use. By default uses half free memory.\n \n @since 3.3\n \n \"\"\"\n # from https://stackoverflow.com/questions/41105733/limit-ram-usage-to-python-program \n \n # TODO CHECK WINDOWS: \n # https://stackoverflow.com/questions/54949110/limit-python-script-ram-usage-in-windows\n # https://stackoverflow.com/questions/10892258/resource-limits-on-windows\n\n import os\n if os.name == 'nt':\n print('WARNING: limiting memory on Windows is not supported')\n return\n \n import resource\n with open('/proc/meminfo', 'r') as mem:\n free_memory = 0\n for i in mem:\n sline = i.split() \n if str(sline[0]) == 'MemAvailable:':\n free_memory = int(sline[1]) \n break \n if sline[2] != 'kB':\n raise Exception('Unrecognized memory unit:', sline[2])\n soft, hard = resource.getrlimit(resource.RLIMIT_AS)\n if not MB: \n MB = (free_memory // 1024) // 2 \n print('Free mem:', free_memory//1024, 'MB', \n ' Limiting to:', MB, 'MB')\n resource.setrlimit(resource.RLIMIT_AS, (MB *1024 * 1024, hard))\n\n\n\n \ndef draw_img(path, figsize=None):\n \"\"\" Display images of given size\n Workaround for https://github.com/DavidLeoni/jupman/issues/61\n \n @since 3.3\n \"\"\"\n import matplotlib.pyplot as plt;\n import matplotlib.image as mpimg\n img = mpimg.imread(path)\n if figsize: \n fig, ax = plt.subplots(1, 1, figsize=figsize)\n else:\n fig,ax = plt.subplots(1, 1) \n ax.axis('off')\n \n plt.imshow(img) \n\n \ndef draw_text(text, fontsize=None): \n \"\"\" Display text as image\n Workaround for https://github.com/DavidLeoni/jupman/issues/66 \n \n @since 3.3\n \"\"\"\n import matplotlib.pyplot as plt\n \n fig, ax = plt.subplots(1, 1, figsize=(1,1)) \n \n # Note: figsize doesn't appear to work to reduce size, only to increase and doesn't scale text anyway\n # if figsize is not set, text appears too low\n \n if fontsize:\n plt.text(0, 0, str(text),fontsize=fontsize)\n else:\n plt.text(0, 0, str(text),fontsize=9) # note: this default looks good in PDF, but is small for jupyter\n ax.axis('off') \n 
plt.show() \n \ndef draw_df(df, fontsize=16, scale=(1.8, 3.9), figsize=(12, 2)):\n \"\"\" Draws a Pandas DataFrame as an image\n Taken from https://stackoverflow.com/a/36904120\n @since 3.3\n \"\"\" \n import pandas as pd\n import matplotlib.pyplot as plt\n from pandas.plotting import table\n import numpy as np\n fig, ax = plt.subplots(figsize=figsize) # set size frame\n ax.xaxis.set_visible(False) # hide the x axis\n ax.yaxis.set_visible(False) # hide the y axis\n ax.set_frame_on(False) # no visible frame, uncomment if size is ok\n col_widths = [0.008 * (8 + df[col].map(lambda x: len(str(x))).max()) for col in df]\n tabla = table(ax, df, loc='upper right', colWidths=col_widths) # where df is your data frame\n tabla.auto_set_font_size(False) # Activate set fontsize manually\n tabla.set_fontsize(fontsize) # if ++fontsize is necessary ++colWidths\n tabla.scale(scale[0], scale[1]) # change size table\n #plt.savefig('table.png', transparent=True) \n \ndef get_doc(fun):\n \"\"\" Returns the help of a function formatted in a faithful manner\n \n @since 3.3\n \"\"\"\n import pydoc\n lines = pydoc.render_doc(fun, renderer=pydoc.plaintext).split('\\n')\n \n return 'def ' + lines[2] + ':\\n \"\"\" ' + '\\n '.join(lines[3:]).strip()+ '\\n \"\"\"' \n \ndef pytut_json(jm_code):\n \"\"\" Runs jm_code and return a JSON execution trace\n\n # David Leoni: 15 March 2020 \n # I JUST MERGED RELEVANT FILES OF PYTHON TUTOR INTO THIS ONE\n # HACKS ARE MARKED WITH 'JUPMAN' or 'JM'\n # \n # ALL CREDITS FOR PYTHON TUTOR GO TO Philip J. Guo ([email protected])\n # SEE COPYRIGHT BELOW\n \"\"\"\n\n import sys\n from types import ModuleType\n\n class MockModule(ModuleType):\n def __init__(self, module_name, module_doc=None):\n ModuleType.__init__(self, module_name, module_doc)\n if '.' in module_name:\n package, module = module_name.rsplit('.', 1)\n get_mock_module(package).__path__ = []\n setattr(get_mock_module(package), module, self)\n\n def _initialize_(self, module_code):\n self.__dict__.update(module_code(self.__name__))\n self.__doc__ = module_code.__doc__\n\n def get_mock_module(module_name):\n if module_name not in sys.modules:\n sys.modules[module_name] = MockModule(module_name)\n return sys.modules[module_name]\n\n def modulize(module_name, dependencies=[]):\n for d in dependencies: get_mock_module(d)\n return get_mock_module(module_name)._initialize_\n\n ##===========================================================================##\n\n @modulize('pg_encoder')\n def _pg_encoder(__name__):\n ##----- Begin pg_encoder.py --------------------------------------------------##\n # Online Python Tutor\n # https://github.com/pgbovine/OnlinePythonTutor/\n #\n # Copyright (C) Philip J. 
Guo ([email protected])\n #\n # Permission is hereby granted, free of charge, to any person obtaining a\n # copy of this software and associated documentation files (the\n # \"Software\"), to deal in the Software without restriction, including\n # without limitation the rights to use, copy, modify, merge, publish,\n # distribute, sublicense, and/or sell copies of the Software, and to\n # permit persons to whom the Software is furnished to do so, subject to\n # the following conditions:\n #\n # The above copyright notice and this permission notice shall be included\n # in all copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \n # Thanks to John DeNero for making the encoder work on both Python 2 and 3\n # (circa 2012-2013)\n \n \n # Given an arbitrary piece of Python data, encode it in such a manner\n # that it can be later encoded into JSON.\n # http://json.org/\n #\n # We use this function to encode run-time traces of data structures\n # to send to the front-end.\n #\n # Format:\n # Primitives:\n # * None, int, long, float, str, bool - unchanged\n # (json.dumps encodes these fine verbatim, except for inf, -inf, and nan)\n #\n # exceptions: float('inf') -> ['SPECIAL_FLOAT', 'Infinity']\n # float('-inf') -> ['SPECIAL_FLOAT', '-Infinity']\n # float('nan') -> ['SPECIAL_FLOAT', 'NaN']\n # x == int(x) -> ['SPECIAL_FLOAT', '%.1f' % x]\n # (this way, 3.0 prints as '3.0' and not as 3, which looks like an int)\n #\n # If render_heap_primitives is True, then primitive values are rendered\n # on the heap as ['HEAP_PRIMITIVE', <type name>, <value>]\n #\n # (for SPECIAL_FLOAT values, <value> is a list like ['SPECIAL_FLOAT', 'Infinity'])\n #\n # added on 2018-06-13:\n # ['IMPORTED_FAUX_PRIMITIVE', <label>] - renders externally imported objects\n # like they were primitives, to save\n # space and to prevent from having to\n # recurse into of them to see internals\n #\n # Compound objects:\n # * list - ['LIST', elt1, elt2, elt3, ..., eltN]\n # * tuple - ['TUPLE', elt1, elt2, elt3, ..., eltN]\n # * set - ['SET', elt1, elt2, elt3, ..., eltN]\n # * dict - ['DICT', [key1, value1], [key2, value2], ..., [keyN, valueN]]\n # * instance - ['INSTANCE', class name, [attr1, value1], [attr2, value2], ..., [attrN, valueN]]\n # * instance with non-trivial __str__ defined - ['INSTANCE_PPRINT', class name, <__str__ value>, [attr1, value1], [attr2, value2], ..., [attrN, valueN]]\n # * class - ['CLASS', class name, [list of superclass names], [attr1, value1], [attr2, value2], ..., [attrN, valueN]]\n # * function - ['FUNCTION', function name, parent frame ID (for nested functions),\n # [*OPTIONAL* list of pairs of default argument names/values] ] <-- final optional element added on 2018-06-13\n # * module - ['module', module name]\n # * other - [<type name>, string representation of object]\n # * compound object reference - ['REF', target object's unique_id]\n #\n # the unique_id is derived from id(), which allows us to capture aliasing\n \n \n # number of significant digits for floats\n FLOAT_PRECISION = 4\n \n \n from collections import 
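defaultdict\n    #\n    # (Worked example, illustrative -- not upstream text) given the program\n    #\n    #   x = [1, 2]\n    #   y = x\n    #\n    # the encoder emits, per the format spec above:\n    #\n    #   globals: {'x': ['REF', 1], 'y': ['REF', 1]}\n    #   heap:    {1: ['LIST', 1, 2]}\n    #\n    # i.e. both names hold a REF to the same small ID, which is exactly how\n    # aliasing shows up in the trace. One helper container below is a 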
defaultdict\n import re\n import types\n import sys\n import math\n typeRE = re.compile(\"<type '(.*)'>\")\n classRE = re.compile(\"<class '(.*)'>\")\n \n import inspect\n \n # TODO: maybe use the 'six' library to smooth over Py2 and Py3 incompatibilities?\n is_python3 = (sys.version_info[0] == 3)\n if is_python3:\n # avoid name errors (GROSS!)\n long = int\n unicode = str\n \n \n def is_class(dat):\n \"\"\"Return whether dat is a class.\"\"\"\n if is_python3:\n return isinstance(dat, type)\n else:\n return type(dat) in (types.ClassType, types.TypeType)\n \n \n def is_instance(dat):\n \"\"\"Return whether dat is an instance of a class.\"\"\"\n if is_python3:\n return type(dat) not in PRIMITIVE_TYPES and \\\n isinstance(type(dat), type) and \\\n not isinstance(dat, type)\n else:\n # ugh, classRE match is a bit of a hack :(\n return type(dat) == types.InstanceType or classRE.match(str(type(dat)))\n \n \n def get_name(obj):\n \"\"\"Return the name of an object.\"\"\"\n return obj.__name__ if hasattr(obj, '__name__') else get_name(type(obj))\n \n \n PRIMITIVE_TYPES = (int, long, float, str, unicode, bool, type(None))\n \n \n def encode_primitive(dat):\n t = type(dat)\n if t is float:\n if math.isinf(dat):\n if dat > 0:\n return ['SPECIAL_FLOAT', 'Infinity']\n else:\n return ['SPECIAL_FLOAT', '-Infinity']\n elif math.isnan(dat):\n return ['SPECIAL_FLOAT', 'NaN']\n else:\n # render floats like 3.0 as '3.0' and not as 3\n if dat == int(dat):\n return ['SPECIAL_FLOAT', '%.1f' % dat]\n else:\n return round(dat, FLOAT_PRECISION)\n elif t is str and (not is_python3):\n # hack only for Python 2 strings ... always turn into unicode\n # and display '?' when it's not valid unicode\n return dat.decode('utf-8', 'replace')\n else:\n # return all other primitives verbatim\n return dat\n \n \n # grab a line number like ' <line 2>' or ' <line 2b>'\n def create_lambda_line_number(codeobj, line_to_lambda_code):\n try:\n lambda_lineno = codeobj.co_firstlineno\n lst = line_to_lambda_code[lambda_lineno]\n ind = lst.index(codeobj)\n # add a suffix for all subsequent lambdas on a line beyond the first\n # (nix this for now because order isn't guaranteed when you have\n # multiple lambdas on the same line)\n '''\n if ind > 0:\n lineno_str = str(lambda_lineno) + chr(ord('a') + ind)\n else:\n lineno_str = str(lambda_lineno)\n '''\n lineno_str = str(lambda_lineno)\n return ' <line ' + lineno_str + '>'\n except:\n return ''\n \n \n # Note that this might BLOAT MEMORY CONSUMPTION since we're holding on\n # to every reference ever created by the program without ever releasing\n # anything!\n class ObjectEncoder:\n def __init__(self, parent):\n self.parent = parent # should be a PGLogger object\n \n # Key: canonicalized small ID\n # Value: encoded (compound) heap object\n self.encoded_heap_objects = {}\n \n self.render_heap_primitives = parent.render_heap_primitives\n \n self.id_to_small_IDs = {}\n self.cur_small_ID = 1\n \n # wow, creating unique identifiers for lambdas is quite annoying,\n # especially if we want to properly differentiate:\n # 1.) multiple lambdas defined on the same line, and\n # 2.) 
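the same lambda code defined multiple times on different lines.\n            #\n            # (Illustrative, not upstream text) case 2.) arises from e.g.\n            #\n            #   g = lambda x: x + 1    # defined on line 1\n            #   g = lambda x: x + 1    # re-defined, say, on line 3\n            #\n            # line_to_lambda_code below keys each lambda's code object by its\n            # line number, so the two definitions stay distinct.\n            # In short, line numbers handle 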
the same lambda code defined multiple times on different lines\n #\n # However, it gets confused when there are multiple identical\n # lambdas on the same line, like:\n # f(lambda x:x*x, lambda y:y*y, lambda x:x*x)\n \n # (assumes everything is in one file)\n # Key: line number\n # Value: list of the code objects of lambdas defined\n # on that line in the order they were defined\n self.line_to_lambda_code = defaultdict(list)\n \n def should_hide_var(self, var):\n return self.parent.should_hide_var(var)\n \n # searches through self.parents.types_to_inline and tries\n # to match the type returned by type(obj).__name__ and\n # also 'class' and 'instance' for classes and instances, respectively\n def should_inline_object_by_type(self, obj):\n # fast-pass optimization -- common case\n if not self.parent.types_to_inline:\n return False\n \n # copy-pasted from the end of self.encode()\n typ = type(obj)\n typename = typ.__name__\n \n # pick up built-in functions too:\n if typ in (types.FunctionType, types.MethodType, types.BuiltinFunctionType, types.BuiltinMethodType):\n typename = 'function'\n \n if not typename:\n return False\n \n alt_typename = None\n if is_class(obj):\n alt_typename = 'class'\n elif is_instance(obj) and typename != 'function':\n # if obj is an instance of the Fooo class, then we want to match\n # on both 'instance' and 'Fooo'\n # (exception: 'function' objects are sometimes also instances,\n # but we still want to call them 'function', so ignore them)\n typename = 'instance'\n class_name = None\n if hasattr(obj, '__class__'):\n # common case ...\n class_name = get_name(obj.__class__)\n else:\n # super special case for something like\n # \"from datetime import datetime_CAPI\" in Python 3.2,\n # which is some weird 'PyCapsule' type ...\n # http://docs.python.org/release/3.1.5/c-api/capsule.html\n class_name = get_name(type(obj))\n alt_typename = class_name\n \n for re_match in self.parent.types_to_inline:\n if re_match(typename):\n return True\n if alt_typename and re_match(alt_typename):\n return True\n return False\n \n def get_heap(self):\n return self.encoded_heap_objects\n \n def reset_heap(self):\n # VERY IMPORTANT to reassign to an empty dict rather than just\n # clearing the existing dict, since get_heap() could have been\n # called earlier to return a reference to a previous heap state\n self.encoded_heap_objects = {}\n \n def set_function_parent_frame_ID(self, ref_obj, enclosing_frame_id):\n assert ref_obj[0] == 'REF'\n func_obj = self.encoded_heap_objects[ref_obj[1]]\n assert func_obj[0] == 'FUNCTION'\n func_obj[-1] = enclosing_frame_id\n \n # return either a primitive object or an object reference;\n # and as a side effect, update encoded_heap_objects\n def encode(self, dat, get_parent):\n \"\"\"Encode a data value DAT using the GET_PARENT function for parent ids.\"\"\"\n # primitive type\n if not self.render_heap_primitives and type(dat) in PRIMITIVE_TYPES:\n return encode_primitive(dat)\n # compound type - return an object reference and update encoded_heap_objects\n else:\n # IMPORTED_FAUX_PRIMITIVE feature added on 2018-06-13:\n # is dat defined in external (i.e., non-user) code?\n is_externally_defined = False\n try:\n # some objects don't return anything for getsourcefile() but DO return\n # something legit for getmodule(). 
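e.g., \"from io import StringIO\".\n                #\n                # (Illustrative, not upstream text) the net effect: a class\n                # imported from the standard library on a typical install, say\n                #\n                #   from collections import OrderedDict\n                #\n                # ends up encoded as ['IMPORTED_FAUX_PRIMITIVE', 'imported class']\n                # rather than being recursed into.\n                #\n                # As noted above, getmodule() can succeed where getsourcefile() fails, 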
e.g., \"from io import StringIO\"\n # so TRY getmodule *first* and then fall back on getsourcefile\n # since getmodule seems more robust empirically ...\n gsf = inspect.getmodule(dat).__file__\n if not gsf:\n gsf = inspect.getsourcefile(dat)\n \n # a hacky heuristic is that if gsf is an absolute path, then it's likely\n # to be some library function and *not* in user-defined code\n #\n # NB: don't use os.path.isabs() since it doesn't work on some\n # python installations (e.g., on my webserver) and also adds a\n # dependency on the os module. just do a simple check:\n #\n # hacky: do other checks for strings that are indicative of files\n # that load user-written code, like 'generate_json_trace.py'\n if gsf and gsf[0] == '/' and 'generate_json_trace.py' not in gsf:\n is_externally_defined = True\n except (AttributeError, TypeError):\n pass # fail soft\n my_id = id(dat)\n \n # if dat is an *real* object instance (and not some special built-in one\n # like ABCMeta, or a py3 function object), then DON'T treat it as\n # externally-defined because a user might be instantiating an *instance*\n # of an imported class in their own code, so we want to show that instance\n # in da visualization - ugh #hacky\n if (is_instance(dat) and\n type(dat) not in (types.FunctionType, types.MethodType, types.BuiltinFunctionType, types.BuiltinMethodType) and\n hasattr(dat, '__class__') and (get_name(dat.__class__) != 'ABCMeta')):\n is_externally_defined = False\n \n # if this is an externally-defined object (i.e., from an imported\n # module, don't try to recurse into it since we don't want to see\n # the internals of imported objects; just return an\n # IMPORTED_FAUX_PRIMITIVE object and continue along on our way\n if is_externally_defined:\n label = 'object'\n try:\n label = type(dat).__name__\n if is_class(dat):\n label = 'class'\n elif is_instance(dat):\n label = 'object'\n except:\n pass\n # punt early!\n return ['IMPORTED_FAUX_PRIMITIVE', 'imported ' + label]\n \n # next check whether it should be inlined\n if self.should_inline_object_by_type(dat):\n label = 'object'\n try:\n label = type(dat).__name__\n if is_class(dat):\n class_name = get_name(dat)\n label = class_name + ' class'\n elif is_instance(dat):\n # a lot of copy-pasta from other parts of this file:\n # TODO: clean up\n class_name = None\n if hasattr(dat, '__class__'):\n # common case ...\n class_name = get_name(dat.__class__)\n else:\n # super special case for something like\n # \"from datetime import datetime_CAPI\" in Python 3.2,\n # which is some weird 'PyCapsule' type ...\n # http://docs.python.org/release/3.1.5/c-api/capsule.html\n class_name = get_name(type(dat))\n if class_name:\n label = class_name + ' instance'\n else:\n label = 'instance'\n except:\n pass\n # punt early!\n return ['IMPORTED_FAUX_PRIMITIVE', label + ' (hidden)']\n \n try:\n my_small_id = self.id_to_small_IDs[my_id]\n except KeyError:\n my_small_id = self.cur_small_ID\n self.id_to_small_IDs[my_id] = self.cur_small_ID\n self.cur_small_ID += 1\n \n del my_id # to prevent bugs later in this function\n \n ret = ['REF', my_small_id]\n \n # punt early if you've already encoded this object\n if my_small_id in self.encoded_heap_objects:\n return ret\n \n # major side-effect!\n new_obj = []\n self.encoded_heap_objects[my_small_id] = new_obj\n \n typ = type(dat)\n \n if typ == list:\n new_obj.append('LIST')\n for e in dat:\n new_obj.append(self.encode(e, get_parent))\n elif typ == tuple:\n new_obj.append('TUPLE')\n for e in dat:\n new_obj.append(self.encode(e, get_parent))\n 
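# (Illustrative, not upstream text) sample encodings, following the\n                # format spec at the top of this file (Python 3 ints and strings\n                # pass through encode_primitive verbatim):\n                #\n                #   [1, 2]    -> ['LIST', 1, 2]\n                #   (1,)      -> ['TUPLE', 1]\n                #   {'a': 1}  -> ['DICT', ['a', 1]]\n                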
elif typ == set:\n new_obj.append('SET')\n for e in dat:\n new_obj.append(self.encode(e, get_parent))\n elif typ == dict:\n new_obj.append('DICT')\n for (k, v) in dat.items():\n # don't display some built-in locals ...\n if k not in ('__module__', '__return__', '__locals__'):\n new_obj.append(\n [self.encode(k, get_parent), self.encode(v, get_parent)])\n elif typ in (types.FunctionType, types.MethodType):\n if is_python3:\n argspec = inspect.getfullargspec(dat)\n else:\n argspec = inspect.getargspec(dat)\n \n printed_args = [e for e in argspec.args]\n \n default_arg_names_and_vals = []\n if argspec.defaults:\n num_missing_defaults = len(\n printed_args) - len(argspec.defaults)\n assert num_missing_defaults >= 0\n # tricky tricky tricky how default positional arguments work!\n for i in range(num_missing_defaults, len(printed_args)):\n default_arg_names_and_vals.append((printed_args[i], self.encode(\n argspec.defaults[i-num_missing_defaults], get_parent)))\n \n if argspec.varargs:\n printed_args.append('*' + argspec.varargs)\n \n if is_python3:\n # kwonlyargs come before varkw\n if argspec.kwonlyargs:\n printed_args.extend(argspec.kwonlyargs)\n if argspec.kwonlydefaults:\n # iterate in order of appearance in kwonlyargs\n for varname in argspec.kwonlyargs:\n if varname in argspec.kwonlydefaults:\n val = argspec.kwonlydefaults[varname]\n default_arg_names_and_vals.append(\n (varname, self.encode(val, get_parent)))\n if argspec.varkw:\n printed_args.append('**' + argspec.varkw)\n else:\n if argspec.keywords:\n printed_args.append('**' + argspec.keywords)\n \n func_name = get_name(dat)\n \n pretty_name = func_name\n \n # sometimes might fail for, say, <genexpr>, so just ignore\n # failures for now ...\n try:\n pretty_name += '(' + ', '.join(printed_args) + ')'\n except TypeError:\n pass\n \n # put a line number suffix on lambdas to more uniquely identify\n # them, since they don't have names\n if func_name == '<lambda>':\n cod = (dat.__code__ if is_python3 else dat.func_code) # ugh!\n lst = self.line_to_lambda_code[cod.co_firstlineno]\n if cod not in lst:\n lst.append(cod)\n pretty_name += create_lambda_line_number(cod,\n self.line_to_lambda_code)\n \n encoded_val = ['FUNCTION', pretty_name, None]\n if get_parent:\n enclosing_frame_id = get_parent(dat)\n encoded_val[2] = enclosing_frame_id\n new_obj.extend(encoded_val)\n # OPTIONAL!!!\n if default_arg_names_and_vals:\n # *append* it as a single list element\n new_obj.append(default_arg_names_and_vals)\n \n elif typ is types.BuiltinFunctionType:\n pretty_name = get_name(dat) + '(...)'\n new_obj.extend(['FUNCTION', pretty_name, None])\n elif is_class(dat) or is_instance(dat):\n self.encode_class_or_instance(dat, new_obj)\n elif typ is types.ModuleType:\n new_obj.extend(['module', dat.__name__])\n elif typ in PRIMITIVE_TYPES:\n assert self.render_heap_primitives\n new_obj.extend(['HEAP_PRIMITIVE', type(\n dat).__name__, encode_primitive(dat)])\n else:\n typeStr = str(typ)\n m = typeRE.match(typeStr)\n \n if not m:\n m = classRE.match(typeStr)\n \n assert m, typ\n \n if is_python3:\n encoded_dat = str(dat)\n else:\n # ugh, for bytearray() in Python 2, str() returns\n # non-JSON-serializable characters, so need to decode:\n encoded_dat = str(dat).decode('utf-8', 'replace')\n new_obj.extend([m.group(1), encoded_dat])\n \n return ret\n \n def encode_class_or_instance(self, dat, new_obj):\n \"\"\"Encode dat as a class or instance.\"\"\"\n if is_instance(dat):\n if hasattr(dat, '__class__'):\n # common case ...\n class_name = get_name(dat.__class__)\n 
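# (Illustrative, not upstream text) e.g. for\n                #\n                #   class Dog:\n                #       def __init__(self): self.name = 'Fido'\n                #   d = Dog()\n                #\n                # this method emits ['CLASS', 'Dog', [], ...] for the class and\n                # ['INSTANCE', 'Dog', ['name', 'Fido']] for the instance.\n                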
else:\n # super special case for something like\n # \"from datetime import datetime_CAPI\" in Python 3.2,\n # which is some weird 'PyCapsule' type ...\n # http://docs.python.org/release/3.1.5/c-api/capsule.html\n class_name = get_name(type(dat))\n \n pprint_str = None\n # do you or any of your superclasses have a __str__ field? if so, pretty-print yourself!\n if hasattr(dat, '__str__'):\n try:\n pprint_str = dat.__str__()\n \n # sometimes you'll get 'trivial' pprint_str like: '<__main__.MyObj object at 0x10f465cd0>'\n # or '<module 'collections' ...'\n # IGNORE THOSE!!!\n if pprint_str[0] == '<' and pprint_str[-1] == '>' and (' at ' in pprint_str or pprint_str.startswith('<module')):\n pprint_str = None\n except:\n pass\n \n # TODO: filter for trivial-looking pprint_str like those produced\n # by object.__str__\n if pprint_str:\n new_obj.extend(['INSTANCE_PPRINT', class_name, pprint_str])\n else:\n new_obj.extend(['INSTANCE', class_name])\n \n # don't traverse inside modules, or else risk EXPLODING the visualization\n if class_name == 'module':\n return\n else:\n superclass_names = [\n e.__name__ for e in dat.__bases__ if e is not object]\n new_obj.extend(['CLASS', get_name(dat), superclass_names])\n \n # traverse inside of its __dict__ to grab attributes\n # (filter out useless-seeming ones, based on anecdotal observation):\n hidden = ('__doc__', '__module__', '__return__', '__dict__',\n '__locals__', '__weakref__', '__qualname__')\n if hasattr(dat, '__dict__'):\n user_attrs = sorted([e for e in dat.__dict__ if e not in hidden])\n else:\n user_attrs = []\n \n for attr in user_attrs:\n if not self.should_hide_var(attr):\n new_obj.append(\n [self.encode(attr, None), self.encode(dat.__dict__[attr], None)])\n \n ##----- End pg_encoder.py ----------------------------------------------------##\n return locals()\n\n @modulize('pg_logger')\n def _pg_logger(__name__):\n ##----- Begin pg_logger.py ---------------------------------------------------##\n # Online Python Tutor\n # https://github.com/pgbovine/OnlinePythonTutor/\n #\n # Copyright (C) Philip J. Guo ([email protected])\n #\n # Permission is hereby granted, free of charge, to any person obtaining a\n # copy of this software and associated documentation files (the\n # \"Software\"), to deal in the Software without restriction, including\n # without limitation the rights to use, copy, modify, merge, publish,\n # distribute, sublicense, and/or sell copies of the Software, and to\n # permit persons to whom the Software is furnished to do so, subject to\n # the following conditions:\n #\n # The above copyright notice and this permission notice shall be included\n # in all copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \n \n # This is the meat of the Online Python Tutor back-end. 
It implements a\n # full logger for Python program execution (based on pdb, the standard\n # Python debugger imported via the bdb module), printing out the values\n # of all in-scope data structures after each executed instruction.\n \n # NB: try to import the minimal amount of stuff in this module to lessen\n # the security attack surface\n \n import imp\n import sys\n import bdb # the KEY import here!\n import re\n import traceback\n import types\n \n # TODO: use the 'six' package to smooth out Py2 and Py3 differences\n is_python3 = (sys.version_info[0] == 3)\n \n # NB: don't use cStringIO since it doesn't support unicode!!!\n if is_python3:\n import io as StringIO\n import io # expose regular io for Python3 users too\n else:\n import StringIO\n import pg_encoder\n \n \n # upper-bound on the number of executed lines, in order to guard against\n # infinite loops\n #MAX_EXECUTED_LINES = 300\n MAX_EXECUTED_LINES = 1000 # on 2016-05-01, I increased the limit from 300 to 1000 for Python due to popular user demand! and I also improved the warning message\n \n #DEBUG = False\n DEBUG = True\n \n BREAKPOINT_STR = '#break'\n \n # if a line starts with this string, then look for a comma-separated\n # list of variables after the colon. *hide* those variables in da trace\n #\n # 2018-06-17:\n # - now supports unix-style shell globs using the syntax in\n # https://docs.python.org/3/library/fnmatch.html so you can write things\n # like '#pythontutor_hide: _*' to hide all private instance variables\n # - also now filters class and instance fields in addition to top-level vars\n PYTUTOR_HIDE_STR = '#pythontutor_hide:'\n # 2018-06-17: a comma-separated list of types that should be displayed *inline*\n # like primitives, with their actual values HIDDEN to save space. 
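for\n    # example (illustrative, not upstream text), a traced script could start with\n    #\n    #   #pythontutor_hide: _*, secret\n    #   #pythontutor_hide_type: Deque*, instance\n    #\n    # to hide every variable matching those globs and to render matching types\n    # inline ('Deque*' and 'secret' are just hypothetical patterns). And 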
for details\n # of what types are legal to specify, see:\n # pg_encoder.py:should_inline_object_by_type()\n # - also accepts shell globs, just like PYTUTOR_HIDE_STR\n PYTUTOR_INLINE_TYPE_STR = '#pythontutor_hide_type:'\n \n CLASS_RE = re.compile('class\\s+')\n \n # copied-pasted from translate() in https://github.com/python/cpython/blob/2.7/Lib/fnmatch.py\n \n \n def globToRegex(pat):\n \"\"\"Translate a shell PATTERN to a regular expression.\n There is no way to quote meta-characters.\n \"\"\"\n \n i, n = 0, len(pat)\n res = ''\n while i < n:\n c = pat[i]\n i = i+1\n if c == '*':\n res = res + '.*'\n elif c == '?':\n res = res + '.'\n elif c == '[':\n j = i\n if j < n and pat[j] == '!':\n j = j+1\n if j < n and pat[j] == ']':\n j = j+1\n while j < n and pat[j] != ']':\n j = j+1\n if j >= n:\n res = res + '\\\\['\n else:\n stuff = pat[i:j].replace('\\\\', '\\\\\\\\')\n i = j+1\n if stuff[0] == '!':\n stuff = '^' + stuff[1:]\n elif stuff[0] == '^':\n stuff = '\\\\' + stuff\n res = '%s[%s]' % (res, stuff)\n else:\n res = res + re.escape(c)\n return res + '\\Z(?ms)'\n \n \n def compileGlobMatch(pattern):\n # very important to use match and *not* search!\n return re.compile(globToRegex(pattern)).match\n \n \n # test globToRegex and compileGlobMatch\n '''\n for e in ('_*', '__*', '__*__', '*_$'):\n stuff = compileGlobMatch(e)\n for s in ('_test', 'test_', '_test_', '__test', '__test__'):\n print(e, s, stuff(s) is not None)\n '''\n \n \n TRY_ANACONDA_STR = '\\n\\nYou can also try \"Python 3.6 with Anaconda (experimental)\",\\nwhich is slower but lets you import many more modules.\\n'\n \n \n # simple sandboxing scheme:\n #\n # - use resource.setrlimit to deprive this process of ANY file descriptors\n # (which will cause file read/write and subprocess shell launches to fail)\n # - restrict user builtins and module imports\n # (beware that this is NOT foolproof at all ... there are known flaws!)\n #\n # ALWAYS use defense-in-depth and don't just rely on these simple mechanisms\n try:\n import resource\n resource_module_loaded = True\n except ImportError:\n # Google App Engine doesn't seem to have the 'resource' module\n resource_module_loaded = False\n \n \n # From http://coreygoldberg.blogspot.com/2009/05/python-redirect-or-turn-off-stdout-and.html\n class NullDevice():\n def write(self, s):\n pass\n \n \n # ugh, I can't figure out why in Python 2, __builtins__ seems to\n # be a dict, but in Python 3, __builtins__ seems to be a module,\n # so just handle both cases ... 
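UGLY!\n    #\n    # (Worked example, illustrative -- not upstream text) globToRegex() and\n    # compileGlobMatch() above turn shell globs into anchored regexes:\n    #\n    #   globToRegex('_*')               ->  '_.*\\Z(?ms)'\n    #   compileGlobMatch('_*')('_tmp')  ->  a match object\n    #   compileGlobMatch('_*')('tmp_')  ->  None  (match, not search!)\n    #\n    # Handling both the dict and the module case for __builtins__ below is 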
UGLY!\n if type(__builtins__) is dict:\n BUILTIN_IMPORT = __builtins__['__import__']\n else:\n assert type(__builtins__) is types.ModuleType\n BUILTIN_IMPORT = __builtins__.__import__\n \n \n # whitelist of module imports\n ALLOWED_STDLIB_MODULE_IMPORTS = ('math', 'random', 'time', 'datetime',\n 'functools', 'itertools', 'operator', 'string',\n 'collections', 're', 'json',\n 'heapq', 'bisect', 'copy', 'hashlib', 'typing',\n # the above modules were first added in 2012-09\n # and then incrementally appended to up until\n # 2016-ish (see git blame logs)\n \n # added these additional ones on 2018-06-15\n # after seeing usage logs of what users tried\n # importing a lot but we didn't support yet\n # (ignoring imports that heavily deal with\n # filesystem, networking, or 3rd-party libs)\n '__future__', 'cmath', 'decimal', 'fractions',\n 'pprint', 'calendar', 'pickle',\n 'types', 'array',\n 'locale', 'abc',\n 'doctest', 'unittest',\n )\n \n # allow users to import but don't explicitly import it since it's\n # already been done above\n OTHER_STDLIB_WHITELIST = ('StringIO', 'io')\n \n \n # Restrict imports to a whitelist\n def __restricted_import__(*args):\n # filter args to ONLY take in real strings so that someone can't\n # subclass str and bypass the 'in' test on the next line\n args = [e for e in args if type(e) is str]\n \n all_allowed_imports = sorted(\n ALLOWED_STDLIB_MODULE_IMPORTS + OTHER_STDLIB_WHITELIST)\n if is_python3:\n all_allowed_imports.remove('StringIO')\n else:\n all_allowed_imports.remove('typing')\n \n if args[0] in all_allowed_imports:\n imported_mod = BUILTIN_IMPORT(*args)\n # somewhat weak protection against imported modules that contain one\n # of these troublesome builtins. again, NOTHING is foolproof ...\n # just more defense in depth :)\n #\n # unload it so that if someone attempts to reload it, then it has to be\n # loaded from the filesystem, which is (supposedly!) blocked by setrlimit\n for mod in ('os', 'sys', 'posix', 'gc'):\n if hasattr(imported_mod, mod):\n delattr(imported_mod, mod)\n \n return imported_mod\n else:\n # original error message ...\n #raise ImportError('{0} not supported'.format(args[0]))\n \n # 2017-12-06: added a better error message to tell the user what\n # modules *can* be imported in python tutor ...\n ENTRIES_PER_LINE = 6\n \n lines_to_print = []\n # adapted from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks\n for i in range(0, len(all_allowed_imports), ENTRIES_PER_LINE):\n lines_to_print.append(all_allowed_imports[i:i + ENTRIES_PER_LINE])\n pretty_printed_imports = ',\\n '.join(\n [', '.join(e) for e in lines_to_print])\n \n raise ImportError('{0} not found or not supported\\nOnly these modules can be imported:\\n {1}{2}'.format(\n args[0], pretty_printed_imports, TRY_ANACONDA_STR))\n \n \n # Support interactive user input by:\n #\n # 1. running the entire program up to a call to raw_input (or input in py3),\n # 2. bailing and returning a trace ending in a special 'raw_input' event,\n # 3. letting the web frontend issue a prompt to the user to grab a string,\n # 4. RE-RUNNING the whole program with that string added to input_string_queue,\n # 5. 
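which re-runs up to the next input call, and so on.\n    #\n    # (Illustrative, not upstream text) meanwhile, the import whitelist above\n    # is enforced by __restricted_import__:\n    #\n    #   __restricted_import__('math')  # ok -- returns the module, minus any\n    #                                  # os/sys/posix/gc attributes it exposes\n    #   __restricted_import__('os')    # ImportError, listing every module\n    #                                  # that CAN be imported\n    #\n    # Restating step 5: it is the re-run 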
which should bring execution to the next raw_input call (if\n # available), or to termination.\n # Repeat until no more raw_input calls are encountered.\n # Note that this is mad inefficient, but is simple to implement!\n \n # VERY IMPORTANT -- set random seed to 0 to ensure deterministic execution:\n import random\n random.seed(0)\n \n # queue of input strings passed from either raw_input or mouse_input\n input_string_queue = []\n \n \n def open_wrapper(*args):\n if is_python3:\n raise Exception('''open() is not supported by Python Tutor.\n Instead use io.StringIO() to simulate a file.\n Example: http://goo.gl/uNvBGl''' + TRY_ANACONDA_STR)\n else:\n raise Exception('''open() is not supported by Python Tutor.\n Instead use StringIO.StringIO() to simulate a file.\n Example: http://goo.gl/Q9xQ4p''' + TRY_ANACONDA_STR)\n \n # create a more sensible error message for unsupported features\n \n \n def create_banned_builtins_wrapper(fn_name):\n def err_func(*args):\n raise Exception(\n \"'\" + fn_name + \"' is not supported by Python Tutor.\" + TRY_ANACONDA_STR)\n return err_func\n \n \n class RawInputException(Exception):\n pass\n \n \n def raw_input_wrapper(prompt=''):\n if input_string_queue:\n input_str = input_string_queue.pop(0)\n \n # write the prompt and user input to stdout, to emulate what happens\n # at the terminal\n sys.stdout.write(str(prompt)) # always convert prompt into a string\n # newline to simulate the user hitting Enter\n sys.stdout.write(input_str + \"\\n\")\n return input_str\n raise RawInputException(str(prompt)) # always convert prompt into a string\n \n \n # Python 2 input() does eval(raw_input())\n def python2_input_wrapper(prompt=''):\n if input_string_queue:\n input_str = input_string_queue.pop(0)\n \n # write the prompt and user input to stdout, to emulate what happens\n # at the terminal\n sys.stdout.write(str(prompt)) # always convert prompt into a string\n # newline to simulate the user hitting Enter\n sys.stdout.write(input_str + \"\\n\")\n return eval(input_str) # remember to eval!\n raise RawInputException(str(prompt)) # always convert prompt into a string\n \n \n class MouseInputException(Exception):\n pass\n \n \n def mouse_input_wrapper(prompt=''):\n if input_string_queue:\n return input_string_queue.pop(0)\n raise MouseInputException(prompt)\n \n \n # blacklist of builtins\n # 2018-06-15 don't ban any builtins since that's just security by obscurity\n BANNED_BUILTINS = []\n # we should rely on other layered security mechanisms\n \n # old banned built-ins prior to 2018-06-15\n # BANNED_BUILTINS = ['reload', 'open', 'compile',\n # 'file', 'eval', 'exec', 'execfile',\n # 'exit', 'quit', 'help',\n # 'dir', 'globals', 'locals', 'vars']\n # Peter says 'apply' isn't dangerous, so don't ban it\n \n IGNORE_VARS = set(('__builtins__', '__name__',\n '__exception__', '__doc__', '__package__'))\n \n \n '''\n 2013-12-26\n \n Okay, what's with this f_valuestack business?\n \n If you compile your own CPython and patch Objects/frameobject.c to add a\n Python accessor for f_valuestack, then you can actually access the value\n stack, which is useful for, say, grabbbing the objects within\n list/set/dict comprehensions as they're being built. 
e.g., try:\n \n z = [x*y for x in range(5) for y in range(5)]\n \n Note that on pythontutor.com, I am currently running custom-compiled\n versions of Python-2.7.6 and Python-3.3.3 with this f_valuestack hack.\n Unless you run your own custom CPython, you won't get these benefits.\n - update as of 2018-06-16: I don't think the above has been true for a while\n \n \n Patch:\n \n static PyObject *\n frame_getlineno(PyFrameObject *f, void *closure)\n {\n return PyLong_FromLong(PyFrame_GetLineNumber(f));\n }\n \n +// copied from Py2crazy, which was for Python 2, but let's hope this still works!\n +static PyObject *\n +frame_getvaluestack(PyFrameObject* f) {\n + // pgbovine - TODO: will this memory leak? hopefully not,\n + // since all other accessors seem to follow the same idiom\n + PyObject* lst = PyList_New(0);\n + if (f->f_stacktop != NULL) {\n + PyObject** p = NULL;\n + for (p = f->f_valuestack; p < f->f_stacktop; p++) {\n + PyList_Append(lst, *p);\n + }\n + }\n +\n + return lst;\n +}\n +\n /* Setter for f_lineno - you can set f_lineno from within a trace function in\n * order to jump to a given line of code, subject to some restrictions. Most\n * lines are OK to jump to because they don't make any assumptions about the\n @@ -368,6 +384,11 @@\n \n static PyGetSetDef frame_getsetlist[] = {\n {\"f_locals\", (getter)frame_getlocals, NULL, NULL},\n {\"f_lineno\", (getter)frame_getlineno,\n (setter)frame_setlineno, NULL},\n {\"f_trace\", (getter)frame_gettrace, (setter)frame_settrace, NULL},\n +\n + // pgbovine\n + {\"f_valuestack\",(getter)frame_getvaluestack,\n + (setter)NULL /* don't let it be set */, NULL},\n +\n {0}\n };\n '''\n \n # at_global_scope should be true only if 'frame' represents the global scope\n \n \n def get_user_globals(frame, at_global_scope=False):\n d = filter_var_dict(frame.f_globals)\n \n # don't blurt out all of f_valuestack for now ...\n '''\n if at_global_scope and hasattr(frame, 'f_valuestack'):\n for (i, e) in enumerate(frame.f_valuestack):\n d['_tmp' + str(i+1)] = e\n '''\n \n # print out list objects being built up in Python 2.x list comprehensions\n # (which don't have its own special <listcomp> frame, sadly)\n if not is_python3 and hasattr(frame, 'f_valuestack'):\n for (i, e) in enumerate([e for e in frame.f_valuestack if type(e) is list]):\n d['_tmp' + str(i+1)] = e\n \n # also filter out __return__ for globals only, but NOT for locals\n if '__return__' in d:\n del d['__return__']\n return d\n \n \n def get_user_locals(frame):\n ret = filter_var_dict(frame.f_locals)\n # don't blurt out all of f_valuestack for now ...\n '''\n if hasattr(frame, 'f_valuestack'):\n for (i, e) in enumerate(frame.f_valuestack):\n ret['_tmp' + str(i+1)] = e\n '''\n \n # special printing of list/set/dict comprehension objects as they are\n # being built up incrementally ...\n f_name = frame.f_code.co_name\n if hasattr(frame, 'f_valuestack'):\n # print out list objects being built up in Python 2.x list comprehensions\n # (which don't have its own special <listcomp> frame, sadly)\n if not is_python3:\n for (i, e) in enumerate([e for e in frame.f_valuestack\n if type(e) is list]):\n ret['_tmp' + str(i+1)] = e\n \n # for dict and set comprehensions, which have their own frames:\n if f_name.endswith('comp>'):\n for (i, e) in enumerate([e for e in frame.f_valuestack\n if type(e) in (list, set, dict)]):\n ret['_tmp' + str(i+1)] = e\n \n return ret\n \n \n def filter_var_dict(d):\n ret = {}\n for (k, v) in d.items():\n if k not in IGNORE_VARS:\n ret[k] = v\n return ret\n \n \n # yield all 
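function objects reachable from a frame's locals, recursing into\n    # lists, tuples, sets, dicts, classes and instances along the way.\n    #\n    # (Illustrative, not upstream text) e.g. with locals\n    #\n    #   {'f': some_func, 'pair': (g, h), 'n': 3}\n    #\n    # the generator below yields some_func, g and h, and skips n.\n    #\n    # yield all 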
function objects locally-reachable from frame,\n # making sure to traverse inside all compound objects ...\n def visit_all_locally_reachable_function_objs(frame):\n for (k, v) in get_user_locals(frame).items():\n for e in visit_function_obj(v, set()):\n if e: # only non-null if it's a function object\n assert type(e) in (types.FunctionType, types.MethodType)\n yield e\n \n \n # TODO: this might be slow if we're traversing inside lots of objects:\n def visit_function_obj(v, ids_seen_set):\n v_id = id(v)\n \n # to prevent infinite loop\n if v_id in ids_seen_set:\n yield None\n else:\n ids_seen_set.add(v_id)\n \n typ = type(v)\n \n # simple base case\n if typ in (types.FunctionType, types.MethodType):\n yield v\n \n # recursive cases\n elif typ in (list, tuple, set):\n for child in v:\n for child_res in visit_function_obj(child, ids_seen_set):\n yield child_res\n \n elif typ == dict or pg_encoder.is_class(v) or pg_encoder.is_instance(v):\n contents_dict = None\n \n if typ == dict:\n contents_dict = v\n # warning: some classes or instances don't have __dict__ attributes\n elif hasattr(v, '__dict__'):\n contents_dict = v.__dict__\n \n if contents_dict:\n for (key_child, val_child) in contents_dict.items():\n for key_child_res in visit_function_obj(key_child, ids_seen_set):\n yield key_child_res\n for val_child_res in visit_function_obj(val_child, ids_seen_set):\n yield val_child_res\n \n # degenerate base case\n yield None\n \n \n class PGLogger(bdb.Bdb):\n # if custom_modules is non-empty, it should be a dict mapping module\n # names to the python source code of each module. when _runscript is\n # called, it will do \"from <module> import *\" for all modules in\n # custom_modules before running the user's script and then trace all\n # code within custom_modules\n #\n # if separate_stdout_by_module, then have a separate stdout stream\n # for each module rather than all stdout going to a single stream\n def __init__(self, cumulative_mode, heap_primitives, show_only_outputs, finalizer_func,\n disable_security_checks=False, allow_all_modules=False, crazy_mode=False,\n custom_modules=None, separate_stdout_by_module=False, probe_exprs=None):\n bdb.Bdb.__init__(self)\n self.mainpyfile = ''\n self._wait_for_mainpyfile = 0\n \n if probe_exprs:\n self.probe_exprs = probe_exprs\n else:\n self.probe_exprs = None\n \n self.separate_stdout_by_module = separate_stdout_by_module\n self.stdout_by_module = {} # Key: module name, Value: StringIO faux-stdout\n \n self.modules_to_trace = set(['__main__']) # always trace __main__!\n \n # Key: module name\n # Value: module's python code as a string\n self.custom_modules = custom_modules\n if self.custom_modules:\n for module_name in self.custom_modules:\n self.modules_to_trace.add(module_name)\n \n self.disable_security_checks = disable_security_checks\n self.allow_all_modules = allow_all_modules\n # if we allow all modules, we shouldn't do security checks\n # either since otherwise users can't really import anything\n # because that will likely involve opening files on disk, which\n # is disallowed by security checks\n if self.allow_all_modules:\n self.disable_security_checks = True\n \n # if True, then displays ALL stack frames that have ever existed\n # rather than only those currently on the stack (and their\n # lexical parents)\n self.cumulative_mode = cumulative_mode\n \n # if True, then render certain primitive objects as heap objects\n self.render_heap_primitives = heap_primitives\n \n # if True, then don't render any data structures in the trace,\n # and 
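show only program outputs.\n            #\n            # (Illustrative, not upstream text) a hypothetical instantiation,\n            # mirroring the signature above:\n            #\n            #   logger = PGLogger(cumulative_mode=False,\n            #                     heap_primitives=False,\n            #                     show_only_outputs=False,\n            #                     finalizer_func=lambda trace: trace,\n            #                     probe_exprs=['x'])\n            #\n            # i.e. self.show_only_outputs: if True, 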
show only outputs\n self.show_only_outputs = show_only_outputs\n \n # Run using the custom Py2crazy Python interpreter\n self.crazy_mode = crazy_mode\n \n # a function that takes the output trace as a parameter and\n # processes it\n self.finalizer_func = finalizer_func\n \n # each entry contains a dict with the information for a single\n # executed line\n self.trace = []\n \n # if this is true, don't put any more stuff into self.trace\n self.done = False\n \n # if this is non-null, don't do any more tracing until a\n # 'return' instruction with a stack gotten from\n # get_stack_code_IDs() that matches wait_for_return_stack\n self.wait_for_return_stack = None\n \n # http://stackoverflow.com/questions/2112396/in-python-in-google-app-engine-how-do-you-capture-output-produced-by-the-print\n self.GAE_STDOUT = sys.stdout\n \n # Key: function object\n # Value: parent frame\n self.closures = {}\n \n # Key: code object for a lambda\n # Value: parent frame\n self.lambda_closures = {}\n \n # set of function objects that were defined in the global scope\n self.globally_defined_funcs = set()\n \n # Key: frame object\n # Value: monotonically increasing small ID, based on call order\n self.frame_ordered_ids = {}\n self.cur_frame_id = 1\n \n # List of frames to KEEP AROUND after the function exits.\n # If cumulative_mode is True, then keep ALL frames in\n # zombie_frames; otherwise keep only frames where\n # nested functions were defined within them.\n self.zombie_frames = []\n \n # set of elements within zombie_frames that are also\n # LEXICAL PARENTS of other frames\n self.parent_frames_set = set()\n \n # all globals that ever appeared in the program, in the order in\n # which they appeared. note that this might be a superset of all\n # the globals that exist at any particular execution point,\n # since globals might have been deleted (using, say, 'del')\n self.all_globals_in_order = []\n \n # very important for this single object to persist throughout\n # execution, or else canonical small IDs won't be consistent.\n self.encoder = pg_encoder.ObjectEncoder(self)\n \n self.executed_script = None # Python script to be executed!\n \n # if there is at least one line that ends with BREAKPOINT_STR,\n # then activate \"breakpoint mode\", where execution should stop\n # ONLY at breakpoint lines.\n self.breakpoints = []\n \n self.vars_to_hide = set() # a set of regex match objects\n # created by compileGlobMatch() from\n # the contents of PYTUTOR_HIDE_STR\n # a set of regex match objects derived from PYTUTOR_INLINE_TYPE_STR\n self.types_to_inline = set()\n \n self.prev_lineno = -1 # keep track of previous line just executed\n \n def should_hide_var(self, var):\n for re_match in self.vars_to_hide:\n if re_match(var):\n return True\n return False\n \n def get_user_stdout(self):\n def encode_stringio(sio):\n # This is SUPER KRAZY! In Python 2, the buflist inside of a StringIO\n # instance can be made up of both str and unicode, so we need to convert\n # the str to unicode and replace invalid characters with the Unicode '?'\n # But leave unicode elements alone. This way, EVERYTHING inside buflist\n # will be unicode. 
(Note that in Python 3, everything is already unicode,\n # so we're fine.)\n if not is_python3:\n sio.buflist = [(e.decode('utf-8', 'replace')\n if type(e) is str\n else e)\n for e in sio.buflist]\n return sio.getvalue()\n \n if self.separate_stdout_by_module:\n ret = {}\n for module_name in self.stdout_by_module:\n ret[module_name] = encode_stringio(\n self.stdout_by_module[module_name])\n return ret\n else:\n # common case - single stdout stream\n return encode_stringio(self.user_stdout)\n \n def get_frame_id(self, cur_frame):\n return self.frame_ordered_ids[cur_frame]\n \n # Returns the (lexical) parent of a function value.\n def get_parent_of_function(self, val):\n if val in self.closures:\n return self.get_frame_id(self.closures[val])\n elif val in self.lambda_closures:\n return self.get_frame_id(self.lambda_closures[val])\n else:\n return None\n \n # Returns the (lexical) parent frame of the function that was called\n # to create the stack frame 'frame'.\n #\n # OKAY, this is a SUPER hack, but I don't see a way around it\n # since it's impossible to tell exactly which function\n # ('closure') object was called to create 'frame'.\n #\n # The Python interpreter doesn't maintain this information,\n # so unless we hack the interpreter, we will simply have\n # to make an educated guess based on the contents of local\n # variables inherited from possible parent frame candidates.\n def get_parent_frame(self, frame):\n #print >> sys.stderr, 'get_parent_frame: frame.f_code', frame.f_code\n for (func_obj, parent_frame) in self.closures.items():\n # ok, there's a possible match, but let's compare the\n # local variables in parent_frame to those of frame\n # to make sure. this is a hack that happens to work because in\n # Python, each stack frame inherits ('inlines') a copy of the\n # variables from its (lexical) parent frame.\n if func_obj.__code__ == frame.f_code:\n all_matched = True\n for k in frame.f_locals:\n # Do not try to match local names\n if k in frame.f_code.co_varnames:\n continue\n if k != '__return__' and k in parent_frame.f_locals:\n if parent_frame.f_locals[k] != frame.f_locals[k]:\n all_matched = False\n break\n \n if all_matched:\n return parent_frame\n \n for (lambda_code_obj, parent_frame) in self.lambda_closures.items():\n if lambda_code_obj == frame.f_code:\n # TODO: should we do more verification like above?!?\n return parent_frame\n \n return None\n \n def lookup_zombie_frame_by_id(self, frame_id):\n # TODO: kinda inefficient\n for e in self.zombie_frames:\n if self.get_frame_id(e) == frame_id:\n return e\n assert False # should never get here\n \n # unused ...\n # def reset(self):\n # bdb.Bdb.reset(self)\n # self.forget()\n \n def forget(self):\n self.lineno = None\n self.stack = []\n self.curindex = 0\n self.curframe = None\n \n def setup(self, f, t):\n self.forget()\n self.stack, self.curindex = self.get_stack(f, t)\n self.curframe = self.stack[self.curindex][0]\n \n # should be a reasonably unique ID to match calls and returns:\n def get_stack_code_IDs(self):\n return [id(e[0].f_code) for e in self.stack]\n \n # Override Bdb methods\n \n def user_call(self, frame, argument_list):\n \"\"\"This method is called when there is the remote possibility\n that we ever need to stop in this function.\"\"\"\n # TODO: figure out a way to move this down to 'def interaction'\n # or right before self.trace.append ...\n if self.done:\n return\n \n if self._wait_for_mainpyfile:\n return\n if self.stop_here(frame):\n # delete __return__ so that on subsequent calls to\n # a generator 
function, the OLD yielded (returned)\n # value gets deleted from the frame ...\n try:\n del frame.f_locals['__return__']\n except KeyError:\n pass\n \n self.interaction(frame, None, 'call')\n \n def user_line(self, frame):\n \"\"\"This function is called when we stop or break at this line.\"\"\"\n if self.done:\n return\n \n if self._wait_for_mainpyfile:\n if ((frame.f_globals['__name__'] not in self.modules_to_trace) or\n frame.f_lineno <= 0):\n # older code:\n # if (self.canonic(frame.f_code.co_filename) != \"<string>\" or\n # frame.f_lineno <= 0):\n return\n self._wait_for_mainpyfile = 0\n self.interaction(frame, None, 'step_line')\n \n def user_return(self, frame, return_value):\n \"\"\"This function is called when a return trap is set here.\"\"\"\n if self.done:\n return\n \n frame.f_locals['__return__'] = return_value\n self.interaction(frame, None, 'return')\n \n def user_exception(self, frame, exc_info):\n \"\"\"This function is called if an exception occurs,\n but only if we are to stop at or just below this level.\"\"\"\n if self.done:\n return\n \n exc_type, exc_value, exc_traceback = exc_info\n frame.f_locals['__exception__'] = exc_type, exc_value\n if type(exc_type) == type(''):\n exc_type_name = exc_type\n else:\n exc_type_name = exc_type.__name__\n \n if exc_type_name == 'RawInputException':\n # make sure it's a string so it's JSON serializable!\n raw_input_arg = str(exc_value.args[0])\n self.trace.append(dict(event='raw_input', prompt=raw_input_arg))\n self.done = True\n elif exc_type_name == 'MouseInputException':\n # make sure it's a string so it's JSON serializable!\n mouse_input_arg = str(exc_value.args[0])\n self.trace.append(\n dict(event='mouse_input', prompt=mouse_input_arg))\n self.done = True\n else:\n self.interaction(frame, exc_traceback, 'exception')\n \n def get_script_line(self, n):\n return self.executed_script_lines[n-1]\n \n # General interaction function\n \n def interaction(self, frame, traceback, event_type):\n self.setup(frame, traceback)\n tos = self.stack[self.curindex]\n top_frame = tos[0]\n lineno = tos[1]\n \n topframe_module = top_frame.f_globals['__name__']\n \n # debug ...\n '''\n print >> sys.stderr\n print >> sys.stderr, '=== STACK ===', 'curindex:', self.curindex\n for (e,ln) in self.stack:\n print >> sys.stderr, e.f_code.co_name + ' ' + e.f_code.co_filename + ' ' + str(ln)\n print >> sys.stderr, \"top_frame\", top_frame.f_code.co_name, top_frame.f_code\n '''\n \n # don't trace inside of ANY functions that aren't user-written code\n # (e.g., those from imported modules -- e.g., random, re -- or the\n # __restricted_import__ function in this file)\n #\n # empirically, it seems like the FIRST entry in self.stack is\n # the 'run' function from bdb.py, but everything else on the\n # stack is the user program's \"real stack\"\n \n # Look only at the \"topmost\" frame on the stack ...\n \n # if we're not in a module that we are explicitly tracing, skip:\n # (this comes up in tests/backend-tests/namedtuple.txt)\n if topframe_module not in self.modules_to_trace:\n return\n # also don't trace inside of the magic \"constructor\" code\n if top_frame.f_code.co_name == '__new__':\n return\n # or __repr__, which is often called when running print statements\n if top_frame.f_code.co_name == '__repr__':\n return\n \n # don't trace if wait_for_return_stack is non-null ...\n if self.wait_for_return_stack:\n if event_type == 'return' and \\\n (self.wait_for_return_stack == self.get_stack_code_IDs()):\n self.wait_for_return_stack = None # reset!\n return # 
always bail!\n else:\n # Skip all \"calls\" that are actually class definitions, since\n # those faux calls produce lots of ugly cruft in the trace.\n #\n # NB: Only trigger on calls to functions defined in\n # user-written code (i.e., co_filename == '<string>'), but that\n # should already be ensured by the above check for whether we're\n # in user-written code.\n if event_type == 'call':\n first_lineno = top_frame.f_code.co_firstlineno\n if topframe_module == \"__main__\":\n func_line = self.get_script_line(first_lineno)\n elif topframe_module in self.custom_modules:\n module_code = self.custom_modules[topframe_module]\n module_code_lines = module_code.splitlines() # TODO: maybe pre-split lines?\n func_line = module_code_lines[first_lineno-1]\n else:\n # you're hosed\n func_line = ''\n #print >> sys.stderr, func_line\n \n if CLASS_RE.match(func_line.lstrip()): # ignore leading spaces\n self.wait_for_return_stack = self.get_stack_code_IDs()\n return\n \n self.encoder.reset_heap() # VERY VERY VERY IMPORTANT,\n # or else we won't properly capture heap object mutations in the trace!\n \n if event_type == 'call':\n # Don't be so strict about this assertion because it FAILS\n # when you're calling a generator (not for the first time),\n # since that frame has already previously been on the stack ...\n #assert top_frame not in self.frame_ordered_ids\n \n self.frame_ordered_ids[top_frame] = self.cur_frame_id\n self.cur_frame_id += 1\n \n if self.cumulative_mode:\n self.zombie_frames.append(top_frame)\n \n # kinda tricky to get the timing right -- basically, as soon as you\n # make a call, set sys.stdout to the stream for the appropriate\n # module, and as soon as you return, set sys.stdout to the\n # stream for your caller's module. we need to do this on the\n # return call since we want to immediately start picking up\n # prints to stdout *right after* this function returns\n if self.separate_stdout_by_module:\n if event_type == 'call':\n if topframe_module in self.stdout_by_module:\n sys.stdout = self.stdout_by_module[topframe_module]\n else:\n sys.stdout = self.stdout_by_module[\"<other>\"]\n elif event_type == 'return' and self.curindex > 0:\n prev_tos = self.stack[self.curindex - 1]\n prev_topframe = prev_tos[0]\n prev_topframe_module = prev_topframe.f_globals['__name__']\n if prev_topframe_module in self.stdout_by_module:\n sys.stdout = self.stdout_by_module[prev_topframe_module]\n else:\n sys.stdout = self.stdout_by_module[\"<other>\"]\n \n # only render zombie frames that are NO LONGER on the stack\n #\n # subtle: self.stack[:self.curindex+1] is the real stack, since\n # everything after self.curindex+1 is beyond the top of the\n # stack. 
this seems to be relevant only when there's an exception,\n # since the ENTIRE stack is preserved but self.curindex\n # starts decrementing as the exception bubbles up the stack.\n cur_stack_frames = [e[0] for e in self.stack[:self.curindex+1]]\n zombie_frames_to_render = [\n e for e in self.zombie_frames if e not in cur_stack_frames]\n \n # each element is a pair of (function name, ENCODED locals dict)\n encoded_stack_locals = []\n \n # returns a dict with keys: function name, frame id, id of parent frame, encoded_locals dict\n def create_encoded_stack_entry(cur_frame):\n #print >> sys.stderr, '- create_encoded_stack_entry', cur_frame, self.closures, self.lambda_closures\n ret = {}\n \n parent_frame_id_list = []\n \n f = cur_frame\n while True:\n p = self.get_parent_frame(f)\n if p:\n pid = self.get_frame_id(p)\n assert pid\n parent_frame_id_list.append(pid)\n f = p\n else:\n break\n \n cur_name = cur_frame.f_code.co_name\n \n if cur_name == '':\n cur_name = 'unnamed function'\n \n # augment lambdas with line number\n if cur_name == '<lambda>':\n cur_name += pg_encoder.create_lambda_line_number(cur_frame.f_code,\n self.encoder.line_to_lambda_code)\n \n # encode in a JSON-friendly format now, in order to prevent ill\n # effects of aliasing later down the line ...\n encoded_locals = {}\n \n for (k, v) in get_user_locals(cur_frame).items():\n is_in_parent_frame = False\n \n # don't display locals that appear in your parents' stack frames,\n # since that's redundant\n for pid in parent_frame_id_list:\n parent_frame = self.lookup_zombie_frame_by_id(pid)\n if k in parent_frame.f_locals:\n # ignore __return__, which is never copied\n if k != '__return__':\n # these values SHOULD BE ALIASES\n # (don't do an 'is' check since it might not fire for primitives)\n if parent_frame.f_locals[k] == v:\n is_in_parent_frame = True\n \n if is_in_parent_frame and k not in cur_frame.f_code.co_varnames:\n continue\n \n # don't display some built-in locals ...\n if k == '__module__':\n continue\n \n if self.should_hide_var(k):\n continue\n \n encoded_val = self.encoder.encode(\n v, self.get_parent_of_function)\n encoded_locals[k] = encoded_val\n \n # order the variable names in a sensible way:\n \n # Let's start with co_varnames, since it (often) contains all\n # variables in this frame, some of which might not exist yet.\n ordered_varnames = []\n for e in cur_frame.f_code.co_varnames:\n if e in encoded_locals:\n ordered_varnames.append(e)\n \n # sometimes co_varnames doesn't contain all of the true local\n # variables: e.g., when executing a 'class' definition. 
in that\n # case, iterate over encoded_locals and push them onto the end\n # of ordered_varnames in alphabetical order\n for e in sorted(encoded_locals.keys()):\n if e != '__return__' and e not in ordered_varnames:\n ordered_varnames.append(e)\n \n # finally, put __return__ at the very end\n if '__return__' in encoded_locals:\n ordered_varnames.append('__return__')\n \n # doctor Python 3 initializer to look like a normal function (denero)\n if '__locals__' in encoded_locals:\n ordered_varnames.remove('__locals__')\n local = encoded_locals.pop('__locals__')\n if encoded_locals.get('__return__', True) is None:\n encoded_locals['__return__'] = local\n \n # crucial sanity checks!\n assert len(ordered_varnames) == len(encoded_locals)\n for e in ordered_varnames:\n assert e in encoded_locals\n \n return dict(func_name=cur_name,\n is_parent=(cur_frame in self.parent_frames_set),\n frame_id=self.get_frame_id(cur_frame),\n parent_frame_id_list=parent_frame_id_list,\n encoded_locals=encoded_locals,\n ordered_varnames=ordered_varnames)\n \n i = self.curindex\n \n # look for whether a nested function has been defined during\n # this particular call:\n if i > 1: # i == 1 implies that there's only a global scope visible\n for v in visit_all_locally_reachable_function_objs(top_frame):\n if (v not in self.closures and\n v not in self.globally_defined_funcs):\n \n # Look for the presence of the code object (v.func_code\n # for Python 2 or v.__code__ for Python 3) in the\n # constant pool (f_code.co_consts) of an enclosing\n # stack frame, and set that frame as your parent.\n #\n # This technique properly handles lambdas passed as\n # function parameters. e.g., this example:\n #\n # def foo(x):\n # bar(lambda y: x + y)\n # def bar(a):\n # print a(20)\n # foo(10)\n chosen_parent_frame = None\n # SUPER hacky but seems to work -- use reversed(self.stack)\n # because we want to traverse starting from the TOP of the stack\n # (most recent frame) and find the first frame containing\n # a constant code object that matches v.__code__ or v.func_code\n #\n # required for this example from Berkeley CS61a:\n #\n # def f(p, k):\n # def g():\n # print(k)\n # if k == 0:\n # f(g, 1)\n # f(None, 0)\n #\n # there are two calls to f, each of which defines a\n # closure g that should point to the respective frame.\n #\n # note that for the second call to f, the parent of the\n # g defined in there should be that frame, which is at\n # the TOP of the stack. this reversed() hack does the\n # right thing. 
note that if you don't traverse the stack\n # backwards, then you will mistakenly get the parent as\n # the FIRST f frame (bottom of the stack).\n for (my_frame, my_lineno) in reversed(self.stack):\n if chosen_parent_frame:\n break\n \n for frame_const in my_frame.f_code.co_consts:\n if frame_const is (v.__code__ if is_python3 else v.func_code):\n chosen_parent_frame = my_frame\n break\n \n # 2013-12-01 commented out this line so tests/backend-tests/papajohn-monster.txt\n # works without an assertion failure ...\n # assert chosen_parent_frame # I hope this always passes :0\n \n # this condition should be False for functions declared in global scope ...\n if chosen_parent_frame in self.frame_ordered_ids:\n self.closures[v] = chosen_parent_frame\n # unequivocally add to this set!!!\n self.parent_frames_set.add(chosen_parent_frame)\n if not chosen_parent_frame in self.zombie_frames:\n self.zombie_frames.append(chosen_parent_frame)\n else:\n # look for code objects of lambdas defined within this\n # function, which comes up in cases like line 2 of:\n # def x(y):\n # (lambda z: lambda w: z+y)(y)\n #\n # x(42)\n if top_frame.f_code.co_consts:\n for e in top_frame.f_code.co_consts:\n if type(e) == types.CodeType and e.co_name == '<lambda>':\n # TODO: what if it's already in lambda_closures?\n self.lambda_closures[e] = top_frame\n self.parent_frames_set.add(\n top_frame) # copy-paste from above\n if not top_frame in self.zombie_frames:\n self.zombie_frames.append(top_frame)\n else:\n # if there is only a global scope visible ...\n for (k, v) in get_user_globals(top_frame).items():\n if (type(v) in (types.FunctionType, types.MethodType) and\n v not in self.closures):\n self.globally_defined_funcs.add(v)\n \n # climb up until you find '<module>', which is (hopefully) the global scope\n top_frame = None\n while True:\n cur_frame = self.stack[i][0]\n cur_name = cur_frame.f_code.co_name\n if cur_name == '<module>':\n break\n \n # do this check because in some cases, certain frames on the\n # stack might NOT be tracked, so don't push a stack entry for\n # those frames. this happens when you have a callback function\n # in an imported module. e.g., your code:\n # def foo():\n # bar(baz)\n #\n # def baz(): pass\n #\n # imported module code:\n # def bar(callback_func):\n # callback_func()\n #\n # when baz is executing, the real stack is [foo, bar, baz] but\n # bar is in imported module code, so pg_logger doesn't trace\n # it, and it doesn't show up in frame_ordered_ids. 
thus, the\n # stack to render should only be [foo, baz].\n if cur_frame in self.frame_ordered_ids:\n encoded_stack_locals.append(\n create_encoded_stack_entry(cur_frame))\n if not top_frame:\n top_frame = cur_frame\n i -= 1\n \n zombie_encoded_stack_locals = [\n create_encoded_stack_entry(e) for e in zombie_frames_to_render]\n \n # encode in a JSON-friendly format now, in order to prevent ill\n # effects of aliasing later down the line ...\n encoded_globals = {}\n cur_globals_dict = get_user_globals(\n tos[0], at_global_scope=(self.curindex <= 1))\n for (k, v) in cur_globals_dict.items():\n if self.should_hide_var(k):\n continue\n \n encoded_val = self.encoder.encode(v, self.get_parent_of_function)\n encoded_globals[k] = encoded_val\n \n if k not in self.all_globals_in_order:\n self.all_globals_in_order.append(k)\n \n # filter out globals that don't exist at this execution point\n # (because they've been, say, deleted with 'del')\n ordered_globals = [\n e for e in self.all_globals_in_order if e in encoded_globals]\n assert len(ordered_globals) == len(encoded_globals)\n \n # merge zombie_encoded_stack_locals and encoded_stack_locals\n # into one master ordered list using some simple rules for\n # making it look aesthetically pretty\n stack_to_render = []\n \n # first push all regular stack entries\n if encoded_stack_locals:\n for e in encoded_stack_locals:\n e['is_zombie'] = False\n e['is_highlighted'] = False\n stack_to_render.append(e)\n \n # highlight the top-most active stack entry\n stack_to_render[0]['is_highlighted'] = True\n \n # now push all zombie stack entries\n for e in zombie_encoded_stack_locals:\n # don't display return value for zombie frames\n # TODO: reconsider ...\n '''\n try:\n e['ordered_varnames'].remove('__return__')\n except ValueError:\n pass\n '''\n \n e['is_zombie'] = True\n e['is_highlighted'] = False # never highlight zombie entries\n \n stack_to_render.append(e)\n \n # now sort by frame_id since that sorts frames in \"chronological\n # order\" based on the order they were invoked\n stack_to_render.sort(key=lambda e: e['frame_id'])\n \n # create a unique hash for this stack entry, so that the\n # frontend can uniquely identify it when doing incremental\n # rendering. the strategy is to use a frankenstein-like mix of the\n # relevant fields to properly disambiguate closures and recursive\n # calls to the same function\n for e in stack_to_render:\n hash_str = e['func_name']\n # frame_id is UNIQUE, so it can disambiguate recursive calls\n hash_str += '_f' + str(e['frame_id'])\n \n # needed to refresh GUI display ...\n if e['is_parent']:\n hash_str += '_p'\n \n # TODO: this is no longer needed, right? 
(since frame_id is unique)\n # if e['parent_frame_id_list']:\n # hash_str += '_p' + '_'.join([str(i) for i in e['parent_frame_id_list']])\n if e['is_zombie']:\n hash_str += '_z'\n \n e['unique_hash'] = hash_str\n \n # handle probe_exprs *before* encoding the heap with self.encoder.get_heap\n encoded_probe_vals = {}\n if self.probe_exprs:\n if top_frame: # are we in a function call?\n top_frame_locals = get_user_locals(top_frame)\n else:\n top_frame_locals = {}\n for e in self.probe_exprs:\n try:\n # evaluate it with globals + locals of the top frame ...\n probe_val = eval(e, cur_globals_dict, top_frame_locals)\n encoded_probe_vals[e] = self.encoder.encode(\n probe_val, self.get_parent_of_function)\n except:\n pass # don't encode the value if there's been an error\n \n if self.show_only_outputs:\n trace_entry = dict(line=lineno,\n event=event_type,\n func_name=tos[0].f_code.co_name,\n globals={},\n ordered_globals=[],\n stack_to_render=[],\n heap={},\n stdout=self.get_user_stdout())\n else:\n trace_entry = dict(line=lineno,\n event=event_type,\n func_name=tos[0].f_code.co_name,\n globals=encoded_globals,\n ordered_globals=ordered_globals,\n stack_to_render=stack_to_render,\n heap=self.encoder.get_heap(),\n stdout=self.get_user_stdout())\n if encoded_probe_vals:\n trace_entry['probe_exprs'] = encoded_probe_vals\n \n # optional column numbers for greater precision\n # (only relevant in Py2crazy, a hacked CPython that supports column numbers)\n if self.crazy_mode:\n # at the very least, grab the column number\n trace_entry['column'] = frame.f_colno\n \n # now try to find start_col and extent\n # (-1 is an invalid instruction index)\n if frame.f_lasti >= 0:\n key = (frame.f_code.co_code, frame.f_lineno,\n frame.f_colno, frame.f_lasti)\n if key in self.bytecode_map:\n v = self.bytecode_map[key]\n trace_entry['expr_start_col'] = v.start_col\n trace_entry['expr_width'] = v.extent\n trace_entry['opcode'] = v.opcode\n \n # set a 'custom_module_name' field if we're executing in a module\n # that's not the __main__ script:\n if topframe_module != \"__main__\":\n trace_entry['custom_module_name'] = topframe_module\n \n # if there's an exception, then record its info:\n if event_type == 'exception':\n # always check in f_locals\n exc = frame.f_locals['__exception__']\n trace_entry['exception_msg'] = exc[0].__name__ + ': ' + str(exc[1])\n \n # append to the trace only the breakpoint line and the next\n # executed line, so that if you set only ONE breakpoint, OPT shows\n # the state before and after that line gets executed.\n append_to_trace = True\n if self.breakpoints:\n if not ((lineno in self.breakpoints) or (self.prev_lineno in self.breakpoints)):\n append_to_trace = False\n \n # TRICKY -- however, if there's an exception, then ALWAYS\n # append it to the trace, so that the error can be displayed\n if event_type == 'exception':\n append_to_trace = True\n \n self.prev_lineno = lineno\n \n if append_to_trace:\n self.trace.append(trace_entry)\n \n # sanity check to make sure the state of the world at a 'call' instruction\n # is identical to that at the instruction immediately following it ...\n '''\n if len(self.trace) > 1:\n cur = self.trace[-1]\n prev = self.trace[-2]\n if prev['event'] == 'call':\n assert cur['globals'] == prev['globals']\n for (s1, s2) in zip(cur['stack_to_render'], prev['stack_to_render']):\n assert s1 == s2\n assert cur['heap'] == prev['heap']\n assert cur['stdout'] == prev['stdout']\n '''\n \n if len(self.trace) >= MAX_EXECUTED_LINES:\n 
self.trace.append(dict(event='instruction_limit_reached', exception_msg='Stopped after running ' + str(\n MAX_EXECUTED_LINES) + ' steps. Please shorten your code,\\nsince Python Tutor is not designed to handle long-running code.'))\n self.force_terminate()\n \n self.forget()\n \n def _runscript(self, script_str):\n self.executed_script = script_str\n self.executed_script_lines = self.executed_script.splitlines()\n \n for (i, line) in enumerate(self.executed_script_lines):\n line_no = i + 1\n # subtle -- if the stripped line starts with '#break', that\n # means it may be a commented-out version of a normal Python\n # 'break' statement, which shouldn't be confused with an\n # OPT user-defined breakpoint!\n #\n # TODO: this still fails when someone writes something like\n # '##break' since it doesn't start with '#break'!!! i just\n # picked an unfortunate name that's also a python keyword :0\n if line.endswith(BREAKPOINT_STR) and not line.strip().startswith(BREAKPOINT_STR):\n self.breakpoints.append(line_no)\n \n if line.startswith(PYTUTOR_HIDE_STR):\n hide_vars = line[len(PYTUTOR_HIDE_STR):]\n # remember to call strip() -> compileGlobMatch()\n hide_vars = [compileGlobMatch(e.strip())\n for e in hide_vars.split(',')]\n self.vars_to_hide.update(hide_vars)\n \n if line.startswith(PYTUTOR_INLINE_TYPE_STR):\n listed_types = line[len(PYTUTOR_INLINE_TYPE_STR):]\n # remember to call strip() -> compileGlobMatch()\n listed_types = [compileGlobMatch(\n e.strip()) for e in listed_types.split(',')]\n self.types_to_inline.update(listed_types)\n \n # populate an extent map to get more accurate ranges from code\n if self.crazy_mode:\n # in Py2crazy standard library as Python-2.7.5/Lib/super_dis.py\n import super_dis\n try:\n self.bytecode_map = super_dis.get_bytecode_map(\n self.executed_script)\n except:\n # failure oblivious\n self.bytecode_map = {}\n \n # When bdb sets tracing, a number of call and line events happens\n # BEFORE debugger even reaches user's code (and the exact sequence of\n # events depends on python version). So we take special measures to\n # avoid stopping before we reach the main script (see user_line and\n # user_call for details).\n self._wait_for_mainpyfile = 1\n \n # ok, let's try to sorta 'sandbox' the user script by not\n # allowing certain potentially dangerous operations.\n user_builtins = {}\n \n # ugh, I can't figure out why in Python 2, __builtins__ seems to\n # be a dict, but in Python 3, __builtins__ seems to be a module,\n # so just handle both cases ... 
UGLY!\n if type(__builtins__) is dict:\n builtin_items = __builtins__.items()\n else:\n assert type(__builtins__) is types.ModuleType\n builtin_items = []\n for k in dir(__builtins__):\n builtin_items.append((k, getattr(__builtins__, k)))\n \n for (k, v) in builtin_items:\n if k == 'open' and not self.allow_all_modules: # put this before BANNED_BUILTINS\n user_builtins[k] = open_wrapper\n elif k in BANNED_BUILTINS:\n user_builtins[k] = create_banned_builtins_wrapper(k)\n elif k == '__import__' and not self.allow_all_modules:\n user_builtins[k] = __restricted_import__\n else:\n if k == 'raw_input':\n user_builtins[k] = raw_input_wrapper\n elif k == 'input':\n if is_python3:\n # Python 3 input() is Python 2 raw_input()\n user_builtins[k] = raw_input_wrapper\n else:\n user_builtins[k] = python2_input_wrapper\n else:\n user_builtins[k] = v\n \n user_builtins['mouse_input'] = mouse_input_wrapper\n \n if self.separate_stdout_by_module:\n self.stdout_by_module[\"__main__\"] = StringIO.StringIO()\n if self.custom_modules:\n for module_name in self.custom_modules:\n self.stdout_by_module[module_name] = StringIO.StringIO()\n # catch-all for all other modules we're NOT tracing\n self.stdout_by_module[\"<other>\"] = StringIO.StringIO()\n sys.stdout = self.stdout_by_module[\"<other>\"] # start with <other>\n else:\n # default -- a single unified stdout stream\n self.user_stdout = StringIO.StringIO()\n sys.stdout = self.user_stdout\n \n self.ORIGINAL_STDERR = sys.stderr\n \n # don't do this, or else certain kinds of errors, such as syntax\n # errors, will be silently ignored. WEIRD!\n # sys.stderr = NullDevice # silence errors\n \n user_globals = {}\n \n # if there are custom_modules, 'import' them into user_globals,\n # which emulates \"from <module> import *\"\n if self.custom_modules:\n for mn in self.custom_modules:\n # http://code.activestate.com/recipes/82234-importing-a-dynamically-generated-module/\n new_m = imp.new_module(mn)\n # exec in custom globals\n exec(self.custom_modules[mn], new_m.__dict__)\n user_globals.update(new_m.__dict__)\n \n # important: do this LAST to get precedence over values in custom_modules\n user_globals.update({\"__name__\": \"__main__\",\n \"__builtins__\": user_builtins})\n \n try:\n # if allow_all_modules is on, then try to parse script_str into an\n # AST, traverse the tree to find all modules that it imports, and then\n # try to PRE-IMPORT all of those. if we *don't* pre-import a module,\n # then when it's imported in the user's code, it may take *forever*\n # because the bdb debugger tries to single-step thru that code\n # (i think!). 
run 'import pandas' to quickly test this.\n        if self.allow_all_modules:\n            import ast\n            try:\n                all_modules_to_preimport = []\n                tree = ast.parse(script_str)\n                for node in ast.walk(tree):\n                    if isinstance(node, ast.Import):\n                        for n in node.names:\n                            all_modules_to_preimport.append(n.name)\n                    elif isinstance(node, ast.ImportFrom):\n                        all_modules_to_preimport.append(node.module)\n    \n                for m in all_modules_to_preimport:\n                    if m in script_str:  # optimization: load only modules that appear in script_str\n                        try:\n                            __import__(m)\n                        except ImportError:\n                            pass\n            except:\n                pass\n    \n        # enforce resource limits RIGHT BEFORE running script_str\n    \n        # set ~200MB virtual memory limit AND a 5-second CPU time\n        # limit (tuned for Webfaction shared hosting) to protect against\n        # memory bombs such as:\n        #   x = 2\n        #   while True: x = x*x\n        if resource_module_loaded and (not self.disable_security_checks):\n            assert not self.allow_all_modules  # <-- shouldn't be on!\n    \n            # PREEMPTIVELY import all of these modules, so that when the user's\n            # script imports them, it won't try to do a file read (since they've\n            # already been imported and cached in memory). Remember that when\n            # the user's code runs, resource.setrlimit(resource.RLIMIT_NOFILE, (0, 0))\n            # will already be in effect, so no more files can be opened.\n            for m in ALLOWED_STDLIB_MODULE_IMPORTS:\n                if m in script_str:  # optimization: load only modules that appear in script_str\n                    try:\n                        __import__(m)\n                    except ImportError:\n                        pass\n    \n            resource.setrlimit(resource.RLIMIT_AS, (200000000, 200000000))\n            resource.setrlimit(resource.RLIMIT_CPU, (5, 5))\n    \n            # protect against unauthorized filesystem accesses ...\n            # no opened files allowed\n            resource.setrlimit(resource.RLIMIT_NOFILE, (0, 0))\n    \n            # VERY WEIRD. If you activate this resource limitation, it\n            # ends up generating an EMPTY trace for the following program:\n            #   \"x = 0\\nfor i in range(10):\\n  x += 1\\n   print x\\n  x += 1\\n\"\n            # (at least on my Webfaction hosting with Python 2.7)\n            # resource.setrlimit(resource.RLIMIT_FSIZE, (0, 0))  # (redundancy for paranoia)\n    \n            # The posix module is a built-in and has a ton of OS access\n            # facilities ... if you delete those functions from\n            # sys.modules['posix'], it seems like they're gone EVEN IF\n            # someone else imports posix in a roundabout way. Of course,\n            # I don't know how foolproof this scheme is, though.\n            # (It's not sufficient to just \"del sys.modules['posix']\";\n            # it can just be reimported without accessing an external\n            # file and tripping RLIMIT_NOFILE, since the posix module\n            # is baked into the python executable, ergh. Actually DON'T\n            # \"del sys.modules['posix']\", since re-importing it will\n            # refresh all of the attributes. 
ergh^2)\n for a in dir(sys.modules['posix']):\n delattr(sys.modules['posix'], a)\n # do the same with os\n for a in dir(sys.modules['os']):\n # 'path' is needed for __restricted_import__ to work\n # and 'stat' is needed for some errors to be reported properly\n if a not in ('path', 'stat'):\n delattr(sys.modules['os'], a)\n # ppl can dig up trashed objects with gc.get_objects()\n import gc\n for a in dir(sys.modules['gc']):\n delattr(sys.modules['gc'], a)\n del sys.modules['gc']\n \n # sys.modules contains an in-memory cache of already-loaded\n # modules, so if you delete modules from here, they will\n # need to be re-loaded from the filesystem.\n #\n # Thus, as an extra precaution, remove these modules so that\n # they can't be re-imported without opening a new file,\n # which is disallowed by resource.RLIMIT_NOFILE\n #\n # Of course, this isn't a foolproof solution by any means,\n # and it might lead to UNEXPECTED FAILURES later in execution.\n del sys.modules['os']\n del sys.modules['os.path']\n del sys.modules['sys']\n \n self.run(script_str, user_globals, user_globals)\n # sys.exit ...\n except SystemExit:\n # sys.exit(0)\n raise bdb.BdbQuit\n except:\n if DEBUG:\n traceback.print_exc()\n \n trace_entry = dict(event='uncaught_exception')\n \n (exc_type, exc_val, exc_tb) = sys.exc_info()\n if hasattr(exc_val, 'lineno'):\n trace_entry['line'] = exc_val.lineno\n if hasattr(exc_val, 'offset'):\n trace_entry['offset'] = exc_val.offset\n \n trace_entry['exception_msg'] = type(\n exc_val).__name__ + \": \" + str(exc_val)\n \n # SUPER SUBTLE! if ANY exception has already been recorded by\n # the program, then DON'T record it again as an uncaught_exception.\n # This looks kinda weird since the exact exception message doesn't\n # need to match up, but in practice, there should be at most only\n # ONE exception per trace.\n already_caught = False\n for e in self.trace:\n if e['event'] == 'exception':\n already_caught = True\n break\n \n if not already_caught:\n if not self.done:\n self.trace.append(trace_entry)\n \n raise bdb.BdbQuit # need to forceably STOP execution\n \n def force_terminate(self):\n # self.finalize()\n raise bdb.BdbQuit # need to forceably STOP execution\n \n def finalize(self):\n sys.stdout = self.GAE_STDOUT # very important!\n sys.stderr = self.ORIGINAL_STDERR\n \n assert len(self.trace) <= (MAX_EXECUTED_LINES + 1)\n \n # don't do this anymore ...\n '''\n # filter all entries after 'return' from '<module>', since they\n # seem extraneous:\n res = []\n for e in self.trace:\n res.append(e)\n if e['event'] == 'return' and e['func_name'] == '<module>':\n break\n '''\n \n res = self.trace\n \n # if the SECOND to last entry is an 'exception'\n # and the last entry is return from <module>, then axe the last\n # entry, for aesthetic reasons :)\n if len(res) >= 2 and \\\n res[-2]['event'] == 'exception' and \\\n res[-1]['event'] == 'return' and res[-1]['func_name'] == '<module>':\n res.pop()\n \n self.trace = res\n \n if self.custom_modules:\n # when there's custom_modules, call with a dict as the first parameter\n return self.finalizer_func(dict(main_code=self.executed_script,\n custom_modules=self.custom_modules),\n self.trace)\n else:\n # common case\n return self.finalizer_func(self.executed_script, self.trace)\n \n \n import json\n \n # the MAIN meaty function!!!\n \n \n def exec_script_str(script_str, raw_input_lst_json, options_json, finalizer_func):\n if options_json:\n options = json.loads(options_json)\n else:\n # defaults\n options = {'cumulative_mode': False,\n 
'heap_primitives': False, 'show_only_outputs': False}\n \n py_crazy_mode = ('py_crazy_mode' in options and options['py_crazy_mode'])\n \n logger = PGLogger(options['cumulative_mode'], options['heap_primitives'], options['show_only_outputs'], finalizer_func,\n crazy_mode=py_crazy_mode)\n \n # TODO: refactor these NOT to be globals\n global input_string_queue\n input_string_queue = []\n if raw_input_lst_json:\n # TODO: if we want to support unicode, remove str() cast\n input_string_queue = [str(e) for e in json.loads(raw_input_lst_json)]\n \n try:\n logger._runscript(script_str)\n except bdb.BdbQuit:\n pass\n finally:\n logger.finalize()\n \n \n # disables security check and returns the result of finalizer_func\n # WARNING: ONLY RUN THIS LOCALLY and never over the web, since\n # security checks are disabled\n #\n # [optional] probe_exprs is a list of strings representing\n # expressions whose values to probe at each step (advanced)\n def exec_script_str_local(script_str, raw_input_lst_json, cumulative_mode, heap_primitives, finalizer_func,\n probe_exprs=None, allow_all_modules=False):\n logger = PGLogger(cumulative_mode, heap_primitives, False, finalizer_func,\n disable_security_checks=True,\n allow_all_modules=allow_all_modules,\n probe_exprs=probe_exprs)\n \n # TODO: refactor these NOT to be globals\n global input_string_queue\n input_string_queue = []\n if raw_input_lst_json:\n # TODO: if we want to support unicode, remove str() cast\n input_string_queue = [str(e) for e in json.loads(raw_input_lst_json)]\n \n try:\n logger._runscript(script_str)\n except bdb.BdbQuit:\n pass\n finally:\n return logger.finalize()\n \n ##----- End pg_logger.py -----------------------------------------------------##\n return locals()\n\n #JUPMAN: DONT NEED THIS: \n #@modulize('generate_json_trace')\n def _generate_json_trace(__name__):\n ##----- Begin generate_json_trace.py -----------------------------------------##\n # Generates a JSON trace that is compatible with the js/pytutor.ts frontend\n \n import sys\n import pg_logger\n import json\n from optparse import OptionParser\n \n # To make regression tests work consistently across platforms,\n # standardize display of floats to 3 significant figures\n #\n # Trick from:\n # http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module\n json.encoder.FLOAT_REPR = lambda f: ('%.3f' % f)\n \n \n def json_finalizer(input_code, output_trace):\n ret = dict(code=input_code, trace=output_trace)\n # sort_keys=True leads to printing in DETERMINISTIC order, but might\n # screw up some old tests ... however, there is STILL non-determinism\n # in Python 3.3 tests, ugh!\n #\n # TODO: for Python 3.6, think about reinstating sort_keys=True as a\n # command-line option for tests only? 
maybe don't activate it for reals\n # since that might falsely give users the impression that object/dict keys\n # are always sorted\n json_output = json.dumps(ret, indent=INDENT_LEVEL)\n return json_output\n \n \n def js_var_finalizer(input_code, output_trace):\n global JS_VARNAME\n ret = dict(code=input_code, trace=output_trace)\n json_output = json.dumps(ret, indent=None)\n return \"var %s = %s;\" % (JS_VARNAME, json_output)\n \n \n \n parser = OptionParser(usage=\"Generate JSON trace for pytutor\")\n parser.add_option('-c', '--cumulative', default=False, action='store_true',\n help='output cumulative trace.')\n parser.add_option('-p', '--heapPrimitives', default=False, action='store_true',\n help='render primitives as heap objects.')\n parser.add_option('-o', '--compact', default=False, action='store_true',\n help='output compact trace.')\n parser.add_option('--allmodules', default=False, action='store_true',\n help='allow importing of all installed Python modules.')\n parser.add_option('-i', '--input', default=False, action='store',\n help='JSON list of strings for simulated raw_input.', dest='raw_input_lst_json')\n parser.add_option(\"--create_jsvar\", dest=\"js_varname\", default=None,\n help=\"Create a JavaScript variable out of the trace\")\n parser.add_option(\"--code\", dest=\"usercode\", default=None,\n help=\"Load user code from a string instead of a file and output compact JSON\")\n parser.add_option(\"--probe-exprs\", dest=\"probe_exprs_json\", default=None,\n help=\"A JSON list of strings representing expressions whose values to probe at each step (advanced)\")\n \n (options, args) = parser.parse_args()\n INDENT_LEVEL = None if options.compact else 2 \n\n if options.usercode:\n INDENT_LEVEL = None\n \n probe_exprs = None\n if options.probe_exprs_json:\n probe_exprs = json.loads(options.probe_exprs_json)\n \n allow_all_modules = False\n if options.allmodules:\n allow_all_modules = True\n \n print(pg_logger.exec_script_str_local(options.usercode,\n options.raw_input_lst_json,\n options.cumulative,\n options.heapPrimitives,\n json_finalizer,\n probe_exprs=probe_exprs,\n allow_all_modules=allow_all_modules))\n else:\n fin = sys.stdin if args[0] == \"-\" else open(args[0])\n if options.js_varname:\n JS_VARNAME = options.js_varname\n print(pg_logger.exec_script_str_local(fin.read(), options.raw_input_lst_json,\n options.cumulative, options.heapPrimitives, js_var_finalizer))\n else:\n print(pg_logger.exec_script_str_local(fin.read(), options.raw_input_lst_json,\n options.cumulative, options.heapPrimitives, json_finalizer))\n \n ##----- End generate_json_trace.py -------------------------------------------##\n return locals()\n\n \n\n # JUPMAN HERE IS THE ACTUAL FUNCTION CODE ---------------------------\n \n import pg_logger\n import json\n \n # To make regression tests work consistently across platforms,\n # standardize display of floats to 3 significant figures\n #\n # Trick from:\n # http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module\n json.encoder.FLOAT_REPR = lambda f: ('%.3f' % f)\n \n \n def json_finalizer(input_code, output_trace):\n ret = dict(code=input_code, trace=output_trace)\n # sort_keys=True leads to printing in DETERMINISTIC order, but might\n # screw up some old tests ... however, there is STILL non-determinism\n # in Python 3.3 tests, ugh!\n #\n # TODO: for Python 3.6, think about reinstating sort_keys=True as a\n # command-line option for tests only? 
maybe don't activate it for reals\n        # since that might falsely give users the impression that object/dict keys\n        # are always sorted\n        json_output = json.dumps(ret, indent=INDENT_LEVEL)\n        return json_output\n    \n    \n    class JmPytutOptions: \n        def __init__(self):\n            self.raw_input_lst_json=False\n            self.cumulative = False\n            self.heapPrimitives = False\n            self.probe_exprs_json=None \n            self.allow_all_modules = False\n\n    options = JmPytutOptions()\n    \n    INDENT_LEVEL = None\n\n    probe_exprs = None\n    if options.probe_exprs_json:\n        probe_exprs = json.loads(options.probe_exprs_json)\n    \n    return pg_logger.exec_script_str_local(jm_code,\n                                    options.raw_input_lst_json,\n                                    options.cumulative,\n                                    options.heapPrimitives,\n                                    json_finalizer,\n                                    probe_exprs=probe_exprs,\n                                    allow_all_modules=options.allow_all_modules)\n    \n    \ndef pytut():\n    \"\"\" Embeds a Python Tutor visualization in the output of the current Jupyter cell.\n        Code to execute is taken from the *current* cell, stripped of\n        the call to pytut() itself.\n\n        - The GUI will also be shown on the built Sphinx website.\n        - Does *not* require an internet connection\n        - ... and yes, the implementation is very hacky\n\n        Author: David Leoni <[email protected]>\n    \"\"\"\n    #Hacky way to get variables from stack, but if we use %run -i we don't need it.\n    import inspect\n    notebook_globals = inspect.stack()[1][0].f_globals\n    code = notebook_globals[\"In\"][-1]\n\n    i = code.find('jupman.pytut()')\n    \n    if i == -1:\n        i = code.find('pytut()')\n        call_text = 'pytut()'\n    else:\n        call_text = 'jupman.pytut()'\n    \n    if i != -1:  # check that nothing follows the call\n        extra = code[i + len(call_text):]\n        if len(extra.strip()) > 0:\n            print(\"ERROR: the call to jupman.pytut() must be the last statement in the cell, but found this code after it: \\n%s\" % extra)\n            return\n\n    new_code = code.replace('jupman.pytut()', '').replace('pytut()', '')\n    \n    # ' \\n' IS FUNDAMENTAL TO PREVENT WEIRD BUGS IN JUPYTER !!!!\n    # see https://github.com/DavidLeoni/jupman/issues/25\n    new_code = ' \\n'+new_code.strip() \n\n    if len(new_code.strip()) == 0:\n        print(\"\"\"\n        Nothing to show! You have to put ALL the code IN THE SAME cell as pytut()\n        right before its call. 
\n \n For example: \n\n x = 5\n y = x + 3\n jupman.pytut()\n \"\"\")\n return\n \n import urllib\n from IPython.display import display, HTML\n\n trace = pytut_json(new_code) \n \n import uuid\n div_id = 'jm'+str(uuid.uuid4())\n json_id = 'json-' + div_id\n \n relpath = detect_relpath(notebook_globals[\"In\"]) \n \n inject = \"\"\n \n # will end up reloading multiple times the script, not very efficient \n inject += \"\"\"\n <script src=\"%s_static/js/pytutor-embed.bundle.min.js\" type=\"application/javascript\"></script>\n \"\"\" % relpath\n \n inject += \"\"\" \n <script id=\"%s\" type=\"application/json\" >\n %s\n </script>\n <div id=\"%s\" class=\"pytutorVisualizer\"> </div>\n\"\"\" % (json_id, trace, div_id)\n inject += \"\"\" \n <style>\n .vizLayoutTd {\n background-color: #fff !important;\n }\n \n #pyStdout {\n min-height:25px;\n }\n\n /* 'Edit this code' link, hiding because replaces browser tab !!!*/\n #editCodeLinkDiv {\n display:none; \n }\n </style> \n \"\"\"\n inject += \"\"\" \n <script>\n (function(){\n\n var trace = JSON.parse(document.getElementById('%s').innerHTML); \n // NOTE 1: id without #\n // NOTE 2 - maybe there are more predictable ways, but this will work anyway\n // - id should be number\n visualizerIdOverride = Math.trunc(Math.random() * 100000000000)\n addVisualizerToPage(trace, '%s',{'embeddedMode' : false,\n 'visualizerIdOverride':visualizerIdOverride}) \n \n \n // set overflow for pytuts - need to do in python as css \n // does not allow parent selection\n var pytuts = $('.pytutorVisualizer')\n pytuts.closest('div.output_html.rendered_html.output_result')\n .css('overflow-x', 'visible')\n \n //pytuts.closest('div.output_html.rendered_html.output_result')\n // .css('background-color','red') \n \n })()\n </script>\n \n \"\"\" % (json_id, div_id) \n \n return HTML(inject)\n" ]
[ [ "matplotlib.pyplot.imshow", "pandas.plotting.table", "matplotlib.pyplot.subplots", "matplotlib.image.imread", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
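For orientation, a minimal sketch of how the pg_logger entry point defined in the record above is typically driven. The toy script and argument values here are made up; the exec_script_str_local signature and the (input_code, output_trace) finalizer contract come from the code itself.

import json
import pg_logger

def json_finalizer(input_code, output_trace):
    # same contract as the finalizers above: bundle code + trace into JSON
    return json.dumps(dict(code=input_code, trace=output_trace), indent=2)

# trace a toy two-line script: no simulated raw_input, non-cumulative mode,
# primitives rendered inline rather than as heap objects
trace_json = pg_logger.exec_script_str_local(
    'x = 5\ny = x + 3\n',  # script_str
    None,                  # raw_input_lst_json
    False,                 # cumulative_mode
    False,                 # heap_primitives
    json_finalizer)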
mofeing/autoray
[ "f6e53939297f15fe7387418e4f89d2eff8bcf0ef" ]
[ "tests/test_autoray.py" ]
[ "import importlib\n\nimport pytest\n\nimport autoray as ar\n\n\n# find backends to tests\nBACKENDS = [pytest.param(\"numpy\")]\nfor lib in [\"cupy\", \"dask\", \"tensorflow\", \"torch\", \"mars\", \"jax\", \"sparse\"]:\n if importlib.util.find_spec(lib):\n BACKENDS.append(pytest.param(lib))\n\n if lib == \"jax\":\n import os\n from jax.config import config\n\n config.update(\"jax_enable_x64\", True)\n config.update(\"jax_platform_name\", \"cpu\")\n os.environ[\"XLA_PYTHON_CLIENT_ALLOCATOR\"] = \"platform\"\n\n else:\n BACKENDS.append(\n pytest.param(\n lib, marks=pytest.mark.skipif(True, reason=f\"No {lib}.\")\n )\n )\n\n\nJAX_RANDOM_KEY = None\n\n\ndef gen_rand(shape, backend, dtype=\"float64\"):\n\n if \"complex\" in dtype:\n re = gen_rand(shape, backend)\n im = gen_rand(shape, backend)\n return ar.astype(ar.do(\"complex\", re, im), dtype)\n\n if backend == \"jax\":\n from jax import random as jrandom\n\n global JAX_RANDOM_KEY\n\n if JAX_RANDOM_KEY is None:\n JAX_RANDOM_KEY = jrandom.PRNGKey(42)\n JAX_RANDOM_KEY, subkey = jrandom.split(JAX_RANDOM_KEY)\n\n return jrandom.uniform(subkey, shape=shape, dtype=dtype)\n\n elif backend == \"sparse\":\n x = ar.do(\n \"random.uniform\",\n size=shape,\n like=backend,\n density=0.5,\n format=\"coo\",\n fill_value=0,\n )\n\n else:\n x = ar.do(\"random.uniform\", size=shape, like=backend)\n\n x = ar.astype(x, ar.to_backend_dtype(dtype, backend))\n assert ar.get_dtype_name(x) == dtype\n return x\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\"fn\", [\"sqrt\", \"exp\", \"sum\"])\ndef test_basic(backend, fn):\n if (backend == \"ctf\") and fn in (\"sqrt\", \"sum\"):\n pytest.xfail(\"ctf doesn't have sqrt, and converts sum output to numpy\")\n\n x = gen_rand((2, 3, 4), backend)\n y = ar.do(fn, x)\n if (backend == \"sparse\") and (fn == \"sum\"):\n pytest.xfail(\"Sparse 'sum' outputs dense.\")\n assert ar.infer_backend(x) == ar.infer_backend(y) == backend\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\n \"fn,args\",\n [\n (ar.conj, []),\n (ar.transpose, []),\n (ar.real, []),\n (ar.imag, []),\n (ar.reshape, [(5, 3)]),\n ],\n)\ndef test_attribute_prefs(backend, fn, args):\n if (backend == \"torch\") and fn in (ar.real, ar.imag):\n pytest.xfail(\"Pytorch doesn't support complex numbers yet...\")\n\n x = gen_rand((3, 5), backend)\n y = fn(x, *args)\n assert ar.infer_backend(x) == ar.infer_backend(y) == backend\n\n\ndef modified_gram_schmidt(X):\n\n Q = []\n for j in range(0, X.shape[0]):\n\n q = X[j, :]\n for i in range(0, j):\n rij = ar.do(\"tensordot\", ar.do(\"conj\", Q[i]), q, 1)\n q = q - rij * Q[i]\n\n rjj = ar.do(\"linalg.norm\", q, 2)\n Q.append(q / rjj)\n\n return ar.do(\"stack\", Q, axis=0)\n\n\[email protected](\"backend\", BACKENDS)\ndef test_mgs(backend):\n if backend == \"sparse\":\n pytest.xfail(\"Sparse doesn't support linear algebra yet...\")\n if backend == \"ctf\":\n pytest.xfail(\"ctf does not have 'stack' function.\")\n x = gen_rand((3, 5), backend)\n Ux = modified_gram_schmidt(x)\n y = ar.do(\"sum\", Ux @ ar.dag(Ux))\n assert ar.to_numpy(y) == pytest.approx(3)\n\n\ndef modified_gram_schmidt_np_mimic(X):\n from autoray import numpy as np\n\n print(np)\n\n Q = []\n for j in range(0, X.shape[0]):\n\n q = X[j, :]\n for i in range(0, j):\n rij = np.tensordot(np.conj(Q[i]), q, 1)\n q = q - rij * Q[i]\n\n rjj = np.linalg.norm(q, 2)\n Q.append(q / rjj)\n\n return np.stack(Q, axis=0)\n\n\[email protected](\"backend\", BACKENDS)\ndef test_mgs_np_mimic(backend):\n if backend == \"sparse\":\n pytest.xfail(\"Sparse 
doesn't support linear algebra yet...\")\n if backend == \"ctf\":\n pytest.xfail(\"ctf does not have 'stack' function.\")\n x = gen_rand((3, 5), backend)\n Ux = modified_gram_schmidt_np_mimic(x)\n y = ar.do(\"sum\", Ux @ ar.dag(Ux))\n assert ar.to_numpy(y) == pytest.approx(3)\n\n\[email protected](\"backend\", BACKENDS)\ndef test_linalg_svd_square(backend):\n if backend == \"sparse\":\n pytest.xfail(\"Sparse doesn't support linear algebra yet...\")\n x = gen_rand((5, 4), backend)\n U, s, V = ar.do(\"linalg.svd\", x)\n assert (\n ar.infer_backend(x)\n == ar.infer_backend(U)\n == ar.infer_backend(s)\n == ar.infer_backend(V)\n == backend\n )\n y = U @ ar.do(\"diag\", s, like=x) @ V\n diff = ar.do(\"sum\", abs(y - x))\n assert ar.to_numpy(diff) < 1e-8\n\n\[email protected](\"backend\", BACKENDS)\ndef test_translator_random_uniform(backend):\n from autoray import numpy as anp\n\n if backend == \"sparse\":\n pytest.xfail(\"Sparse will have zeros\")\n\n x = anp.random.uniform(low=-10, size=(4, 5), like=backend)\n assert (ar.to_numpy(x) > -10).all()\n assert (ar.to_numpy(x) < 1.0).all()\n\n # test default single scalar\n x = anp.random.uniform(low=1000, high=2000, like=backend)\n assert 1000 <= ar.to_numpy(x) < 2000\n\n\[email protected](\"backend\", BACKENDS)\ndef test_translator_random_normal(backend):\n if backend == \"ctf\":\n pytest.xfail()\n\n from autoray import numpy as anp\n\n x = anp.random.normal(100.0, 0.1, size=(4, 5), like=backend)\n\n if backend == \"sparse\":\n assert (x.data > 90.0).all()\n assert (x.data < 110.0).all()\n return\n\n assert (ar.to_numpy(x) > 90.0).all()\n assert (ar.to_numpy(x) < 110.0).all()\n\n if backend == \"tensorflow\":\n x32 = ar.do(\n \"random.normal\",\n 100.0,\n 0.1,\n dtype=\"float32\",\n size=(4, 5),\n like=backend,\n )\n assert x32.dtype == \"float32\"\n assert (ar.to_numpy(x32) > 90.0).all()\n assert (ar.to_numpy(x32) < 110.0).all()\n\n # test default single scalar\n x = anp.random.normal(loc=1500, scale=10, like=backend)\n assert 1000 <= ar.to_numpy(x) < 2000\n\n\[email protected](\"backend\", BACKENDS)\ndef test_tril(backend):\n x = gen_rand((4, 4), backend)\n xl = ar.do(\"tril\", x)\n xln = ar.to_numpy(xl)\n assert xln[0, 1] == 0.0\n if backend != \"sparse\":\n # this won't work for sparse because density < 1\n assert (xln > 0.0).sum() == 10\n xl = ar.do(\"tril\", x, k=1)\n xln = ar.to_numpy(xl)\n if backend != \"sparse\":\n # this won't work for sparse because density < 1\n assert xln[0, 1] != 0.0\n assert xln[0, 2] == 0.0\n if backend != \"sparse\":\n # this won't work for sparse because density < 1\n assert (xln > 0.0).sum() == 13\n\n if backend == \"tensorflow\":\n with pytest.raises(ValueError):\n ar.do(\"tril\", x, -1)\n\n\[email protected](\"backend\", BACKENDS)\ndef test_triu(backend):\n x = gen_rand((4, 4), backend)\n xl = ar.do(\"triu\", x)\n xln = ar.to_numpy(xl)\n assert xln[1, 0] == 0.0\n if backend != \"sparse\":\n # this won't work for sparse because density < 1\n assert (xln > 0.0).sum() == 10\n xl = ar.do(\"triu\", x, k=-1)\n xln = ar.to_numpy(xl)\n if backend != \"sparse\":\n # this won't work for sparse because density < 1\n assert xln[1, 0] != 0.0\n assert xln[2, 0] == 0.0\n if backend != \"sparse\":\n # this won't work for sparse because density < 1\n assert (xln > 0.0).sum() == 13\n\n if backend == \"tensorflow\":\n with pytest.raises(ValueError):\n ar.do(\"triu\", x, 1)\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\"shape\", [(4, 3), (4, 4), (3, 4)])\ndef test_qr_thin_square_fat(backend, shape):\n if backend 
== \"sparse\":\n pytest.xfail(\"Sparse doesn't support linear algebra yet...\")\n x = gen_rand(shape, backend)\n Q, R = ar.do(\"linalg.qr\", x)\n xn, Qn, Rn = map(ar.to_numpy, (x, Q, R))\n assert ar.do(\"allclose\", xn, Qn @ Rn)\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\"array_dtype\", [\"int\", \"float\", \"bool\"])\ndef test_count_nonzero(backend, array_dtype):\n\n if backend == \"mars\":\n import mars\n\n if mars._version.version_info < (0, 4, 0, \"\"):\n pytest.xfail(\"mars count_nonzero bug fixed in version 0.4.\")\n if backend == \"ctf\" and array_dtype == \"bool\":\n pytest.xfail(\"ctf doesn't support bool array dtype\")\n\n if array_dtype == \"int\":\n x = ar.do(\"array\", [0, 1, 2, 0, 3], like=backend)\n elif array_dtype == \"float\":\n x = ar.do(\"array\", [0.0, 1.0, 2.0, 0.0, 3.0], like=backend)\n elif array_dtype == \"bool\":\n x = ar.do(\"array\", [False, True, True, False, True], like=backend)\n nz = ar.do(\"count_nonzero\", x)\n assert ar.to_numpy(nz) == 3\n\n\ndef test_pseudo_submodules():\n x = gen_rand((2, 3), \"numpy\")\n xT = ar.do(\"numpy.transpose\", x, like=\"autoray\")\n assert xT.shape == (3, 2)\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\"creation\", [\"ones\", \"zeros\"])\[email protected](\n \"dtype\", [\"float32\", \"float64\", \"complex64\", \"complex128\"]\n)\ndef test_dtype_specials(backend, creation, dtype):\n import numpy as np\n\n x = ar.do(creation, shape=(2, 3), like=backend)\n\n if backend == \"torch\" and \"complex\" in dtype:\n pytest.xfail(\"Pytorch doesn't support complex numbers yet...\")\n\n x = ar.astype(x, dtype)\n assert ar.get_dtype_name(x) == dtype\n x = ar.to_numpy(x)\n assert isinstance(x, np.ndarray)\n assert ar.get_dtype_name(x) == dtype\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\"real_dtype\", [\"float32\", \"float64\"])\ndef test_complex_creation(backend, real_dtype):\n if backend == \"torch\":\n pytest.xfail(\"Pytorch doesn't support complex numbers yet...\")\n if (backend == \"sparse\") and (real_dtype == \"float32\"):\n pytest.xfail(\n \"Bug in sparse where single precision isn't maintained \"\n \"after scalar multiplication.\"\n )\n\n if (backend == \"ctf\") and (real_dtype == \"float32\"):\n pytest.xfail(\n \"ctf currently doesn't preserve single precision when \"\n \"multiplying by python scalars.\"\n )\n\n x = ar.do(\n \"complex\",\n ar.astype(\n ar.do(\"random.uniform\", size=(3, 4), like=backend), real_dtype\n ),\n ar.astype(\n ar.do(\"random.uniform\", size=(3, 4), like=backend), real_dtype\n ),\n )\n assert (\n ar.get_dtype_name(x)\n == {\"float32\": \"complex64\", \"float64\": \"complex128\"}[real_dtype]\n )\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\n \"dtype_in,dtype_out\",\n [\n (\"float32\", \"float32\"),\n (\"float64\", \"float64\"),\n (\"complex64\", \"float32\"),\n (\"complex128\", \"float64\"),\n ],\n)\ndef test_real_imag(backend, dtype_in, dtype_out):\n x = gen_rand((3, 4), backend, dtype_in)\n\n re = ar.do(\"real\", x)\n im = ar.do(\"imag\", x)\n\n assert ar.infer_backend(re) == backend\n assert ar.infer_backend(im) == backend\n\n assert ar.get_dtype_name(re) == dtype_out\n assert ar.get_dtype_name(im) == dtype_out\n\n assert ar.do(\"allclose\", ar.to_numpy(x).real, ar.to_numpy(re))\n assert ar.do(\"allclose\", ar.to_numpy(x).imag, ar.to_numpy(im))\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\n \"dtype\", [\"float32\", \"float64\", \"complex64\", \"complex128\"],\n)\ndef test_linalg_solve(backend, dtype):\n if 
backend == \"sparse\":\n        pytest.xfail(\"Sparse doesn't support linear algebra yet...\")\n\n    A = gen_rand((4, 4), backend, dtype)\n    b = gen_rand((4, 1), backend, dtype)\n    x = ar.do(\"linalg.solve\", A, b)\n    assert ar.do(\"allclose\", ar.to_numpy(A @ x), ar.to_numpy(b),\n                 rtol=1e-3, atol=1e-6)\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\n    \"dtype\", [\"float32\", \"float64\", \"complex64\", \"complex128\"],\n)\ndef test_linalg_eigh(backend, dtype):\n    if backend == \"sparse\":\n        pytest.xfail(\"sparse doesn't support linalg.eigh yet.\")\n    if backend == \"dask\":\n        pytest.xfail(\"dask doesn't support linalg.eigh yet.\")\n    if backend == \"mars\":\n        pytest.xfail(\"mars doesn't support linalg.eigh yet.\")\n    if (backend == \"torch\") and (\"complex\" in dtype):\n        pytest.xfail(\"Pytorch doesn't fully support complex yet.\")\n\n    A = gen_rand((4, 4), backend, dtype)\n    A = A + ar.dag(A)\n    el, ev = ar.do(\"linalg.eigh\", A)\n    B = (ev * ar.reshape(el, (1, -1))) @ ar.dag(ev)\n    assert ar.do(\"allclose\", ar.to_numpy(A), ar.to_numpy(B), rtol=1e-3)\n\n\[email protected](\"backend\", BACKENDS)\ndef test_pad(backend):\n    if backend == \"sparse\":\n        pytest.xfail(\"sparse doesn't support pad yet.\")\n    if backend == \"mars\":\n        pytest.xfail(\"mars doesn't support pad yet.\")\n\n    A = gen_rand((3, 4, 5), backend)\n\n    for pad_width, new_shape in [\n        # same pad before and after for every axis\n        (2, (7, 8, 9)),\n        # same pad for every axis\n        (((1, 2),), (6, 7, 8)),\n        # different pad for every axis\n        (((4, 3), (2, 4), (3, 2)), (10, 10, 10)),\n    ]:\n        B = ar.do(\"pad\", A, pad_width)\n        assert B.shape == new_shape\n        assert ar.to_numpy(ar.do(\"sum\", A)) == pytest.approx(\n            ar.to_numpy(ar.do(\"sum\", B))\n        )\n\n\[email protected](\"backend\", BACKENDS)\ndef test_register_function(backend):\n    x = ar.do(\"ones\", shape=(2, 3), like=backend)\n\n    def direct_fn(x):\n        return 1\n\n    # first test we can provide the function directly\n    ar.register_function(backend, \"test_register\", direct_fn)\n    assert ar.do(\"test_register\", x) == 1\n\n    def wrap_fn(fn):\n        def new_fn(*args, **kwargs):\n            res = fn(*args, **kwargs)\n            return res + 1\n\n        return new_fn\n\n    # then check we can wrap the old (previous) function\n    ar.register_function(backend, \"test_register\", wrap_fn, wrap=True)\n    assert ar.do(\"test_register\", x) == 2\n\n\[email protected](\"backend\", BACKENDS)\ndef test_take(backend):\n    if backend == \"sparse\":\n        pytest.xfail(\"sparse doesn't support take yet\")\n    num_inds = 4\n    A = gen_rand((2, 3, 4), backend)\n    if backend == \"jax\":  # gen_rand doesn't work with ints for JAX\n        ind = gen_rand((num_inds,), \"numpy\", dtype=\"int64\")\n    else:\n        ind = gen_rand((num_inds,), backend, dtype=\"int64\")\n\n    # Take along axis 1, and check each taken slice against the source\n    B = ar.do(\"take\", A, ind, axis=1)\n    assert B.shape == (2, 4, 4)\n    for i in range(num_inds):\n        assert ar.do(\n            \"allclose\", ar.to_numpy(A[:, ind[i], :]), ar.to_numpy(B[:, i, :])\n        )\n    assert ar.infer_backend(A) == ar.infer_backend(B)\n\n\[email protected](\"backend\", BACKENDS)\ndef test_concatenate(backend):\n    mats = [gen_rand((2, 3, 4), backend) for _ in range(3)]\n\n    # Concatenate along axis 1, check if shape is correct\n    # also check if automatically inferring backend works\n    mats_concat1 = ar.do(\"concatenate\", mats, axis=1)\n    mats_concat2 = ar.do(\"concatenate\", mats, axis=1, like=backend)\n    assert mats_concat1.shape == mats_concat2.shape == (2, 9, 4)\n    assert (\n        backend\n        == ar.infer_backend(mats_concat1)\n        == 
ar.infer_backend(mats_concat2)\n )\n\n\[email protected](\"backend\", BACKENDS)\ndef test_stack(backend):\n mats = [gen_rand((2, 3, 4), backend) for _ in range(3)]\n\n # stack, creating a new axis (at position 0)\n # also check if automatically inferring backend works\n mats_stack1 = ar.do(\"stack\", mats)\n mats_stack2 = ar.do(\"stack\", mats, like=backend)\n assert mats_stack1.shape == mats_stack2.shape == (3, 2, 3, 4)\n assert (\n backend\n == ar.infer_backend(mats_stack1)\n == ar.infer_backend(mats_stack2)\n )\n\n\[email protected](\"backend\", BACKENDS)\ndef test_einsum(backend):\n if backend == \"sparse\":\n pytest.xfail(\"sparse doesn't support einsum yet\")\n A = gen_rand((2, 3, 4), backend)\n B = gen_rand((3, 4, 2), backend)\n C1 = ar.do(\"einsum\", \"ijk,jkl->il\", A, B, like=backend)\n C2 = ar.do(\"einsum\", \"ijk,jkl->il\", A, B)\n if backend not in (\"torch\", \"tensorflow\"): # this syntax is not supported\n C3 = ar.do(\"einsum\", A, [0, 1, 2], B, [1, 2, 3], [0, 3])\n else:\n C3 = C1\n C4 = ar.do(\"reshape\", A, (2, 12)) @ ar.do(\"reshape\", B, (12, 2))\n\n assert C1.shape == C2.shape == C3.shape == (2, 2)\n assert ar.do(\"allclose\", ar.to_numpy(C1), ar.to_numpy(C4))\n assert ar.do(\"allclose\", ar.to_numpy(C2), ar.to_numpy(C4))\n assert ar.do(\"allclose\", ar.to_numpy(C3), ar.to_numpy(C4))\n assert (\n ar.infer_backend(C1)\n == ar.infer_backend(C2)\n == ar.infer_backend(C3)\n == ar.infer_backend(C4)\n == backend\n )\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\"int_or_section\", [\"int\", \"section\"])\ndef test_split(backend, int_or_section):\n if backend == \"sparse\":\n pytest.xfail(\"sparse doesn't support split yet\")\n if backend == \"dask\":\n pytest.xfail(\"dask doesn't support split yet\")\n A = ar.do(\"ones\", (10, 20, 10), like=backend)\n if int_or_section == \"section\":\n sections = [2, 4, 14]\n splits = ar.do(\"split\", A, sections, axis=1)\n assert len(splits) == 4\n assert splits[3].shape == (10, 6, 10)\n else:\n splits = ar.do(\"split\", A, 5, axis=2)\n assert len(splits) == 5\n assert splits[2].shape == (10, 20, 2)\n\n\[email protected](\"backend\", BACKENDS)\ndef test_where(backend):\n if backend == \"sparse\":\n pytest.xfail(\"sparse doesn't support where yet\")\n A = ar.do(\"arange\", 10, like=backend)\n B = ar.do(\"arange\", 10, like=backend) + 1\n C = ar.do(\"stack\", [A, B])\n D = ar.do(\"where\", C < 5)\n if backend == \"dask\":\n for x in D:\n x.compute_chunk_sizes()\n for x in D:\n assert ar.to_numpy(x).shape == (9,)\n\n\[email protected](\"backend\", BACKENDS)\[email protected](\"dtype_str\", [\"float32\", \"float64\"])\[email protected](\n \"fn\", [\"random.normal\", \"random.uniform\", \"zeros\", \"ones\", \"eye\"]\n)\[email protected](\"str_or_backend\", (\"str\", \"backend\"))\ndef test_dtype_kwarg(backend, dtype_str, fn, str_or_backend):\n if str_or_backend == \"str\":\n dtype = dtype_str\n else:\n dtype = ar.to_backend_dtype(dtype_str, like=backend)\n\n if fn in (\"random.normal\", \"random.uniform\"):\n A = ar.do(fn, size=(10, 5), dtype=dtype, like=backend)\n elif fn in (\"zeros\", \"ones\"):\n A = ar.do(fn, shape=(10, 5), dtype=dtype, like=backend)\n else: # fn = 'eye'\n A = ar.do(fn, 10, dtype=dtype, like=backend)\n assert A.shape == (10, 10)\n A = ar.do(fn, 10, 5, dtype=dtype, like=backend)\n assert A.shape == (10, 5)\n assert ar.get_dtype_name(A) == dtype_str\n\n\[email protected](\"backend\", BACKENDS)\ndef test_get_common_dtype(backend):\n x = ar.do(\"ones\", (1,), like=backend, dtype='complex64')\n y = 
ar.do(\"ones\", (1,), like=backend, dtype='float64')\n assert ar.get_common_dtype(x, y) == 'complex128'\n\n\[email protected](\"backend\", BACKENDS)\ndef test_backend_like(backend):\n assert ar.get_backend() is None\n ar.set_backend('test')\n assert ar.get_backend() == 'test'\n ar.set_backend(None)\n assert ar.get_backend() is None\n with ar.backend_like(backend):\n assert ar.get_backend() == backend\n x = ar.do(\"ones\", (2,), like=backend)\n assert ar.infer_backend(x) == backend\n assert ar.get_backend() is None\n" ]
[ [ "numpy.conj", "numpy.linalg.norm", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
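As a quick illustration of the dispatch pattern the autoray tests above exercise (only functions that appear in the test file are used; the array values are arbitrary):

import autoray as ar
import numpy as np

x = np.random.uniform(size=(3, 4))
y = ar.do('sqrt', x)                   # backend inferred from x, dispatches to numpy.sqrt
assert ar.infer_backend(y) == 'numpy'
z = ar.do('ones', shape=(2, 2), like='numpy')  # choose the backend explicitly via like=
assert ar.to_numpy(z).sum() == 4.0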
Quitino/Non-LinearFactor-VIOSLAM
[ "f958a7480de35420479defaa829b6aa0c8a31b26" ]
[ "scripts/verify_dataset.py" ]
[ "#!/usr/bin/env python3\n\nimport sys\nimport math\nimport os\n\nimport numpy as np\n\ndataset_path = sys.argv[1]\n\nprint(dataset_path)\n\ntimestamps = {}\nexposures = {}\n\nfor sensor in ['cam0', 'cam1', 'imu0']:\n    data = np.loadtxt(dataset_path + '/mav0/' + sensor + '/data.csv', usecols=[0], delimiter=',', dtype=np.int64)\n    timestamps[sensor] = data\n\n# check if dataset is OK...\nfor key, value in timestamps.items():\n    times = value * 1e-9\n    min_t = times.min()\n    max_t = times.max()\n    interval = max_t - min_t\n    diff = times[1:] - times[:-1]\n    print('==========================================')\n    print('sensor', key)\n    print('min timestamp', min_t)\n    print('max timestamp', max_t)\n    print('interval', interval)\n    print('hz', times.shape[0] / interval)\n    print('min time between consecutive msgs', diff.min())\n    print('max time between consecutive msgs', diff.max())\n    for i, d in enumerate(diff):\n        # Note: 0.001 is just a hacky heuristic, since we have nothing faster than 1000Hz. Should maybe be topic-specific.\n        if d < 0.001:\n            print(\"ERROR: Difference on consecutive measurements too small: {} - {} = {}\".format(times[i + 1], times[i],\n                                                                                                 d) + ' in sensor ' + key)\n\n# check if we have all images for timestamps\ntimestamp_to_topic = {}\n\nfor key, value in timestamps.items():\n    if not key.startswith('cam'):\n        continue\n    for v in value:\n        if v not in timestamp_to_topic:\n            timestamp_to_topic[v] = list()\n        timestamp_to_topic[v].append(key)\n\nfor key in timestamp_to_topic.keys():\n    if len(timestamp_to_topic[key]) != 2:\n        print('timestamp', key, 'has topics', timestamp_to_topic[key])\n\n# check image data.\nimg_extensions = ['.png', '.jpg', '.webp']\nfor key, value in timestamps.items():\n    if not key.startswith('cam'):\n        continue\n    for v in value:\n        path = dataset_path + '/mav0/' + key + '/data/' + str(v)\n        img_exists = False\n        for e in img_extensions:\n            if os.path.exists(path + e):\n                img_exists = True\n\n        if not img_exists: \n            print('No image data for ' + key + ' at timestamp ' + str(v))\n    \n    exposure_file = dataset_path + '/mav0/' + key + '/exposure.csv'\n    if not os.path.exists(exposure_file):\n        print('No exposure data for ' + key)\n        continue\n    \n    exposure_data = np.loadtxt(exposure_file, delimiter=',', dtype=np.int64)\n    for v in value:\n        idx = np.searchsorted(exposure_data[:, 0], v)\n        # guard: searchsorted may return len(exposure_data) if v is past the last stamp\n        if idx >= exposure_data.shape[0] or exposure_data[idx, 0] != v:\n            print('No exposure data for ' + key + ' at timestamp ' + str(v))\n" ]
[ [ "numpy.loadtxt", "numpy.searchsorted" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
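The consecutive-timestamp check in verify_dataset.py above reduces to a vectorized first difference; a self-contained sketch with fabricated stamps:

import numpy as np

stamps_ns = np.array([0, 50_000_000, 50_300_000, 100_000_000], dtype=np.int64)
times = stamps_ns * 1e-9
diff = times[1:] - times[:-1]
# same 1 ms heuristic as the script: no sensor publishes faster than 1000 Hz
for i in np.nonzero(diff < 0.001)[0]:
    print('gap too small: {} - {} = {}'.format(times[i + 1], times[i], diff[i]))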
nicaogr/wildcat_box
[ "0d321dac85d4edbe615a3c609501a688f162f799", "0d321dac85d4edbe615a3c609501a688f162f799" ]
[ "wildcat/demo_watercolor.py", "wildcat/trainResNet50_MLP2_IconArt_v1.py" ]
[ "import argparse\n\nimport torch\nimport torch.nn as nn\n\nfrom wildcat.engine import MultiLabelMAPEngine\nfrom wildcat.models import resnet101_wildcat\nfrom wildcat.watercolor import watercolorClassification\n\nparser = argparse.ArgumentParser(description='WILDCAT Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset (e.g. ../data/')\nparser.add_argument('--image-size', '-i', default=224, type=int,\n metavar='N', help='image size (default: 224)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=16, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--lrp', '--learning-rate-pretrained', default=0.1, type=float,\n metavar='LR', help='learning rate for pre-trained layers')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=0, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--k', default=1, type=float,\n metavar='N', help='number of regions (default: 1)')\nparser.add_argument('--alpha', default=1, type=float,\n metavar='N', help='weight for the min regions (default: 1)')\nparser.add_argument('--maps', default=1, type=int,\n metavar='N', help='number of maps per class (default: 1)')\n\n\ndef main_watercolor():\n global args, best_prec1, use_gpu\n args = parser.parse_args()\n\n use_gpu = torch.cuda.is_available()\n\n # define dataset\n train_dataset = watercolorClassification(args.data, 'train')\n val_dataset = watercolorClassification(args.data, 'test')\n num_classes = 6\n\n # load model\n model = resnet101_wildcat(num_classes, pretrained=True, kmax=args.k, alpha=args.alpha, num_maps=args.maps)\n print('classifier', model.classifier)\n print('spatial pooling', model.spatial_pooling)\n\n # define loss function (criterion)\n criterion = nn.MultiLabelSoftMarginLoss()\n\n # define optimizer\n optimizer = torch.optim.SGD(model.get_config_optim(args.lr, args.lrp),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n state = {'batch_size': args.batch_size, 'image_size': args.image_size, 'max_epochs': args.epochs,\n 'evaluate': args.evaluate, 'resume': args.resume}\n state['difficult_examples'] = True\n state['save_model_path'] = '../expes/models/watercolor/'\n\n engine = MultiLabelMAPEngine(state)\n engine.learning(model, criterion, train_dataset, val_dataset, optimizer)\n\n\nif __name__ == '__main__':\n main_watercolor()\n", "import argparse\n\nimport torch\nimport torch.nn as nn\n\nfrom wildcat.engine import MultiLabelMAPEngine\nimport torchvision.models as models\nfrom wildcat.IconArt_v1 import main_IconArt_v1Classification\nfrom wildcat.boxesPredict 
import object_localization\n\nfrom wildcat.tf_faster_rcnn.lib.datasets.factory import get_imdb\n\nfrom shutil import copyfile\nfrom PIL import Image, ImageDraw, ImageFont\nimport numpy as np\nfrom torch.autograd import Variable\nfrom wildcat.LatexOuput import arrayToLatex\nimport matplotlib.pyplot as plt\nfrom wildcat.util import draw_bboxes\n\nimport torchvision.transforms as transforms\nfrom wildcat.util import AveragePrecisionMeter, Warp\n\nobject_categories = ['angel','Child_Jesus', 'crucifixion_of_Jesus',\n 'Mary','nudity', 'ruins','Saint_Sebastien']\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='WILDCAT Training')\n parser.add_argument('data', metavar='DIR',\n help='path to dataset (e.g. ../data/')\n parser.add_argument('--image_size', '-i', default=224, type=int,\n metavar='N', help='image size (default: 224)')\n parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n parser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n parser.add_argument('-b', '--batch_size', default=16, type=int,\n metavar='N', help='mini-batch size (default: 256)')\n parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate')\n parser.add_argument('--lrp', '--learning-rate-pretrained', default=0.1, type=float,\n metavar='LR', help='learning rate for pre-trained layers')\n parser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\n parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n parser.add_argument('--print-freq', '-p', default=0, type=int,\n metavar='N', help='print frequency (default: 10)')\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n parser.add_argument('--k', default=1, type=float,\n metavar='N', help='number of regions (default: 1)')\n parser.add_argument('--alpha', default=1, type=float,\n metavar='N', help='weight for the min regions (default: 1)')\n parser.add_argument('--maps', default=1, type=int,\n metavar='N', help='number of maps per class (default: 1)')\n parser.add_argument('--kernel_size', default=1, type=int,\n metavar='N', help='kernel size in the last layer (default: 1)')\n parser.add_argument('--kernel_size_lcp', default=1, type=int,\n metavar='N', help='kernel size in the last layer for LCP pooling (default: 1)')\n parser.add_argument('--test', action=\"store_true\",\n help='Use this command to eval the detection performance of the model')\n parser.add_argument('--classif', action=\"store_true\",\n help='Use this command to eval the classification performance of the model')\n parser.add_argument('--plot', action=\"store_true\",\n help='Use this command to plot the bounding boxes.')\n parser.add_argument('--att', action=\"store_true\",\n help='Use this command to use the attention model.')\n parser.add_argument('--same_kernel', action=\"store_true\",\n help='Use this command to have the same kernels weights and biases on all the maps.')\n parser.add_argument('--save_init_model', action=\"store_true\",\n help='Use this command to save the model before 
optimization.')\n parser.add_argument('--ext', default='', type=str,\n help='Extension added to the name of the model saved (default: '')')\n parser.add_argument('--mode', default='', type=str,\n choices=['','Direct','LCP','LCPPReLU','LCPRReLU'],\n help='Modification of the default WILDCAT algo to have different kernel learned (default: '')')\n parser.add_argument('--init', default='', type=str,\n choices=['','uniform_div_std_maps','xavier_uniform','kaiming_uniform','orthogonal'],\n help='Modification of the default WILDCAT algo to have different kernel learned (default: '')')\n return(parser)\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, model, num_classes):\n super(ResNet, self).__init__()\n\n self.num_classes = num_classes\n\n self.features = nn.Sequential(\n model.conv1,\n model.bn1,\n model.relu,\n model.maxpool,\n model.layer1,\n model.layer2,\n model.layer3,\n model.layer4)\n\n # Pooling \n self.spatial_pooling = nn.AvgPool2d(7)\n # suppose x is your feature map with size N*C*H*W\n #x = torch.mean(x.view(x.size(0), x.size(1), -1), dim=2)\n # now x is of size N*C\n\n # classification layer\n num_features = model.layer4[1].conv1.in_channels\n input_size = 2048 # number of features from the ResNet50\n hidden_size = 256\n self.classifier = nn.Sequential(nn.Linear(input_size,hidden_size),\n nn.ReLU(),nn.Linear(hidden_size, num_classes),nn.Sigmoid())\n\n # image normalization\n self.image_normalization_mean = [0.485, 0.456, 0.406]\n self.image_normalization_std = [0.229, 0.224, 0.225]\n\n def forward(self, x):\n x = self.features(x)\n x = self.spatial_pooling(x)\n x = x.view(-1,2048)\n x = self.classifier(x)\n return x\n\n def get_config_optim(self, lr, lrp):\n return [{'params': self.features.parameters(), 'lr': lr * lrp},\n {'params': self.classifier.parameters()},\n {'params': self.spatial_pooling.parameters()}]\n\ndef resnet50_model(num_classes, pretrained=True):\n model = models.resnet50(pretrained)\n \n return ResNet(model, num_classes)\n\ndef main():\n global args, best_prec1, use_gpu\n parser = get_parser()\n args = parser.parse_args()\n train_or_test_IconArt_v1(args)\n\ndef train_or_test_IconArt_v1(args):\n\n model_name_base = 'ResNet50_MLP2_model_im'+str(args.image_size)+'_bs'+str(args.batch_size)+\\\n '_lrp'+str(args.lrp)+'_lr'+str(args.lr)+'_ep'+str(args.epochs)+'_k'+str(args.k)+\\\n '_a'+str(args.alpha)+'_m'+str(args.maps)\n \n model_name = model_name_base+'.pth.tar'\n \n use_gpu = torch.cuda.is_available()\n\n if not(args.test) and not(args.classif):\n print(\"Training\")\n\n # define dataset\n train_dataset = main_IconArt_v1Classification(args.data, 'trainval')\n val_dataset = main_IconArt_v1Classification(args.data, 'trainval')\n num_classes = 7\n\n # load model\n model = resnet50_model(num_classes, pretrained=True)\n\n # define loss function (criterion)\n criterion = nn.MultiLabelSoftMarginLoss()\n\n # define optimizer\n optimizer = torch.optim.SGD(model.get_config_optim(args.lr, args.lrp),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n state = {'batch_size': args.batch_size, 'image_size': args.image_size, 'max_epochs': args.epochs,\n 'evaluate': args.evaluate, 'resume': args.resume}\n state['difficult_examples'] = True\n state['save_model_path'] = 'expes/models/IconArt_v1/'\n engine = MultiLabelMAPEngine(state)\n engine.learning(model, criterion, train_dataset, val_dataset, \n optimizer)\n\n # Copy the checkpoint with a new name\n \n path = state['save_model_path']\n src = path + 'model_best.pth.tar'\n dst = path + model_name\n 
copyfile(src, dst)\n else:\n print(\"Testing\")\n PATH = 'expes/models/IconArt_v1/'+model_name\n state = {'batch_size': args.batch_size, 'image_size': args.image_size, 'max_epochs': args.epochs,\n 'evaluate': args.evaluate, 'resume': PATH}\n state['difficult_examples'] = True\n state['save_model_path'] = 'expes/models/IconArt_v1/'\n use_gpu = torch.cuda.is_available()\n state['use_gpu'] = use_gpu\n\n with_gt = False\n multiscale = False\n num_classes = 7\n model = resnet50_model(num_classes, pretrained=True)\n model.train(False)\n state_dict_all = torch.load(PATH)\n best_epoch = state_dict_all[\"epoch\"]\n best_score = state_dict_all[\"best_score\"]\n state_dict = state_dict_all[\"state_dict\"]\n model.load_state_dict(state_dict)\n\n #classwise_feature_maps = []\n #def hook(module, input1, output2):\n #classwise_feature_maps.append(output2)\n\n #model.spatial_pooling.class_wise.register_forward_hook(hook)\n\n normalize = transforms.Normalize(mean=model.image_normalization_mean,\n std=model.image_normalization_std)\n image_transform = transforms.Compose([\n Warp(args.image_size),\n transforms.ToTensor(),\n normalize,])\n\n if use_gpu:\n model = model.to('cuda')\n #model = deepcopy(model).cuda()\n #model.load_state_dict(torch.load(PATH))\n #model.eval()\n #model.get_saved_model()\n #model = utils.load_model_iconart(model_path, multiscale=multiscale, scale=560)\n\n case = 'WILDCAT detections '\n if multiscale:\n case += ' Multiscale '\n if with_gt:\n case += ' With Ground Truth classification '\n print('===',case,'===')\n\n val_dataset = main_IconArt_v1Classification(args.data, 'test')\n val_dataset.transform = image_transform\n criterion = nn.MultiLabelSoftMarginLoss()\n # MultiLabelSoftMarginLoss applies a sigmoid to the output of the model and then takes the mean over classes of the binary cross entropy loss\n\t\t# https://pytorch.org/docs/stable/nn.html\n \n engine = MultiLabelMAPEngine(state)\n #engine.get_saved_model()\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=state['batch_size'], shuffle=False,\n num_workers=state['workers'])\n if args.classif:\n engine.validate(val_loader, model, criterion)\n if not(args.test):\n return(0)\n\n if args.test:\n print('-------- test detection --------')\n database = 'IconArt_v1_test'\n imdb = get_imdb('IconArt_v1_test')\n imdb.set_use_diff(True)\n dont_use_07_metric = True\n imdb.set_force_dont_use_07_metric(dont_use_07_metric)\n\n max_per_image = 100\n num_images_detect = len(imdb.image_index)\n all_boxes_order = [[[] for _ in range(num_images_detect)] for _ in range(imdb.num_classes)]\n \n plot = args.plot\n if plot:\n plt.ion()\n import pathlib\n folder = '/media/HDD/output_exp/WILDCAT/'+ 'WILDCAT_'+model_name_base+'/'\n pathlib.Path(folder).mkdir(parents=True, exist_ok=True)\n Itera = 500\n for i in range(num_images_detect):\n image_name = imdb.image_path_at(i)\n if i%Itera==0:\n print(i,' : ',image_name)\n with torch.no_grad():\n image_raw = Image.open(image_name).convert(\"RGB\")\n image_normalized = image_transform(image_raw).float()\n #image_normalized = torch.from_numpy(np.array(image_raw)).permute(2, 0, 1)\n if use_gpu:\n #image_normalized= image_normalized.cuda()\n #image_normalized= image_normalized.float().div(255)\n image_normalized= image_normalized.cuda()\n # This model needs an RGB image scaled between 0 and 1, minus the ImageNet mean\n #image_normalized = image_normalized.index_select(0, torch.LongTensor([2,1,0]).cuda())\n #image_normalized = (image_normalized - torch.Tensor([0.485, 0.456, 
0.406]).cuda().view(3, 1, 1))\n input_var = Variable(image_normalized.unsqueeze(0))\n #input_var = Variable(image_normalized.unsqueeze(0), volatile=True)\n if not(with_gt):\n gt_labels_minus1 = None\n else:\n image_name_split = image_name.split('/')[-1]\n image_name_split = image_name_split.split('.')[0]\n gt_labels = np.unique(imdb._load_pascal_annotation(image_name_split)['gt_classes'])\n #print('gt_labels',gt_labels)\n gt_labels_minus1 = gt_labels -1 # Background as class 0\n #gt_labels_minus = None\n\n #preds, labels = object_localization(model_dict, input_var, location_type='bbox',gt_labels=None)\n #print('labels',labels)\n #print('labels.cpu().numpy()',labels.cpu().numpy())\n \n preds, labels = object_localization(model, input_var, location_type='bbox',\n gt_labels=gt_labels_minus1,size=args.image_size)\n #print('gt_labels_minus1',gt_labels_minus1)\n #print('labels',labels)\n #print('preds',preds)\n\n # We need to resize the boxes to the real size of the image that have been wrap to (args.image_size, args.image_size)\n x_,y_,_ = np.array(image_raw).shape # when you use cv2 the x and y are inverted\n x_scale = x_ / args.image_size\n y_scale = y_ / args.image_size\n #print(x_scale,y_scale)\n for ii,box in enumerate(preds):\n #print(x_,y_)\n #print(box)\n (classe,origLeft, origTop, origRight, origBottom,score) = box\n x = origLeft * x_scale\n y = origTop * y_scale\n xmax = origRight * x_scale\n ymax = origBottom * y_scale\n preds[ii] = [classe,x, y, xmax, ymax,score]\n #print(preds[ii])\n\n if plot:\n preds_np =np.array(preds)\n if not(len(preds_np)==0):\n inds = np.where(np.array(preds_np)[:,-1]>0.)\n if not(len(inds)==0):\n inds = inds[0]\n preds_plot = preds_np[inds,:]\n labels_plot = preds_plot[:,0]\n img = Image.open(image_name)\n #img_resized = img.resize((args.image_size, args.image_size), Image.ANTIALIAS)\n img_draw = draw_bboxes(img, preds_plot, object_categories)\n plt.imshow(img_draw)\n tmp = image_name.split('/')[-1]\n name_output = folder + tmp.split('.')[0] +'_Regions.jpg'\n plt.axis('off')\n plt.tight_layout()\n #plt.show()\n plt.savefig(name_output, dpi=300)\n #input('wait')\n\n for j in range(len(preds)):\n index_c = preds[j][0]+1\n if len(all_boxes_order[index_c][i])==0:\n all_boxes_order[index_c][i] = np.array([preds[j][1:]])\n else:\n all_boxes_order[index_c][i] = np.vstack((preds[j][1:],all_boxes_order[index_c][i]))\n #if not(with_gt):\n # for c in labels:\n # all_boxes_order[c+1][i] = np.array(all_boxes_order[c+1][i])\n #else: # gt cases\n # for c in labels:\n # all_boxes_order[c+1][i] = np.array(all_boxes_order[c+1][i])\n\n output_dir = 'tmp/'\n aps = imdb.evaluate_detections(all_boxes_order, output_dir)\n apsAt05 = aps\n print(\"Detection score (thres = 0.5): \",database)\n print(arrayToLatex(aps,per=True))\n ovthresh_tab = [0.3,0.1,0.]\n for ovthresh in ovthresh_tab:\n aps = imdb.evaluate_localisation_ovthresh(all_boxes_order, output_dir,ovthresh)\n if ovthresh == 0.1:\n apsAt01 = aps\n print(\"Detection score with thres at \",ovthresh)\n print(arrayToLatex(aps,per=True))\n #imdb.set_use_diff(True) # Modification of the use_diff attribute in the imdb\n #aps = imdb.evaluate_detections(all_boxes_order, output_dir)\n #print(\"Detection score with the difficult element\")\n #print(arrayToLatex(aps,per=True))\n #imdb.set_use_diff(False)\n\n\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.MultiLabelSoftMarginLoss", "torch.cuda.is_available" ], [ "numpy.array", "torch.nn.Sequential", "matplotlib.pyplot.imshow", "matplotlib.pyplot.tight_layout", "torch.load", "torch.nn.MultiLabelSoftMarginLoss", "torch.utils.data.DataLoader", "matplotlib.pyplot.savefig", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.no_grad", "torch.cuda.is_available", "matplotlib.pyplot.axis", "torch.nn.ReLU", "matplotlib.pyplot.ion", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
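The comment in the WILDCAT script above about nn.MultiLabelSoftMarginLoss can be checked numerically: with default reduction it matches a sigmoid followed by binary cross-entropy averaged over classes, i.e. nn.BCEWithLogitsLoss. A minimal sketch, assuming a recent PyTorch:

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
logits = torch.randn(4, 7)                      # batch of 4, 7 labels (as in IconArt)
targets = torch.randint(0, 2, (4, 7)).float()   # multi-hot labels

a = nn.MultiLabelSoftMarginLoss()(logits, targets)
b = nn.BCEWithLogitsLoss()(logits, targets)     # sigmoid + BCE, mean over all elements
print(torch.allclose(a, b))                     # True
```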
fengwang/aydin
[ "22d7db71de168510bd3aa98fc22384deb8d9916f" ]
[ "aydin/features/groups/uniform.py" ]
[ "from functools import reduce\nfrom operator import mul\nfrom typing import Tuple, Union, Sequence\n\nimport numexpr\nimport numpy\nfrom numba import jit, cuda\nfrom numba.cuda import CudaSupportError\n\nfrom aydin.features.groups.base import FeatureGroupBase\n\n\nfrom aydin.util.array.nd import nd_range_radii\nfrom aydin.util.fast_shift.fast_shift import fast_shift\nfrom aydin.util.fast_uniform_filter.numba_cpu_uf import numba_cpu_uniform_filter\nfrom aydin.util.fast_uniform_filter.parallel_uf import parallel_uniform_filter\nfrom aydin.util.log.log import lprint\n\n\n# Removes duplicates without changing list's order:\ndef _remove_duplicates(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\n\n\nclass UniformFeatures(FeatureGroupBase):\n \"\"\"\n Uniform Feature Group class\n \"\"\"\n\n def __init__(\n self,\n kernel_widths=None,\n kernel_scales=None,\n kernel_shapes=None,\n min_level=0,\n max_level=13,\n include_scale_one=False,\n include_fine_features=True,\n include_corner_features=False,\n include_line_features=False,\n decimate_large_scale_features=True,\n extend_large_scale_features=False,\n scale_one_width=3,\n dtype=numpy.float32,\n ):\n super().__init__()\n\n # Setting up default features:\n # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n if kernel_widths is None:\n kernel_widths = []\n if include_scale_one:\n kernel_widths += [scale_one_width]\n if include_fine_features:\n kernel_widths += [3, 3, 3]\n if include_line_features:\n kernel_widths += [3, 3, 3]\n kernel_widths += [3] * 10\n if kernel_scales is None:\n kernel_scales = []\n if include_scale_one:\n kernel_scales += [1]\n if include_fine_features:\n kernel_scales += [3, 5, 7]\n if include_line_features:\n kernel_scales += [3, 5, 7]\n kernel_scales += [2 ** i - 1 for i in range(2, 12)]\n if kernel_shapes is None:\n kernel_shapes = []\n if include_scale_one:\n kernel_shapes += ['li']\n if include_fine_features:\n special_feature_shape = '#linc' if include_corner_features else '#l1nc'\n kernel_shapes += [special_feature_shape] * 3\n if include_line_features:\n kernel_shapes += ['|l1'] * 3\n\n prefix = '*' if extend_large_scale_features else ''\n\n kernel_shapes += [prefix + 'l2'] * 3\n\n if decimate_large_scale_features:\n kernel_shapes += [prefix + 'l1nc'] * 4 + [prefix + 'l1oc'] * 3\n else:\n kernel_shapes += [prefix + 'l1'] * 4 + [prefix + 'l1'] * 3\n\n self.kernel_widths = kernel_widths\n self.kernel_scales = kernel_scales\n self.kernel_shapes = kernel_shapes\n\n self.min_level = min_level\n self.max_level = max_level\n\n self.dtype = dtype\n\n self._feature_descriptions_list = None\n self._size_to_full_feature = None\n\n self.image = None\n self.gpu_image = None\n self.cuda_stream = None\n self.original_dtype = None\n self.excluded_voxels: Sequence[Tuple[int, ...]] = []\n self.args = None\n self.kwargs = None\n\n def _ensure_feature_description_available(self, ndim: int):\n if (\n self._feature_descriptions_list is None\n or len(self._feature_descriptions_list[0][0]) != ndim\n ):\n self._feature_descriptions_list = self._get_feature_descriptions_list(ndim)\n\n def _get_feature_descriptions_list(self, ndim: int):\n \"\"\"\n Get feature descriptions\n\n Parameters\n ----------\n ndim : int\n\n Returns\n -------\n feature_description_list\n\n \"\"\"\n feature_description_list = []\n\n level = 0\n for width, scale, shape in zip(\n self.kernel_widths, self.kernel_scales, self.kernel_shapes\n ):\n # Check if we have passed the max number of features already:\n # Important: We 
might overshoot a bit, but that's ok to make sure we get all features at a given scale...\n if level >= self.max_level:\n break\n elif level < self.min_level:\n level += 1\n continue\n level += 1\n\n # Computing the radius:\n radius = width // 2\n\n # We compute the radii along the different dimensions:\n radii = list((max(1, radius),) * ndim)\n\n # We generate all feature shift vectors:\n features_shifts = list(nd_range_radii(radii))\n\n # print(f'Feature shifts: {features_shifts}')\n\n # For each feature shift we append to the feature description list:\n for feature_shift in features_shifts:\n\n # Excluding the center pixel/feature:\n if scale == 1 and feature_shift == (0,) * ndim:\n continue\n\n # if scale == 2 and width == 1:\n # effective_shift = feature_shift\n # negative_extent = (0,) * ndim\n # positive_extent = (2,) * ndim\n\n # Different 'shapes' of feature distributions:\n if 'l1' in shape and sum([abs(i) for i in feature_shift]) > radius:\n continue\n elif (\n 'l2' in shape\n and sum([i * i for i in feature_shift]) > radius * radius\n ):\n continue\n elif 'li' in shape:\n pass\n\n # keep only center (oc) or remove all that are center (nc)\n if 'oc' in shape and sum([abs(i) for i in feature_shift]) > 0:\n continue\n elif 'nc' in shape and sum([abs(i) for i in feature_shift]) == 0:\n continue\n\n hscale = scale // 2\n if '#' in shape:\n effective_shift = tuple(0 for _ in feature_shift)\n negative_extent = tuple(\n (hscale if s == 0 else hscale * abs(max(0, s)))\n for d, s in zip(range(ndim), feature_shift)\n )\n positive_extent = tuple(\n (hscale if s == 0 else hscale * abs(min(0, s)))\n for d, s in zip(range(ndim), feature_shift)\n )\n elif '|' in shape:\n effective_shift = tuple(i * hscale for i in feature_shift)\n negative_extent = tuple(\n (hscale if s == 0 else abs(max(0, s)))\n for d, s in zip(range(ndim), feature_shift)\n )\n positive_extent = tuple(\n (hscale if s == 0 else abs(min(0, s)))\n for d, s in zip(range(ndim), feature_shift)\n )\n elif '*' in shape:\n effective_shift = tuple(i * scale for i in feature_shift)\n negative_extent = tuple(\n (\n max(0, hscale * (2 + radius))\n if s == 0\n else (abs(s) * scale - 2 if s > 0 else hscale)\n )\n for d, s in zip(range(ndim), feature_shift)\n )\n positive_extent = tuple(\n (\n max(0, hscale * (2 + radius))\n if s == 0\n else (abs(s) * scale - 2 if s < 0 else hscale)\n )\n for d, s in zip(range(ndim), feature_shift)\n )\n else:\n effective_shift = tuple(i * scale for i in feature_shift)\n negative_extent = (hscale,) * ndim\n positive_extent = (hscale,) * ndim\n\n feature_description = (\n effective_shift,\n negative_extent,\n positive_extent,\n shape,\n )\n\n # Now we check if the feature overlaps with any excluded voxels:\n # if check_for_excluded_voxels(\n # effective_shift, negative_extent, positive_extent, excluded_voxels\n # ):\n # continue\n\n # We append the feature description:\n feature_description_list.append(feature_description)\n\n # Some features might be identical due to the aspect ratio, we eliminate duplicates:\n no_duplicate_feature_description_list = _remove_duplicates(\n feature_description_list\n )\n # We save the last computed feature description list for debug purposes:\n self.debug_feature_description_list = feature_description_list\n # We check and report how many duplicates were eliminated:\n number_of_duplicates = len(feature_description_list) - len(\n no_duplicate_feature_description_list\n )\n\n lprint(f\"Number of duplicate features: {number_of_duplicates}\")\n feature_description_list = 
no_duplicate_feature_description_list\n return feature_description_list\n\n @property\n def receptive_field_radius(self) -> int:\n radius = 0\n counter = 0\n for width, scale in zip(self.kernel_widths, self.kernel_scales):\n\n if counter > self.max_level:\n break\n\n radius = max(radius, width * scale // 2)\n counter += 1\n\n return radius\n\n def num_features(self, ndim: int) -> int:\n self._ensure_feature_description_available(ndim)\n return len(self._feature_descriptions_list)\n\n def prepare(self, image, excluded_voxels=None, **kwargs):\n if excluded_voxels is None:\n excluded_voxels = []\n\n # Save original image dtype:\n self.original_dtype = image.dtype\n\n # Scipy does not support float16 yet:\n dtype = (\n numpy.float32\n if self.original_dtype == numpy.float16\n else self.original_dtype\n )\n image = image.astype(dtype=dtype, copy=False)\n\n self.image = image\n self.excluded_voxels = excluded_voxels\n self.kwargs = kwargs\n\n # Let's make sure we have the descriptions of the features:\n self._ensure_feature_description_available(image.ndim)\n\n size_to_feature = {}\n for feature_description in self._feature_descriptions_list:\n # Unpacking the description:\n translation, negative_extent, positive_extent, shape = feature_description\n\n # Calculating the uniform feature size:\n size = tuple((n + 1 + s for n, s in zip(negative_extent, positive_extent)))\n\n # Let's check that the feature is not already computed:\n if size not in size_to_feature:\n lprint(f\"Pre-computing uniform filter of size: {size}\")\n # Compute the feature\n feature = self._compute_uniform_filter(image, size=size)\n\n # save feature in cache:\n size_to_feature[size] = feature\n\n self._size_to_full_feature = size_to_feature\n\n def compute_feature(self, index: int, feature):\n\n feature_description = self._feature_descriptions_list[index]\n lprint(\n f\"Uniform feature: {index}, description: {feature_description}, excluded_voxels={self.excluded_voxels}\"\n )\n\n # Unpacking the description:\n translation, negative_extent, positive_extent, shape = feature_description\n\n # Calculating the uniform feature size:\n size = tuple((n + 1 + s for n, s in zip(negative_extent, positive_extent)))\n\n # Fetching the corresponding full feature:\n full_feature = self._size_to_full_feature[size]\n\n # We use the full uniform filter result and modify it accordingly:\n self._translate_and_exclude_center_value(\n self.image, full_feature, feature, feature_description, self.excluded_voxels\n )\n\n def _translate_and_exclude_center_value(\n self, image, feature_in, feature_out, feature_description, excluded_voxels\n ):\n \"\"\"\n It is not recommended to optimise this function as there are some very technical points happening here,\n and the key functions that need optimizing have been externalised anyway (see below).\n \"\"\"\n\n # This function exists to facilitate the implementation of optimised versions of it.\n\n # Unpacking the description:\n translation, negative_extent, positive_extent, shape = feature_description\n\n # Adjust translation given the extents:\n # There is something a bit tricky here, this term: ' + (p - n) '\n # That's needed to take into account the fact that the center of a\n # uniform filter of even shape does not land on a voxel...\n translation_adjusted = tuple(\n (\n -t + (p - n) // 2\n for t, n, p in zip(translation, negative_extent, positive_extent)\n )\n )\n\n # We check that the translation is not trivial:\n if any(abs(t) > 0 for t in translation_adjusted):\n self._translate_image(feature_in, feature_out, translation_adjusted)\n else:\n # If the translation does not translate anything then let's just not translate, right?\n feature_out[...] = feature_in\n\n # We store here the sum of excluded values to be able to subtract:\n excluded_values_sum = None\n # And count how many voxels are effectively excluded:\n excluded_count = 0\n\n for excluded_voxel in excluded_voxels:\n # Is the center voxel within the filter footprint?\n center_value_within_footprint = all(\n -n <= e - t <= p\n for e, t, n, p in zip(\n excluded_voxel, translation, negative_extent, positive_extent\n )\n )\n if center_value_within_footprint:\n lprint(f\"excluded voxel: {excluded_voxel}\")\n\n # We increment the exclusion count:\n excluded_count += 1\n\n # Just-in-time allocation:\n if excluded_values_sum is None:\n excluded_values_sum = numpy.zeros_like(image)\n\n # In this case we remove the excluded value, first we:\n if any(abs(ev) > 0 for ev in excluded_voxel):\n excluded_values_sum = self._translate_and_add_image(\n excluded_values_sum, excluded_voxel, image\n )\n else:\n _fast_inplace_add(excluded_values_sum, image)\n\n # We check if we need to touch the feature at all:\n if excluded_count > 0:\n\n # Calculating the uniform feature size:\n size = tuple((n + 1 + s for n, s in zip(negative_extent, positive_extent)))\n\n # we compute the volume of the footprint in voxels:\n footprint_volume = prod(size)\n\n # If we have more than one excluded voxel, we need to be careful: if the proportion of excluded voxels\n # in a feature becomes too large (> 10%), we need to exclude that feature entirely, otherwise there will be\n # too much of a difference between the feature with or without excluded voxels, which would be too confusing for the regressor.\n # Some of this comes from empirical evidence.\n num_of_excluded_voxels = len(excluded_voxels)\n exclude_feature_entirely = num_of_excluded_voxels > 1 and (\n excluded_count / footprint_volume > 0.1\n )\n\n if exclude_feature_entirely:\n # If all voxels of the footprint are excluded then the whole feature must be zeroed out:\n feature_out[...] = 0\n else:\n # Then we compute the correction factor so that the feature is the average of the remaining voxels:\n _apply_correction(\n feature_out, excluded_values_sum, footprint_volume, excluded_count\n )\n\n def _compute_uniform_filter(self, image, size):\n \"\"\"\n Override this method to provide an accelerated version\n \"\"\"\n\n def no_cuda_cpu_mode():\n # No CUDA? we use CPU mode instead:\n # Different methods perform differently based on filter size:\n max_size = max(size) if isinstance(size, tuple) else size\n if max_size > 128:\n # Numba scales well for large filter sizes:\n output = numba_cpu_uniform_filter(image, size=size, mode=\"nearest\")\n lprint(f\"Computed filter of size: {size} with Numba\")\n else:\n # Scipy parallel is more efficient for small filter sizes:\n output = parallel_uniform_filter(image, size=size, mode=\"nearest\")\n lprint(f\"Computed filter of size: {size} with parallel scipy\")\n\n return output\n\n if image.size < 1024:\n lprint(\"Image too small, CUDA not needed!\")\n output = no_cuda_cpu_mode()\n else:\n try:\n # No point in using CUDA for very small images!\n # Let's try CUDA first:\n # Note: this is not optimal as the image is pushed to GPU every time...\n from aydin.util.fast_uniform_filter.numba_gpu_uf import (\n numba_gpu_uniform_filter,\n )\n\n if self.cuda_stream is None:\n self.cuda_stream = cuda.stream()\n\n if self.gpu_image is None:\n image = numpy.ascontiguousarray(image)\n self.gpu_image = cuda.to_device(image, stream=self.cuda_stream)\n\n output = numba_gpu_uniform_filter(\n self.gpu_image,\n size=size,\n mode=\"nearest\",\n cuda_stream=self.cuda_stream,\n )\n lprint(f\"Computed filter of size: {size} with CUDA\")\n except Exception as e:\n if isinstance(e, CudaSupportError):\n lprint(\n \"CUDA not supported on this machine, falling back to numba and scipy.\"\n )\n else:\n import sys\n\n error_str = (str(sys.exc_info()[0]) + ', and: ' + str(e)).replace(\n '\\n', ', '\n )\n lprint(\n f\"Cannot use CUDA for computing uniform filter because of: {error_str}\"\n )\n output = no_cuda_cpu_mode()\n\n # Ensure correct type:\n dtype = image.dtype if self.dtype is None else self.dtype\n dtype = numpy.float32 if dtype == numpy.float16 else dtype\n output = output.astype(dtype=dtype, copy=False)\n\n return output\n\n def _translate_and_add_image(self, sum_image, translation, image):\n \"\"\"\n Override this method to provide an accelerated version\n \"\"\"\n fast_shift(image, shift=tuple(translation), output=sum_image, add=True)\n return sum_image\n\n def _translate_image(self, feature_in, feature_out, translation):\n \"\"\"\n Override this method to provide an accelerated version\n \"\"\"\n fast_shift(feature_in, shift=tuple(translation), output=feature_out)\n\n def finish(self):\n # Here we clean up any resources allocated for the last feature computation:\n self.image = None\n self.excluded_voxels = None\n self.kwargs = None\n self._feature_descriptions_list = None\n self._size_to_full_feature = None\n self.gpu_image = None\n self.cuda_stream = None\n\n\ndef _apply_correction(\n feature, excluded_values_sum, footprint_volume: int, excluded_count: int\n):\n if (\n feature.dtype == numpy.int16\n or feature.dtype == numpy.uint16\n or feature.dtype == numpy.int8\n or feature.dtype == numpy.uint8\n ):\n # feature_float = feature.astype(dtype=numpy.float32, copy=False)\n # _apply_correction_numba(feature_float, excluded_values_sum, footprint_volume, excluded_count)\n # feature[...] 
= feature_float.astype(dtype=feature.dtype, copy=False)\n alpha = footprint_volume / (footprint_volume - excluded_count)\n beta = -alpha / footprint_volume # noqa: F841\n\n numexpr.evaluate(\n \"alpha*feature + beta*excluded_values_sum\", out=feature, casting='unsafe'\n )\n else:\n _apply_correction_numba(\n feature, excluded_values_sum, footprint_volume, excluded_count\n )\n\n\n@jit(\n nopython=True,\n # parallel=True,\n error_model='numpy',\n fastmath={'contract', 'afn', 'reassoc'},\n)\ndef _apply_correction_numba(\n feature, excluded_values_sum, footprint_volume: int, excluded_count: int\n):\n alpha = footprint_volume / (footprint_volume - excluded_count)\n beta = -alpha / footprint_volume\n\n feature *= alpha\n feature += beta * excluded_values_sum\n\n\n@jit(\n nopython=True,\n parallel=True,\n error_model='numpy',\n fastmath={'contract', 'afn', 'reassoc'},\n)\ndef _fast_inplace_add(a, b):\n a += b\n\n\ndef prod(atuple: Tuple[Union[float, int]]):\n # In python 3.8 there is a prod function in math, until then we have:\n return reduce(mul, atuple)\n" ]
[ [ "numpy.ascontiguousarray", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
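The correction applied in _apply_correction above rests on a simple identity: given the mean over a footprint of V voxels and the sum of k excluded values, alpha*mean + beta*sum recovers the mean of the remaining V - k voxels, with alpha = V/(V-k) and beta = -alpha/V. A standalone check of that algebra (plain NumPy, no aydin needed):

```python
import numpy as np

np.random.seed(0)
V, k = 27, 3                        # footprint volume, number of excluded voxels
vals = np.random.rand(V)
mean_all = vals.mean()              # what the uniform filter returns
excluded_sum = vals[:k].sum()

alpha = V / (V - k)
beta = -alpha / V
corrected = alpha * mean_all + beta * excluded_sum
print(np.isclose(corrected, vals[k:].mean()))   # True
```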
nineye/pytorch_geometric
[ "a52728dcd8167827a6fa058047190e23d2982d18" ]
[ "examples/graph_saint.py" ]
[ "import os.path as osp\n\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.datasets import Flickr\nfrom torch_geometric.data import GraphSAINTRandomWalkSampler\nfrom torch_geometric.nn import SAGEConv\nfrom torch_geometric.utils import degree\n\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Flickr')\ndataset = Flickr(path)\ndata = dataset[0]\nrow, col = data.edge_index\ndata.edge_attr = 1. / degree(col, data.num_nodes)[col] # Norm by in-degree.\n\nloader = GraphSAINTRandomWalkSampler(data, batch_size=6000, walk_length=2,\n num_steps=5, sample_coverage=1000,\n save_dir=dataset.processed_dir,\n num_workers=0)\n\n\nclass Net(torch.nn.Module):\n def __init__(self, hidden_channels):\n super(Net, self).__init__()\n in_channels = dataset.num_node_features\n out_channels = dataset.num_classes\n self.conv1 = SAGEConv(in_channels, hidden_channels, concat=True)\n self.conv2 = SAGEConv(hidden_channels, hidden_channels, concat=True)\n self.conv3 = SAGEConv(hidden_channels, hidden_channels, concat=True)\n self.lin = torch.nn.Linear(3 * hidden_channels, out_channels)\n\n def set_aggr(self, aggr):\n self.conv1.aggr = aggr\n self.conv2.aggr = aggr\n self.conv3.aggr = aggr\n\n def forward(self, x0, edge_index, edge_weight=None):\n x1 = F.relu(self.conv1(x0, edge_index, edge_weight))\n x1 = F.dropout(x1, p=0.2, training=self.training)\n x2 = F.relu(self.conv2(x1, edge_index, edge_weight))\n x2 = F.dropout(x2, p=0.2, training=self.training)\n x3 = F.relu(self.conv3(x2, edge_index, edge_weight))\n x3 = F.dropout(x3, p=0.2, training=self.training)\n x = torch.cat([x1, x2, x3], dim=-1)\n x = self.lin(x)\n return x.log_softmax(dim=-1)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net(hidden_channels=256).to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n\ndef train():\n model.train()\n model.set_aggr('add')\n\n total_loss = total_examples = 0\n for data in loader:\n data = data.to(device)\n optimizer.zero_grad()\n out = model(data.x, data.edge_index, data.edge_norm * data.edge_attr)\n loss = F.nll_loss(out, data.y, reduction='none')\n loss = (loss * data.node_norm)[data.train_mask].sum()\n loss.backward()\n optimizer.step()\n total_loss += loss.item() * data.num_nodes\n total_examples += data.num_nodes\n return total_loss / total_examples\n\n\ndef train_full():\n model.train()\n model.set_aggr('mean')\n\n optimizer.zero_grad()\n out = model(data.x.to(device), data.edge_index.to(device))\n loss = F.nll_loss(out[data.train_mask], data.y.to(device)[data.train_mask])\n loss.backward()\n optimizer.step()\n return loss.item()\n\n\[email protected]_grad()\ndef test():\n model.eval()\n model.set_aggr('mean')\n\n out = model(data.x.to(device), data.edge_index.to(device))\n pred = out.argmax(dim=-1)\n correct = pred.eq(data.y.to(device))\n\n accs = []\n for _, mask in data('train_mask', 'val_mask', 'test_mask'):\n accs.append(correct[mask].sum().item() / mask.sum().item())\n return accs\n\n\nfor epoch in range(1, 51):\n loss = train()\n accs = test()\n print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {accs[0]:.4f}, '\n f'Val: {accs[1]:.4f}, Test: {accs[2]:.4f}')\n" ]
[ [ "torch.nn.functional.nll_loss", "torch.cat", "torch.nn.functional.dropout", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
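The line `data.edge_attr = 1. / degree(col, data.num_nodes)[col]` in the GraphSAINT example weights each edge by the inverse in-degree of its target node. A small sketch of the same computation, using plain torch.bincount in place of torch_geometric.utils.degree:

```python
import torch

edge_index = torch.tensor([[0, 1, 1, 2],    # source nodes
                           [1, 0, 2, 1]])   # target nodes
row, col = edge_index
deg = torch.bincount(col, minlength=3).float()   # in-degree per node: [1., 2., 1.]
edge_weight = 1. / deg[col]                      # weight by target's in-degree
print(edge_weight)   # tensor([0.5000, 1.0000, 1.0000, 0.5000])
```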
saipraneeth133/VoidDetection
[ "0d17cb9255d3f09ca97e630cb220eaa4fb0b3edf" ]
[ "rcnn_implementation.py" ]
[ "# split into train and test set\nfrom os import listdir\nfrom xml.etree import ElementTree\nfrom numpy import expand_dims\nfrom numpy import mean\nfrom numpy import zeros\nfrom numpy import asarray\nfrom mrcnn.utils import Dataset\nfrom matplotlib import pyplot\nfrom mrcnn.visualize import display_instances\nfrom mrcnn.utils import extract_bboxes\nfrom matplotlib.patches import Rectangle\nfrom mrcnn.config import Config\nfrom mrcnn.model import MaskRCNN\nfrom mrcnn.utils import compute_ap\nfrom mrcnn.model import load_image_gt\nfrom mrcnn.model import mold_image\n\n# class that defines and loads the void dataset (adapted from the kangaroo example)\nclass KangarooDataset(Dataset):\n\t# load the dataset definitions\n\tdef load_dataset(self, dataset_dir, is_train=True):\n\t\t# define one class\n\t\tself.add_class(\"dataset\", 1, \"void\")\n\t\t# define data locations\n\t\timages_dir = dataset_dir + '/train/'\n\t\tannotations_dir = dataset_dir + '/annot/'\n\t\t# find all images\n\t\tfor filename in listdir(images_dir):\n\t\t\t# extract image id\n\t\t\timage_id = filename[:-4]\n\t\t\tid = int(str(image_id).split(\"_\")[1])\n\t\t\tprint (id)\n\t\t\t# skip bad images (id is an int, so compare against ints; '00090' would never match)\n\n\t\t\tif id in [90]:\n\t\t\t\tcontinue\n\t\t\t# skip all images after 150 if we are building the train set\n\t\t\tif is_train and int(id) >= 150:\n\t\t\t\tcontinue\n\t\t\t# skip all images before 150 if we are building the test/val set\n\t\t\tif not is_train and int(id) < 150:\n\t\t\t\tcontinue\n\n\t\t\timg_path = images_dir + filename\n\t\t\tann_path = annotations_dir + image_id + '.xml'\n\t\t\t# add to dataset\n\t\t\tself.add_image('dataset', image_id=image_id, path=img_path, annotation=ann_path)\n\n\t# extract bounding boxes from an annotation file\n\tdef extract_boxes(self, filename):\n\t\t# load and parse the file\n\t\ttree = ElementTree.parse(filename)\n\t\t# get the root of the document\n\t\troot = tree.getroot()\n\t\t# extract each bounding box\n\t\tboxes = list()\n\t\tfor box in root.findall('.//bndbox'):\n\t\t\txmin = int(box.find('xmin').text)\n\t\t\tymin = int(box.find('ymin').text)\n\t\t\txmax = int(box.find('xmax').text)\n\t\t\tymax = int(box.find('ymax').text)\n\t\t\tcoors = [xmin, ymin, xmax, ymax]\n\t\t\tboxes.append(coors)\n\t\t# extract image dimensions\n\t\twidth = int(root.find('.//size/width').text)\n\t\theight = int(root.find('.//size/height').text)\n\t\treturn boxes, width, height\n\n\t# load the masks for an image\n\tdef load_mask(self, image_id):\n\t\t# get details of image\n\t\tinfo = self.image_info[image_id]\n\t\t# define box file location\n\t\tpath = info['annotation']\n\t\t# load XML\n\t\tboxes, w, h = self.extract_boxes(path)\n\t\t# create one array for all masks, each on a different channel\n\t\tmasks = zeros([h, w, len(boxes)], dtype='uint8')\n\t\t# create masks\n\t\tclass_ids = list()\n\t\tfor i in range(len(boxes)):\n\t\t\tbox = boxes[i]\n\t\t\trow_s, row_e = box[1], box[3]\n\t\t\tcol_s, col_e = box[0], box[2]\n\t\t\tmasks[row_s:row_e, col_s:col_e, i] = 1\n\t\t\tclass_ids.append(self.class_names.index('void'))\n\t\treturn masks, asarray(class_ids, dtype='int32')\n\n\t# load an image reference\n\tdef image_reference(self, image_id):\n\t\tinfo = self.image_info[image_id]\n\t\treturn info['path']\n\nclass KangarooConfig(Config):\n\t# define the name of the configuration\n\tNAME = \"void_cfg\"\n\t# number of classes (background + void)\n\tNUM_CLASSES = 1 + 1\n\t# number of training steps per epoch\n\tSTEPS_PER_EPOCH = 131\n\nclass PredictionConfig(Config):\n\t# define the name of the configuration\n\tNAME = 
\"void_cfg\"\n\t# number of classes (background + kangaroo)\n\tNUM_CLASSES = 1 + 1\n\t# simplify GPU config\n\tGPU_COUNT = 1\n\tIMAGES_PER_GPU = 1\n\ndef evaluate_model(dataset, model, cfg):\n\tAPs = list()\n\tfor image_id in dataset.image_ids:\n\t\t# load image, bounding boxes and masks for the image id\n\t\timage, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(dataset, cfg, image_id, use_mini_mask=False)\n\t\t# convert pixel values (e.g. center)\n\t\tscaled_image = mold_image(image, cfg)\n\t\t# convert image into one sample\n\t\tsample = expand_dims(scaled_image, 0)\n\t\t# make prediction\n\t\tyhat = model.detect(sample, verbose=0)\n\t\t# extract results for first sample\n\t\tr = yhat[0]\n\t\t# calculate statistics, including AP\n\t\tAP, _, _, _ = compute_ap(gt_bbox, gt_class_id, gt_mask, r[\"rois\"], r[\"class_ids\"], r[\"scores\"], r['masks'])\n\t\t# store\n\t\tAPs.append(AP)\n\t# calculate the mean AP across all images\n\tmAP = mean(APs)\n\treturn mAP\n\ndef plot_actual_vs_predicted(dataset, model, cfg, n_images=2):\n\t# load image and mask\n\tfor i in range(n_images):\n\t\t# load the image and mask\n\t\timage = dataset.load_image(i)\n\t\tmask, _ = dataset.load_mask(i)\n\t\t# convert pixel values (e.g. center)\n\t\tscaled_image = mold_image(image, cfg)\n\t\t# convert image into one sample\n\t\tsample = expand_dims(scaled_image, 0)\n\t\t# make prediction\n\t\tyhat = model.detect(sample, verbose=0)[0]\n\t\t# define subplot\n\t\tpyplot.subplot(n_images, 2, i*2+1)\n\t\t# plot raw pixel data\n\t\tpyplot.imshow(image)\n\t\tpyplot.title('Actual')\n\t\t# plot masks\n\t\tfor j in range(mask.shape[2]):\n\t\t\tpyplot.imshow(mask[:, :, j], cmap='gray', alpha=0.3)\n\t\t# get the context for drawing boxes\n\t\tpyplot.subplot(n_images, 2, i*2+2)\n\t\t# plot raw pixel data\n\t\tpyplot.imshow(image)\n\t\tpyplot.title('Predicted')\n\t\tax = pyplot.gca()\n\t\t# plot each box\n\t\tfor box in yhat['rois']:\n\t\t\t# get coordinates\n\t\t\ty1, x1, y2, x2 = box\n\t\t\t# calculate width and height of the box\n\t\t\twidth, height = x2 - x1, y2 - y1\n\t\t\t# create the shape\n\t\t\trect = Rectangle((x1, y1), width, height, fill=False, color='red')\n\t\t\t# draw the box\n\t\t\tax.add_patch(rect)\n\t# show the figure\n\tpyplot.show()\n\n# train set\ntrain_set = KangarooDataset()\ntrain_set.load_dataset(r'C:\\Users\\sai\\Desktop\\images', is_train=True)\ntrain_set.prepare()\nprint('Train: %d' % len(train_set.image_ids))\n\ntest_set = KangarooDataset()\ntest_set.load_dataset(r'C:\\Users\\sai\\Desktop\\images', is_train=False)\ntest_set.prepare()\nprint('Test: %d' % len(test_set.image_ids))\n\n\nconfig = KangarooConfig()\nconfig.display()\n# define the model\nmodel = MaskRCNN(mode='training', model_dir='./', config=config)\n# load weights (mscoco) and exclude the output layers\nmodel.load_weights('mask_rcnn_void_cfg_0009.h5', by_name=True, exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\", \"mrcnn_bbox\", \"mrcnn_mask\"])\n# train weights (output layers or 'heads')\nmodel.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=10, layers='heads')\n\n\n'''\ncfg = PredictionConfig()\n# define the model\nmodel = MaskRCNN(mode='inference', model_dir='./', config=cfg)\n# load model weights\nmodel.load_weights('mask_rcnn_void_cfg_0005.h5', by_name=True)\n# evaluate model on training dataset\ntrain_mAP = evaluate_model(train_set, model, cfg)\nprint(\"Train mAP: %.3f\" % train_mAP)\n# evaluate model on test dataset\ntest_mAP = evaluate_model(test_set, model, cfg)\nprint(\"Test mAP: %.3f\" % 
test_mAP)\n'''\n\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.imshow", "numpy.expand_dims", "matplotlib.pyplot.title", "numpy.asarray", "matplotlib.patches.Rectangle", "matplotlib.pyplot.subplot", "numpy.mean", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
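load_mask above rasterizes each xmin/ymin/xmax/ymax box into its own binary mask channel. The indexing convention (rows sliced by ymin:ymax, columns by xmin:xmax) is easy to sanity-check in isolation:

```python
import numpy as np

h, w = 6, 8
boxes = [[2, 1, 5, 4]]                      # xmin, ymin, xmax, ymax
masks = np.zeros((h, w, len(boxes)), dtype='uint8')
for i, (xmin, ymin, xmax, ymax) in enumerate(boxes):
    masks[ymin:ymax, xmin:xmax, i] = 1      # rows are y, columns are x
print(masks[:, :, 0].sum())                 # 9 == (5 - 2) * (4 - 1)
```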
kuraisle/multipletau
[ "0321de77616f05ca90106075f7f6ecd137437be7" ]
[ "tests/test_ac_cc.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Test correlation-autocorrelation identity\"\"\"\nfrom __future__ import division, print_function\n\nimport sys\n\nimport numpy as np\n\nimport multipletau\n\nfrom test_autocorrelate import get_sample_arrays\n\n\ndef test_ac_cc_m():\n myframe = sys._getframe()\n myname = myframe.f_code.co_name\n print(\"running \", myname)\n\n arrs = get_sample_arrays()\n\n ms = [8, 16, 32, 64, 128]\n a = np.concatenate(arrs)\n\n res = []\n for m in ms:\n r = multipletau.autocorrelate(a=a,\n m=m,\n deltat=1,\n normalize=False,\n copy=True,\n dtype=np.float_)\n res.append(r)\n res = np.concatenate(res)\n\n rescc = []\n for m in ms:\n r = multipletau.correlate(a=a, v=a,\n m=m,\n deltat=1,\n normalize=False,\n copy=True,\n dtype=np.float_)\n rescc.append(r)\n # test minimal length of array\n multipletau.correlate(a=a[:2*m], v=a[:2*m],\n m=m,\n deltat=1,\n normalize=False,\n copy=True,\n dtype=np.float_)\n\n rescc = np.concatenate(rescc)\n assert np.all(res == rescc)\n\n\ndef test_ac_cc_normalize():\n myframe = sys._getframe()\n myname = myframe.f_code.co_name\n print(\"running \", myname)\n\n arrs = get_sample_arrays()\n\n res = []\n for a in arrs:\n r = multipletau.autocorrelate(a=a,\n m=16,\n deltat=1,\n normalize=True,\n copy=True,\n dtype=np.float_)\n res.append(r)\n\n res = np.concatenate(res)\n\n rescc = []\n for a in arrs:\n r = multipletau.correlate(a=a, v=a,\n m=16,\n deltat=1,\n normalize=True,\n copy=True,\n dtype=np.float_)\n rescc.append(r)\n\n rescc = np.concatenate(rescc)\n\n assert np.all(res == rescc)\n\n\ndef test_ac_cc_simple():\n myframe = sys._getframe()\n myname = myframe.f_code.co_name\n print(\"running \", myname)\n\n arrs = get_sample_arrays()\n\n rescc = []\n for a in arrs:\n r = multipletau.correlate(a=a, v=a,\n m=16,\n deltat=1,\n normalize=False,\n copy=True,\n dtype=np.float_)\n rescc.append(r)\n\n rescc = np.concatenate(rescc)\n\n resac = []\n for a in arrs:\n r = multipletau.autocorrelate(a=a,\n m=16,\n deltat=1,\n normalize=False,\n copy=True,\n dtype=np.float_)\n resac.append(r)\n\n resac = np.concatenate(resac)\n\n assert np.all(resac == rescc)\n\n\nif __name__ == \"__main__\":\n # Run all tests\n loc = locals()\n for key in list(loc.keys()):\n if key.startswith(\"test_\") and hasattr(loc[key], \"__call__\"):\n loc[key]()\n" ]
[ [ "numpy.concatenate", "numpy.all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
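The identity these tests assert can be reproduced directly, assuming multipletau is installed (the calls mirror the ones in the test file, including the np.float_ dtype, which requires an older NumPy):

```python
import numpy as np
import multipletau

a = np.random.rand(256)
ac = multipletau.autocorrelate(a, m=16, deltat=1, normalize=False, dtype=np.float_)
cc = multipletau.correlate(a, a, m=16, deltat=1, normalize=False, dtype=np.float_)
print(np.all(ac == cc))   # True: autocorrelation == correlation of a with itself
```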
Hongbo-Miao/ogb
[ "1d6dde8080261931bc6ce2491e9149298af1ea98" ]
[ "examples/linkproppred/collab/mlp.py" ]
[ "import argparse\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nfrom ogb.linkproppred import PygLinkPropPredDataset, Evaluator\n\nfrom logger import Logger\n\n\nclass LinkPredictor(torch.nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, num_layers,\n dropout):\n super(LinkPredictor, self).__init__()\n\n self.lins = torch.nn.ModuleList()\n self.lins.append(torch.nn.Linear(in_channels, hidden_channels))\n for _ in range(num_layers - 2):\n self.lins.append(torch.nn.Linear(hidden_channels, hidden_channels))\n self.lins.append(torch.nn.Linear(hidden_channels, out_channels))\n\n self.dropout = dropout\n\n def reset_parameters(self):\n for lin in self.lins:\n lin.reset_parameters()\n\n def forward(self, x_i, x_j):\n x = x_i * x_j\n for lin in self.lins[:-1]:\n x = lin(x)\n x = F.relu(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.lins[-1](x)\n return torch.sigmoid(x)\n\n\ndef train(predictor, x, split_edge, optimizer, batch_size):\n predictor.train()\n\n pos_train_edge = split_edge['train']['edge'].to(x.device)\n\n total_loss = total_examples = 0\n for perm in DataLoader(range(pos_train_edge.size(0)), batch_size,\n shuffle=True):\n optimizer.zero_grad()\n\n edge = pos_train_edge[perm].t()\n\n pos_out = predictor(x[edge[0]], x[edge[1]])\n pos_loss = -torch.log(pos_out + 1e-15).mean()\n\n # Just do some trivial random sampling.\n edge = torch.randint(0, x.size(0), edge.size(), dtype=torch.long,\n device=x.device)\n neg_out = predictor(x[edge[0]], x[edge[1]])\n neg_loss = -torch.log(1 - neg_out + 1e-15).mean()\n\n loss = pos_loss + neg_loss\n loss.backward()\n optimizer.step()\n\n num_examples = pos_out.size(0)\n total_loss += loss.item() * num_examples\n total_examples += num_examples\n\n return total_loss / total_examples\n\n\[email protected]_grad()\ndef test(predictor, x, split_edge, evaluator, batch_size):\n predictor.eval()\n\n pos_train_edge = split_edge['train']['edge'].to(x.device)\n pos_valid_edge = split_edge['valid']['edge'].to(x.device)\n neg_valid_edge = split_edge['valid']['edge_neg'].to(x.device)\n pos_test_edge = split_edge['test']['edge'].to(x.device)\n neg_test_edge = split_edge['test']['edge_neg'].to(x.device)\n\n pos_train_preds = []\n for perm in DataLoader(range(pos_train_edge.size(0)), batch_size):\n edge = pos_train_edge[perm].t()\n pos_train_preds += [predictor(x[edge[0]], x[edge[1]]).squeeze().cpu()]\n pos_train_pred = torch.cat(pos_train_preds, dim=0)\n\n pos_valid_preds = []\n for perm in DataLoader(range(pos_valid_edge.size(0)), batch_size):\n edge = pos_valid_edge[perm].t()\n pos_valid_preds += [predictor(x[edge[0]], x[edge[1]]).squeeze().cpu()]\n pos_valid_pred = torch.cat(pos_valid_preds, dim=0)\n\n neg_valid_preds = []\n for perm in DataLoader(range(neg_valid_edge.size(0)), batch_size):\n edge = neg_valid_edge[perm].t()\n neg_valid_preds += [predictor(x[edge[0]], x[edge[1]]).squeeze().cpu()]\n neg_valid_pred = torch.cat(neg_valid_preds, dim=0)\n\n pos_test_preds = []\n for perm in DataLoader(range(pos_test_edge.size(0)), batch_size):\n edge = pos_test_edge[perm].t()\n pos_test_preds += [predictor(x[edge[0]], x[edge[1]]).squeeze().cpu()]\n pos_test_pred = torch.cat(pos_test_preds, dim=0)\n\n neg_test_preds = []\n for perm in DataLoader(range(neg_test_edge.size(0)), batch_size):\n edge = neg_test_edge[perm].t()\n neg_test_preds += [predictor(x[edge[0]], x[edge[1]]).squeeze().cpu()]\n neg_test_pred = torch.cat(neg_test_preds, dim=0)\n\n results = {}\n for K in [10, 50, 
100]:\n evaluator.K = K\n train_hits = evaluator.eval({\n 'y_pred_pos': pos_train_pred,\n 'y_pred_neg': neg_valid_pred,\n })[f'hits@{K}']\n valid_hits = evaluator.eval({\n 'y_pred_pos': pos_valid_pred,\n 'y_pred_neg': neg_valid_pred,\n })[f'hits@{K}']\n test_hits = evaluator.eval({\n 'y_pred_pos': pos_test_pred,\n 'y_pred_neg': neg_test_pred,\n })[f'hits@{K}']\n\n results[f'Hits@{K}'] = (train_hits, valid_hits, test_hits)\n\n return results\n\n\ndef main():\n parser = argparse.ArgumentParser(description='OGBL-COLLAB (MLP)')\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--log_steps', type=int, default=1)\n parser.add_argument('--use_node_embedding', action='store_true')\n parser.add_argument('--num_layers', type=int, default=3)\n parser.add_argument('--hidden_channels', type=int, default=256)\n parser.add_argument('--dropout', type=float, default=0.0)\n parser.add_argument('--batch_size', type=int, default=64 * 1024)\n parser.add_argument('--lr', type=float, default=0.01)\n parser.add_argument('--epochs', type=int, default=200)\n parser.add_argument('--eval_steps', type=int, default=1)\n parser.add_argument('--runs', type=int, default=10)\n args = parser.parse_args()\n print(args)\n\n device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'\n device = torch.device(device)\n\n dataset = PygLinkPropPredDataset(name='ogbl-collab')\n split_edge = dataset.get_edge_split()\n data = dataset[0]\n\n x = data.x\n if args.use_node_embedding:\n embedding = torch.load('embedding.pt', map_location='cpu')\n x = torch.cat([x, embedding], dim=-1)\n x = x.to(device)\n\n predictor = LinkPredictor(x.size(-1), args.hidden_channels, 1,\n args.num_layers, args.dropout).to(device)\n\n evaluator = Evaluator(name='ogbl-collab')\n loggers = {\n 'Hits@10': Logger(args.runs, args),\n 'Hits@50': Logger(args.runs, args),\n 'Hits@100': Logger(args.runs, args),\n }\n\n for run in range(args.runs):\n predictor.reset_parameters()\n optimizer = torch.optim.Adam(predictor.parameters(), lr=args.lr)\n\n for epoch in range(1, 1 + args.epochs):\n loss = train(predictor, x, split_edge, optimizer, args.batch_size)\n\n if epoch % args.eval_steps == 0:\n results = test(predictor, x, split_edge, evaluator,\n args.batch_size)\n for key, result in results.items():\n loggers[key].add_result(run, result)\n\n if epoch % args.log_steps == 0:\n for key, result in results.items():\n train_hits, valid_hits, test_hits = result\n print(key)\n print(f'Run: {run + 1:02d}, '\n f'Epoch: {epoch:02d}, '\n f'Loss: {loss:.4f}, '\n f'Train: {100 * train_hits:.2f}%, '\n f'Valid: {100 * valid_hits:.2f}%, '\n f'Test: {100 * test_hits:.2f}%')\n print('---')\n\n for key in loggers.keys():\n print(key)\n loggers[key].print_statistics(run)\n\n for key in loggers.keys():\n print(key)\n loggers[key].print_statistics()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.sigmoid", "torch.cat", "torch.load", "torch.nn.functional.dropout", "torch.nn.ModuleList", "torch.nn.Linear", "torch.nn.functional.relu", "torch.no_grad", "torch.log", "torch.cuda.is_available", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
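For reference, the Hits@K metric reported by the ogbl-collab Evaluator ranks each positive edge against the K-th best negative score. A hand-rolled sketch of (what I understand to be) that computation; the tensors here are made-up scores:

```python
import torch

pos_pred = torch.tensor([0.9, 0.4, 0.8])        # scores of positive edges
neg_pred = torch.tensor([0.7, 0.3, 0.2, 0.1])   # scores of negative edges
K = 1
kth_neg = torch.topk(neg_pred, K).values[-1]    # K-th highest negative score
hits = (pos_pred > kth_neg).float().mean()
print(hits)   # tensor(0.6667): 2 of 3 positives outrank the top negative
```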
jshleap/alphafold
[ "dc1742fd8b96f71e884e2cc1cf2c4204b0cfe577" ]
[ "alphafold/data/pipeline_multimer.py" ]
[ "# Copyright 2021 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions for building the features for the AlphaFold multimer model.\"\"\"\n\nimport collections\nimport contextlib\nimport copy\nimport dataclasses\nimport json\nimport os\nimport tempfile\nfrom typing import Mapping, MutableMapping, Sequence\n\nfrom absl import logging\nfrom alphafold.common import protein\nfrom alphafold.common import residue_constants\nfrom alphafold.data import feature_processing\nfrom alphafold.data import msa_pairing\nfrom alphafold.data import parsers\nfrom alphafold.data import pipeline\nfrom alphafold.data.tools import jackhmmer\nimport numpy as np\n\n# Internal import (7716).\n\n\[email protected](frozen=True)\nclass _FastaChain:\n sequence: str\n description: str\n\n\ndef _make_chain_id_map(*,\n sequences: Sequence[str],\n descriptions: Sequence[str],\n ) -> Mapping[str, _FastaChain]:\n \"\"\"Makes a mapping from PDB-format chain ID to sequence and description.\"\"\"\n if len(sequences) != len(descriptions):\n raise ValueError('sequences and descriptions must have equal length. '\n f'Got {len(sequences)} != {len(descriptions)}.')\n if len(sequences) > protein.PDB_MAX_CHAINS:\n raise ValueError('Cannot process more chains than the PDB format supports. 
'\n f'Got {len(sequences)} chains.')\n chain_id_map = {}\n for chain_id, sequence, description in zip(\n protein.PDB_CHAIN_IDS, sequences, descriptions):\n chain_id_map[chain_id] = _FastaChain(\n sequence=sequence, description=description)\n return chain_id_map\n\n\[email protected]\ndef temp_fasta_file(fasta_str: str):\n with tempfile.NamedTemporaryFile('w', suffix='.fasta') as fasta_file:\n fasta_file.write(fasta_str)\n fasta_file.seek(0)\n yield fasta_file.name\n\n\ndef convert_monomer_features(\n monomer_features: pipeline.FeatureDict,\n chain_id: str) -> pipeline.FeatureDict:\n \"\"\"Reshapes and modifies monomer features for multimer models.\"\"\"\n converted = {}\n converted['auth_chain_id'] = np.asarray(chain_id, dtype=np.object_)\n unnecessary_leading_dim_feats = {\n 'sequence', 'domain_name', 'num_alignments', 'seq_length'}\n for feature_name, feature in monomer_features.items():\n if feature_name in unnecessary_leading_dim_feats:\n # asarray ensures it's a np.ndarray.\n feature = np.asarray(feature[0], dtype=feature.dtype)\n elif feature_name == 'aatype':\n # The multimer model performs the one-hot operation itself.\n feature = np.argmax(feature, axis=-1).astype(np.int32)\n elif feature_name == 'template_aatype':\n feature = np.argmax(feature, axis=-1).astype(np.int32)\n new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE\n feature = np.take(new_order_list, feature.astype(np.int32), axis=0)\n elif feature_name == 'template_all_atom_masks':\n feature_name = 'template_all_atom_mask'\n converted[feature_name] = feature\n return converted\n\n\ndef int_id_to_str_id(num: int) -> str:\n \"\"\"Encodes a number as a string, using reverse spreadsheet style naming.\n\n Args:\n num: A positive integer.\n\n Returns:\n A string that encodes the positive integer using reverse spreadsheet style,\n naming e.g. 1 = A, 2 = B, ..., 27 = AA, 28 = BA, 29 = CA, ... This is the\n usual way to encode chain IDs in mmCIF files.\n \"\"\"\n if num <= 0:\n raise ValueError(f'Only positive integers allowed, got {num}.')\n\n num = num - 1 # 1-based indexing.\n output = []\n while num >= 0:\n output.append(chr(num % 26 + ord('A')))\n num = num // 26 - 1\n return ''.join(output)\n\n\ndef add_assembly_features(\n all_chain_features: MutableMapping[str, pipeline.FeatureDict],\n ) -> MutableMapping[str, pipeline.FeatureDict]:\n \"\"\"Add features to distinguish between chains.\n\n Args:\n all_chain_features: A dictionary which maps chain_id to a dictionary of\n features for each chain.\n\n Returns:\n all_chain_features: A dictionary which maps strings of the form\n `<seq_id>_<sym_id>` to the corresponding chain features. E.g. two\n chains from a homodimer would have keys A_1 and A_2. 
Two chains from a\n heterodimer would have keys A_1 and B_1.\n \"\"\"\n # Group the chains by sequence\n seq_to_entity_id = {}\n grouped_chains = collections.defaultdict(list)\n for chain_id, chain_features in all_chain_features.items():\n seq = str(chain_features['sequence'])\n if seq not in seq_to_entity_id:\n seq_to_entity_id[seq] = len(seq_to_entity_id) + 1\n grouped_chains[seq_to_entity_id[seq]].append(chain_features)\n\n new_all_chain_features = {}\n chain_id = 1\n for entity_id, group_chain_features in grouped_chains.items():\n for sym_id, chain_features in enumerate(group_chain_features, start=1):\n new_all_chain_features[\n f'{int_id_to_str_id(entity_id)}_{sym_id}'] = chain_features\n seq_length = chain_features['seq_length']\n chain_features['asym_id'] = chain_id * np.ones(seq_length)\n chain_features['sym_id'] = sym_id * np.ones(seq_length)\n chain_features['entity_id'] = entity_id * np.ones(seq_length)\n chain_id += 1\n\n return new_all_chain_features\n\n\ndef pad_msa(np_example, min_num_seq):\n np_example = dict(np_example)\n num_seq = np_example['msa'].shape[0]\n if num_seq < min_num_seq:\n for feat in ('msa', 'deletion_matrix', 'bert_mask', 'msa_mask'):\n np_example[feat] = np.pad(\n np_example[feat], ((0, min_num_seq - num_seq), (0, 0)))\n np_example['cluster_bias_mask'] = np.pad(\n np_example['cluster_bias_mask'], ((0, min_num_seq - num_seq),))\n return np_example\n\n\nclass DataPipeline:\n \"\"\"Runs the alignment tools and assembles the input features.\"\"\"\n\n def __init__(self,\n monomer_data_pipeline: pipeline.DataPipeline,\n jackhmmer_binary_path: str,\n uniprot_database_path: str,\n max_uniprot_hits: int = 50000,\n use_precomputed_msas: bool = False,\n n_cpu: int = 8):\n \"\"\"Initializes the data pipeline.\n\n Args:\n monomer_data_pipeline: An instance of pipeline.DataPipeline - that runs\n the data pipeline for the monomer AlphaFold system.\n jackhmmer_binary_path: Location of the jackhmmer binary.\n uniprot_database_path: Location of the unclustered uniprot sequences, that\n will be searched with jackhmmer and used for MSA pairing.\n max_uniprot_hits: The maximum number of hits to return from uniprot.\n use_precomputed_msas: Whether to use pre-existing MSAs; see run_alphafold.\n \"\"\"\n self.n_cpu = n_cpu\n self._monomer_data_pipeline = monomer_data_pipeline\n self._uniprot_msa_runner = jackhmmer.Jackhmmer(\n binary_path=jackhmmer_binary_path, n_cpu=self.n_cpu,\n database_path=uniprot_database_path)\n self._max_uniprot_hits = max_uniprot_hits\n self.use_precomputed_msas = use_precomputed_msas\n\n def _process_single_chain(\n self,\n chain_id: str,\n sequence: str,\n description: str,\n msa_output_dir: str,\n is_homomer_or_monomer: bool) -> pipeline.FeatureDict:\n \"\"\"Runs the monomer pipeline on a single chain.\"\"\"\n chain_fasta_str = f'>chain_{chain_id}\\n{sequence}\\n'\n chain_msa_output_dir = os.path.join(msa_output_dir, chain_id)\n if not os.path.exists(chain_msa_output_dir):\n os.makedirs(chain_msa_output_dir)\n with temp_fasta_file(chain_fasta_str) as chain_fasta_path:\n logging.info('Running monomer pipeline on chain %s: %s',\n chain_id, description)\n chain_features = self._monomer_data_pipeline.process(\n input_fasta_path=chain_fasta_path,\n msa_output_dir=chain_msa_output_dir)\n\n # We only construct the pairing features if there are 2 or more unique\n # sequences.\n if not is_homomer_or_monomer:\n all_seq_msa_features = self._all_seq_msa_features(chain_fasta_path,\n chain_msa_output_dir)\n chain_features.update(all_seq_msa_features)\n return 
chain_features\n\n def _all_seq_msa_features(self, input_fasta_path, msa_output_dir):\n \"\"\"Get MSA features for unclustered uniprot, for pairing.\"\"\"\n out_path = os.path.join(msa_output_dir, 'uniprot_hits.sto')\n result = pipeline.run_msa_tool(\n self._uniprot_msa_runner, input_fasta_path, out_path, 'sto',\n self.use_precomputed_msas)\n msa = parsers.parse_stockholm(result['sto'])\n msa = msa.truncate(max_seqs=self._max_uniprot_hits)\n all_seq_features = pipeline.make_msa_features([msa])\n valid_feats = msa_pairing.MSA_FEATURES + (\n 'msa_uniprot_accession_identifiers',\n 'msa_species_identifiers',\n )\n feats = {f'{k}_all_seq': v for k, v in all_seq_features.items()\n if k in valid_feats}\n return feats\n\n def process(self,\n input_fasta_path: str,\n msa_output_dir: str,\n is_prokaryote: bool = False) -> pipeline.FeatureDict:\n \"\"\"Runs alignment tools on the input sequences and creates features.\"\"\"\n with open(input_fasta_path) as f:\n input_fasta_str = f.read()\n input_seqs, input_descs = parsers.parse_fasta(input_fasta_str)\n\n chain_id_map = _make_chain_id_map(sequences=input_seqs,\n descriptions=input_descs)\n chain_id_map_path = os.path.join(msa_output_dir, 'chain_id_map.json')\n with open(chain_id_map_path, 'w') as f:\n chain_id_map_dict = {chain_id: dataclasses.asdict(fasta_chain)\n for chain_id, fasta_chain in chain_id_map.items()}\n json.dump(chain_id_map_dict, f, indent=4, sort_keys=True)\n\n all_chain_features = {}\n sequence_features = {}\n is_homomer_or_monomer = len(set(input_seqs)) == 1\n for chain_id, fasta_chain in chain_id_map.items():\n if fasta_chain.sequence in sequence_features:\n all_chain_features[chain_id] = copy.deepcopy(\n sequence_features[fasta_chain.sequence])\n continue\n chain_features = self._process_single_chain(\n chain_id=chain_id,\n sequence=fasta_chain.sequence,\n description=fasta_chain.description,\n msa_output_dir=msa_output_dir,\n is_homomer_or_monomer=is_homomer_or_monomer)\n\n chain_features = convert_monomer_features(chain_features,\n chain_id=chain_id)\n all_chain_features[chain_id] = chain_features\n sequence_features[fasta_chain.sequence] = chain_features\n\n all_chain_features = add_assembly_features(all_chain_features)\n\n np_example = feature_processing.pair_and_merge(\n all_chain_features=all_chain_features,\n is_prokaryote=is_prokaryote,\n )\n\n # Pad MSA to avoid zero-sized extra_msa.\n np_example = pad_msa(np_example, 512)\n\n return np_example\n" ]
[ [ "numpy.asarray", "numpy.argmax", "numpy.pad", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
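The reverse-spreadsheet chain naming in int_id_to_str_id above is worth a worked check, since it differs from ordinary spreadsheet column naming by writing the least-significant letter first (28 = BA, not AB). The function below is copied verbatim from the pipeline so the check is standalone:

```python
def int_id_to_str_id(num: int) -> str:
    num = num - 1                            # 1-based indexing
    output = []
    while num >= 0:
        output.append(chr(num % 26 + ord('A')))
        num = num // 26 - 1
    return ''.join(output)

print([int_id_to_str_id(n) for n in (1, 2, 26, 27, 28, 29)])
# ['A', 'B', 'Z', 'AA', 'BA', 'CA']
```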
MiroK/tieler
[ "5204e12157528b18ab1161d4bc5655194c4e8dcb" ]
[ "tieler/tile_data.py" ]
[ "from tieler.tile_cpp import fill_mesh_function\n \nfrom dolfin import MeshFunction, info, SubsetIterator\nfrom collections import defaultdict\nimport numpy as np\nimport six\n\n\ndef mf_from_data(mesh, data):\n '''Build tdim mesh function from the data of TileMesh'''\n return _mx_from_data(mesh, data,\n fill=fill_mesh_function,\n init_container=lambda m, t: MeshFunction('size_t', m, t, 0))\n\n\ndef groupby(pairs, index):\n '''Organize pairs by pairs[index]'''\n groups = defaultdict(list)\n for pair in pairs: groups[pair[index]].append(pair)\n\n for item in six.iteritems(groups):\n yield item\n\n \ndef _mx_from_data(mesh, data, fill, init_container):\n '''Fill the container over mesh by data. Get back dict{tdim -> MeshFoo}'''\n try: \n assert mesh.mpi_comm().tompi4py().size == 1\n # FEniCS 2018\n except AttributeError:\n assert mesh.mpi_comm().size == 1\n\n containers = {}\n # We define entities in terms of vertex numbering\n # Order keys by tdim (the first key)\n for tdim, keys in groupby(data.keys(), 0):\n # So we'll be getting the entity index by lookup\n mesh.init(tdim)\n mesh.init(0, tdim)\n # Build the meshfunction from data\n f = init_container(mesh, tdim)\n for key in keys:\n indices = data[key]\n indices.shape = (np.prod(indices.shape), )\n # These entity indices get the 'color'\n fill(mesh, indices, tdim, key[1], f)\n containers[tdim] = f\n\n return containers\n\n\ndef load_data(mesh, mesh_f, dim, data):\n '''\n Represent mesh_f over dim entities of mesh as collection of vertices.\n Can have mesh_f as mesh function or (h5_file, data_set)\n '''\n try:\n h5_file, data_set = mesh_f\n mf = MeshFunction('size_t', mesh, dim, 0)\n h5_file.read(mf, data_set)\n except ValueError:\n mf = mesh_f\n data_set = '%d dim entities' % mf.dim()\n \n # Map for encoding entities as vertices\n mesh.init(dim, 0)\n e2v = mesh.topology()(dim, 0)\n\n tags = set(mf.array())\n # Don't encode zero - we initialize to it\n if 0 in tags: tags.remove(0)\n info('%s involves tags %r' % (data_set, tags))\n\n for tag in tags:\n data[(dim, tag)] = np.array([e2v(e.index()) for e in SubsetIterator(mf, tag)],\n dtype='uintp')\n return data\n" ]
[ [ "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OverHall27/DLMugenKnock
[ "f08553213028e90baff7b4de3c640b51485f4a15" ]
[ "Question_theory/myanswers/perceptron_2.py" ]
[ "import numpy as np\nnp.random.seed(0)\n\nclass NN:\n\n def __init__(self, x, lr):\n #self.weight = np.random.normal(0., 1, [x.shape[1], 1])\n self.weight = np.random.normal(0., 1, x.shape[1])\n self.b = 1.\n self.lr = lr\n self.y = np.zeros(x.shape[0])\n\n def forward(self, x):\n self.y = np.dot(x, self.weight)\n\n # t is training-data\n def train(self, x, t):\n is_changed = False\n # t == y となってれば学習しなくていい\n # ただし,yは0以上で1, 0未満で-1とみなす\n dy = self.y.copy()\n dt = t.copy()\n\n dt[dt * dy >= 0] = 0\n # yとtの符号が同じなら学習しなくていいので,0にする\n # plus and plus, or minus and minus -> +0\n\n En = np.dot(dt, x)\n self.weight += En * self.lr\n\n is_changed = len(np.where(dt != 0)[0]) > 0\n\n return is_changed\n\nxs = np.array([[0,0], [0,1], [1,0], [1,1]], dtype=np.float32)\nts = np.array([(-1), (-1), (-1), (1)], dtype=np.float32)\nlr = 0.1\n_xs = np.hstack([xs, [[1] for _ in range(4)]])\n\n\nnn = NN(_xs, lr)\nite = 0\nprint(nn.weight)\n\nwhile True:\n nn.forward(_xs)\n is_changed = nn.train(_xs, ts)\n ite += 1\n\n print(\"iteration: \" + str(ite) + \" y >> \" + str(nn.y))\n if not(is_changed):\n break\n\nprint(\"training finished\")\nprint(\"weight >> \" + str(nn.weight))\nfor i in range(4):\n print(\"in >> \" + str(_xs[i]) + \" y >> \" + str(nn.y[i]))\n\n\n\n" ]
[ [ "numpy.dot", "numpy.random.seed", "numpy.random.normal", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elv-pierre/lpips-tensorflow
[ "f444e441be0bdfa2eeb3ed014ccba10ffd433f43" ]
[ "lpips_tf.py" ]
[ "import os\nimport sys\n\nimport tensorflow as tf\nfrom six.moves import urllib\n\n_URL = 'http://rail.eecs.berkeley.edu/models/lpips'\n\n\ndef _download(url, output_dir):\n \"\"\"Downloads the `url` file into `output_dir`.\n\n Modified from https://github.com/tensorflow/models/blob/master/research/slim/datasets/dataset_utils.py\n \"\"\"\n filename = url.split('/')[-1]\n filepath = os.path.join(output_dir, filename)\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\n\ndef lpips(input0, input1, model='net-lin', net='alex', version=0.1):\n \"\"\"\n Learned Perceptual Image Patch Similarity (LPIPS) metric.\n\n Args:\n input0: An image tensor of shape `[..., height, width, channels]`,\n with values in [0, 1].\n input1: An image tensor of shape `[..., height, width, channels]`,\n with values in [0, 1].\n\n Returns:\n The Learned Perceptual Image Patch Similarity (LPIPS) distance.\n\n Reference:\n Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, Oliver Wang.\n The Unreasonable Effectiveness of Deep Features as a Perceptual Metric.\n In CVPR, 2018.\n \"\"\"\n # flatten the leading dimensions\n batch_shape = tf.shape(input0)[:-3]\n input0 = tf.to_float(input0)\n input1 = tf.to_float(input1)\n input0 = tf.reshape(input0, tf.concat([[-1], tf.shape(input0)[-3:]], axis=0))\n input1 = tf.reshape(input1, tf.concat([[-1], tf.shape(input1)[-3:]], axis=0))\n # NHWC to NCHW\n input0 = tf.transpose(input0, [0, 3, 1, 2])\n input1 = tf.transpose(input1, [0, 3, 1, 2])\n # normalize to [-1, 1]\n input0 = input0 * 2.0 - 1.0\n input1 = input1 * 2.0 - 1.0\n\n input0_name, input1_name = 'in0:0', 'in1:0'\n\n default_graph = tf.get_default_graph()\n producer_version = default_graph.graph_def_versions.producer\n\n cache_dir = os.path.expanduser('~/.lpips')\n os.makedirs(cache_dir, exist_ok=True)\n # files to try. try a specific producer version, but fallback to the version-less version (latest).\n pb_fnames = [\n '%s_%s_v%s_%d.pb' % (model, net, version, producer_version),\n '%s_%s_v%s.pb' % (model, net, version),\n ]\n for pb_fname in pb_fnames:\n if not os.path.isfile(os.path.join(cache_dir, pb_fname)):\n try:\n _download(os.path.join(_URL, pb_fname), cache_dir)\n except urllib.error.HTTPError:\n pass\n if os.path.isfile(os.path.join(cache_dir, pb_fname)):\n break\n\n with open(os.path.join(cache_dir, pb_fname), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def,\n input_map={input0_name: input0, input1_name: input1})\n distance, = default_graph.get_operations()[-1].outputs\n\n if distance.shape.ndims == 4:\n distance = tf.squeeze(distance, axis=[-3, -2, -1])\n # reshape the leading dimensions\n distance = tf.reshape(distance, batch_shape)\n return distance\n" ]
[ [ "tensorflow.import_graph_def", "tensorflow.transpose", "tensorflow.shape", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.to_float", "tensorflow.get_default_graph", "tensorflow.GraphDef" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
muralispaiitm/IRIS_Flowers_Prediction
[ "c81874f2df4f5d2d97412719959ecfa312b923d7" ]
[ "app.py" ]
[ "from flask import Flask, render_template, request\nimport pickle\nimport numpy as np\n\nmodel = pickle.load(open('iri.pkl', 'rb'))\n\napp = Flask(__name__)\n\[email protected]('/')\ndef Home():\n return render_template('home.html')\n\[email protected]('/predict', methods=['POST'])\ndef Predict_Val():\n data1 = request.form['a'] # a=5.1\n data2 = request.form['b'] # b=2.2\n data3 = request.form['c'] # c=1.4\n data4 = request.form['d'] # d=0.2\n arr = np.array([[data1, data2, data3, data4]])\n pred = model.predict(arr)\n return render_template('result.html', data=pred)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=5002)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vanavski/Text_tonal_analyzer
[ "a990d965dc4eca5bb43a70a050eb3ad10323760b" ]
[ "Python/Temp/ModelsFit.py" ]
[ "import os\n\nimport pandas\nfrom sklearn.externals import joblib\nfrom sklearn.naive_bayes import GaussianNB\n\ncwd = os.getcwd()\n\n\ndef read_training_data():\n training_data = dict()\n data = pandas.read_csv(os.path.join('..', '..', 'Databases', 'dataset_with_trigrams.csv'), sep=';',\n encoding='utf-8')\n\n training_data['features'] = data.loc()[:, ['unigrams_weight', 'bigrams_weight', 'trigrams_weight']]\n training_data['labels'] = data['tonal']\n\n return training_data\n\n\ndef model_fit(classifier, training_data):\n classifier.fit(training_data['features'], training_data['labels'])\n joblib.dump(classifier, 'model_trigrams.pkl', compress=9)\n\n\ntraining_data = read_training_data()\nclassifier = GaussianNB()\nmodel_fit(classifier, training_data)\n" ]
[ [ "sklearn.externals.joblib.dump", "sklearn.naive_bayes.GaussianNB" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GSGSS/git_learning
[ "ceeabfe88edf2ecbfaf58eb42ca878a7a0910c4c" ]
[ "AIlearning/pandas_test.py" ]
[ "# from pandas import Series,DataFrame\nimport pandas as pd\n\nobj = pd.Series([3, 4, 6, -1])\n\n# print(obj)\n# print(obj.values)\n# print({'a': 1, 'b': 2})\n\n# obj2 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) # 索引可以重复\n# print(obj2)\n\n# DataFram操作高维数组\ndata = {'city': ['shanghai', 'shanghai', 'beijing', 'beijing'],\n 'year': [2018, 2017, 2018, 2017],\n 'pop': [1.5, 1.6, 2.2, 2.3]\n }\nframe = pd.DataFrame(data)\nprint(frame)\n\n# 按年份,城市,人口排序\nframe2 = pd.DataFrame(data, columns=['year', 'city', 'pop'],)\n\n# print(frame2)\n# print(frame2['city'])\n# print(frame2.year)\n\nframe2['new'] = 100\nframe2['cap'] = frame2.city == 'beijing'\n\nframe2['new'] = 100\nprint(frame2)\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
MattH688/BERNAISE
[ "fdeff715c1730dd7867ee371d1150f06b4c52d15" ]
[ "problems/flow_spiral2D.py" ]
[ "import dolfin as df\nimport numpy as np\nimport os\nfrom . import *\nfrom common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\nfrom common.cmd import MPI_rank\n# from mpi4py import MPI\nfrom common.bcs import Fixed, Pressure, NoSlip\n#\nfrom ufl import max_value\n__author__ = \"Matthew Hockley\"\n\n\ndef FaceLength(faceNum, mesh, subdomains_file, dim):\n\n # State desired face which measures are taking place upon\n if mpi_is_root():\n print(faceNum)\n\n # Display how mesh is separated\n # print(\"Node: \", MPI_rank, \"Mesh Cells: \", mesh.cells().size)\n \n # Import subdomains\n mvc = df.MeshValueCollection(\"size_t\", mesh, dim-1)\n with df.XDMFFile(mpi_comm(), subdomains_file) as infile:\n infile.read(mvc, \"name_to_read\")\n facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)\n\n ## Calculate limits so inflow parabolic can work on co-ordinates not at 0\n\n # Create empty variable space\n X = []\n Y = []\n xInflowRange = 0\n yInflowRange = 0\n xInflowMin = 0\n yInflowMin = 0\n\n # Retrive all co-ords as element for desired face\n It_facet = df.SubsetIterator(facet_domains,faceNum)\n mpi_barrier()\n # print(\"Faces: \", df.SubsetIterator(facet_domains,faceNum))\n \n\n #https://fenicsproject.org/qa/13995/print-coordinate-of-boundary-seperately-i-e-left-boundary/\n #It_mesh = vertices([facet_domains.array() == 26])\n\n # Collected all co-ords for desired face\n for facet_domains in It_facet:\n for v in df.vertices(facet_domains):\n X.append(v.point().x())\n Y.append(v.point().y())\n\n # Ensure all processes collect co-ords for desired face\n mpi_barrier()\n\n # Gather all co-ords to calc length/min\n X = mpi_gather(X, 0)\n Y = mpi_gather(Y, 0)\n\n # Sync all parallel processes for length/min calc\n mpi_barrier()\n \n if mpi_is_root():\n # Remove empty and combine all arrays\n X = np.concatenate(X)\n Y = np.concatenate(Y)\n # Calculate length and min values\n xInflowRange = np.ptp(X,axis=0)\n yInflowRange = np.ptp(Y,axis=0)\n xInflowMin = np.amin(X)\n yInflowMin = np.amin(Y)\n\n # END: Sync all parallel processes for length/min calc\n mpi_barrier()\n\n # Broadcast all length/min calc to all nodes used\n xInflowRange = mpi_bcast(xInflowRange, 0)\n yInflowRange = mpi_bcast(yInflowRange, 0)\n xInflowMin = mpi_bcast(xInflowMin, 0)\n yInflowMin = mpi_bcast(yInflowMin, 0)\n\n # Clear variables\n v = []\n It_facet = []\n facet_domains = []\n\n return xInflowRange, yInflowRange, xInflowMin, yInflowMin\n\ndef problem():\n info_cyan(\"Flow around 2D spiral benchmark.\")\n # 2, beta in phase 1, beta in phase 2\n solutes = [[\"c_p\", 0, 1e-3, 1e-2, 4., 1.]]\n\n # Format: name : (family, degree, is_vector)\n base_elements = dict(u=[\"Lagrange\", 2, True],\n p=[\"Lagrange\", 1, False],\n phi=[\"Lagrange\", 1, False],\n g=[\"Lagrange\", 1, False],\n c=[\"Lagrange\", 1, False],\n V=[\"Lagrange\", 1, False])\n\n factor = 2\n scaling_factor = 1\n \n # Default parameters to be loaded unless starting from checkpoint.\n parameters = dict(\n solver=\"basic\", # Type of problem sovler\n folder=\"results_spiral2D\", # Save folder\n import_mesh = True, # If importing XDMF mesh files\n scale_factor = scaling_factor, # Change mesh dimension (Use if mesh not in metres)\n mesh_file = \"meshes/mesh_Spiral2D.xdmf\", # Mesh filepath\n subdomains_file = \"meshes/mf_Spiral2D.xdmf\", # Subdomains filepath\n name_Facet = \"inlet\", # Name of inlet within \"boundaries_Facet\" for Hmin/H\n restart_folder=False, # Use if restarting from different folder\n enable_NS=True, # Enable Navier 
Stokes (NS)\n enable_PF=False, # Enable Phase Field (PF)\n enable_EC=False, # Enable Electrochem (EC)\n save_intv=5, # Export data time point interval\n stats_intv=5, # Export stats interval\n checkpoint_intv=50, # Export checkpoint for restart\n tstep=0, # Unsure\n dt=0.0015/factor, # s Time steps\n t_0=0., # s Start time\n T=8., # s Total time\n solutes=solutes, # I believe are electrochem (EC)/phase field (PF) related\n base_elements=base_elements, # Basic \"CG\"/\"Lagrange\" function space\n #\n H=0.41, # Length of inlet (Updated in \"faceLength\")\n Hmin=0, # Minimum of inlet (Updated in \"faceLength\")\n dim = 2, # Dimensions\n XInflow = True, # Direction of flow along X axis\n #\n # Simulation parameters\n grav_const=0.0, # 0 gravity as microfluidic\n inlet_velocity=-1.5, # m/s (Negative due to -x inflow direction)\n V_0=0., # Unsure\n #\n # Fluid parameters (Water at 22C)\n density=[998.2, 998.2], # Kg/m3\n viscosity=[1.003e-3, 1.003e-3], # Kg/m.s kinematic viscosity\n permittivity=[1., 1.], # EC?\n #\n # Solver parameters\n use_iterative_solvers=True, # if False, might have memory issues\n use_pressure_stabilization=False, # Seems to be a type of SUPG, unsure (see solver)\n #\n # Boundary related physical labels (Numbers within mf_subdomains.xdmf)\n # Typically created within GMSH/Netgen and converted by Meshio\n boundaries_Facet = {'inlet': 10,\n 'outletL': 6,\n 'outletR': 3,\n 'wall': [2,4,5,7,8,9],\n }\n )\n\n # Retrieve inlet dimensions (min/length) from mesh\n [mesh1, parameters1] = mesh(parameters[\"mesh_file\"], \n parameters[\"subdomains_file\"], parameters[\"XInflow\"],\n parameters[\"boundaries_Facet\"], \"inlet\", parameters[\"scale_factor\"], False)\n \n # Remove temp mesh, not required\n mesh1 = []\n\n # Save parameters to main dictionary (Already bcast from mesh function)\n parameters[\"dim\"] = parameters1[\"dim\"]\n parameters[\"H\"] = parameters1[\"H\"]\n parameters[\"Hmin\"] = parameters1[\"Hmin\"]\n\n # Output of Hmin and H for inlet velocity calculations (see \"velocity_init\")\n # mpi_barrier()\n # if mpi_is_root():\n # print(\"Hmin: \", parameters[\"Hmin\"])\n # print(\"H: \", parameters[\"H\"])\n\n # Ensure all processes complete before return (Might be redundant)\n mpi_barrier()\n\n return parameters\n\n\ndef mesh(mesh_file, subdomains_file, XInflow,\n boundaries_Facet, name_Facet, scale_factor,\n import_mesh, **namespace):\n # Load mesh from file (NETGEN mesh as .grid to .xml using DOLFIN)\n \n mesh = df.Mesh()\n with df.XDMFFile(mpi_comm(),mesh_file) as infile:\n infile.read(mesh)\n\n # # Scale mesh from mm to m\n x = mesh.coordinates()\n x[:, :] *= scale_factor\n # # Move mesh so co-ords always positive\n #\n xymin = x.min(axis=0) \n mpi_barrier()\n xymin = np.min(mpi_gather(xymin, 0))\n mpi_barrier()\n xymin = mpi_bcast(xymin, 0)\n mpi_barrier()\n\n x[:, :] = x[:, :] - xymin\n # Apply to mesh\n mesh.bounding_box_tree().build(mesh) \n\n # Define boundary conditions\n dim = mesh.topology().dim()\n if mpi_is_root(): \n print('Dim:',dim)\n\n # Ensure all processes have completed\n mpi_barrier()\n\n if import_mesh: #Mesh import only if true\n return mesh\n else: #Otherwise generating length and min of boundary facet assuming line\n\n # Retrieve length and min of boundary facet (inlet in most cases)\n [X, Y, Xmin, Ymin] = FaceLength(boundaries_Facet[name_Facet], mesh,\n subdomains_file, dim)\n\n # Display boundary dimensions (inlet in most cases)\n mpi_barrier()\n if mpi_is_root():\n info_yellow(\"Boundary Dimensions\")\n print(\"x: \",X)\n print(\"y: \",Y)\n 
print(\"xMin: \",Xmin)\n print(\"yMin: \",Ymin)\n\n # Save length/min to dictionary\n # This will not overwrite prior dictionary\n # as this is in an indepenent function\n parameters = dict()\n parameters[\"dim\"] = dim\n if XInflow == True:\n parameters[\"H\"] = Y\n parameters[\"Hmin\"] = Ymin\n else:\n parameters[\"H\"] = X\n parameters[\"Hmin\"] = Xmin\n\n # Ensure all processes have completed (Might be redundant)\n mpi_barrier()\n\n return mesh, parameters\n\n\ndef initialize(H, Hmin, solutes, restart_folder,\n field_to_subspace,\n inlet_velocity,\n enable_NS, enable_PF, enable_EC,\n **namespace):\n \"\"\" Create the initial state.\n The initial states are specified in a dict indexed by field. The format\n should be\n w_init_field[field] = 'df.Function(...)'.\n The work dicts w_ and w_1 are automatically initialized from these\n functions elsewhere in the code.\n Note: You only need to specify the initial states that are nonzero.\n \"\"\"\n w_init_field = dict()\n # if not restart_folder:\n # if enable_NS:\n # try:\n # subspace = field_to_subspace[\"u\"].collapse()\n # except:\n # subspace = field_to_subspace[\"u\"]\n # u_init = velocity_init(H, inlet_velocity, 0, 1, Hmin)\n # w_init_field[\"u\"] = df.interpolate(u_init, subspace)\n\n # Ensure all processes have completed (Might be redundant)\n mpi_barrier()\n return w_init_field\n\n\ndef create_bcs(dim, H, Hmin, inlet_velocity,\n V_0, solutes, subdomains_file,\n enable_NS, enable_PF, enable_EC, \n mesh, boundaries_Facet, **namespace):\n \"\"\" The boundaries and boundary conditions are defined here. \"\"\"\n mvc = df.MeshValueCollection(\"size_t\", mesh, dim-1) \n with df.XDMFFile(subdomains_file) as infile:\n infile.read(mvc, \"name_to_read\")\n facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)\n\n # Re-create boundaries with facet_domain for mesh relevance\n\n boundaries = dict(\n inlet = [facet_domains, boundaries_Facet[\"inlet\"]],\n outletL = [facet_domains, boundaries_Facet[\"outletL\"]],\n outletR = [facet_domains, boundaries_Facet[\"outletR\"]],\n wall = [facet_domains, boundaries_Facet[\"wall\"]],\n )\n\n # Alocating the boundary dicts\n bcs = dict()\n bcs_pointwise = dict()\n for boundary in boundaries:\n bcs[boundary] = dict()\n\n ## Velocity Phase Flow In (Retrieve expression)\n #\n #length inlet, water inflow, X/Y, Positive/neg flow along axis\n velocity_expr = velocity_init(H, inlet_velocity, 0, 1, Hmin) \n velocity_in = Fixed(velocity_expr)\n\n # Pressure set to 0 at outlet\n pressure_out = Pressure(0.0)\n # Create NoSlip function for walls\n noslip = NoSlip() # Fixed((0., 0.)) no difference using either.\n\n ## Define boundaries\n # Note we have two outlets\n if enable_NS:\n bcs[\"inlet\"][\"u\"] = velocity_in # Velocity expression for inflow\n bcs[\"outletL\"][\"p\"] = pressure_out # 0 pressure expression for outflow\n bcs[\"outletR\"][\"p\"] = pressure_out # 0 pressure expression for outflow\n bcs[\"wall\"][\"u\"] = noslip # No slip for walls\n\n\n # Ensure all processes have completed (Might be redundant) \n mpi_barrier()\n return boundaries, bcs, bcs_pointwise\n\n\ndef velocity_init(H, inlet_velocity, XY, Pos, xyMin, degree=2):\n #length inlet, inflow (m/s), X or Y dir, Positive/neg flow along axis\n if XY == 0:\n return df.Expression(\n (\"Pos*4*U*(x[1] - xyMin)*(H-(x[1] - xyMin))/pow(H, 2)\", \"0.0\"),\n Pos=Pos, H=H, U=inlet_velocity, xyMin = xyMin, degree=degree)\n else: # if XY == 1\n return df.Expression(\n (\"0.0\", \"Pos*4*U*(x[0] - xyMin)*(H-(x[0] - xyMin))/pow(H, 2)\"),\n Pos=Pos, H=H, 
U=inlet_velocity, xyMin = xyMin, degree=degree)\n\n ## If you want a constant and not parabolic inflow, comment above and use...\n #\n # return df.Expression((\"U\",\"0.0\"), U=inlet_velocity, degree=degree)\n # Remember to define X or Y inflow manually if constant (current X)\n\ndef tstep_hook(t, tstep, stats_intv, statsfile, field_to_subspace,\n field_to_subproblem, subproblems, w_, **namespace):\n info_blue(\"Timestep = {}\".format(tstep))\n # Function which runs every simulation tick\n\n\ndef start_hook(newfolder, **namespace):\n statsfile = os.path.join(newfolder, \"Statistics/stats.dat\")\n return dict(statsfile=statsfile)\n # Function which runs at start of simulation" ]
[ [ "numpy.concatenate", "numpy.ptp", "numpy.amin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elitap/classimbalance
[ "ae807ec533da5eef18f4180b29383399bc57696a" ]
[ "src/contrib/transforms.py" ]
[ "from typing import Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport warnings\n\nfrom monai.config import KeysCollection\nfrom monai.transforms.croppad.array import (\n SpatialCrop,\n)\n\nfrom monai.transforms.transform import MapTransform, Randomizable\nfrom monai.transforms.utils import (\n generate_pos_neg_label_crop_centers,\n map_binary_to_indices,\n)\nfrom monai.utils import ImageMetaKey as Key\nfrom monai.utils import fall_back_tuple\n\nclass RandCropByPosNeg(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandCropByPosNegLabel`.\n Crop random fixed sized regions with the center being a foreground or background voxel\n based on the Pos Neg Ratio.\n Suppose all the expected fields specified by `keys` have same shape,\n and add `patch_index` to the corresponding meta data.\n And will return a list of dictionaries for all the cropped images.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n label_key: name of key for label image, this will be used for finding foreground/background.\n labels: if given only consider labels in list as foreground\n spatial_size: the spatial size of the crop region e.g. [224, 224, 128].\n If its components have non-positive values, the corresponding size of `data[label_key]` will be used.\n pos: used with `neg` together to calculate the ratio ``pos / (pos + neg)`` for the probability\n to pick a foreground voxel as a center rather than a background voxel.\n neg: used with `pos` together to calculate the ratio ``pos / (pos + neg)`` for the probability\n to pick a foreground voxel as a center rather than a background voxel.\n num_samples: number of samples (crop regions) to take in each list.\n image_key: if image_key is not None, use ``label == 0 & image > image_threshold`` to select\n the negative sample(background) center. so the crop center will only exist on valid image area.\n image_threshold: if enabled image_key, use ``image > image_threshold`` to determine\n the valid image content area.\n fg_indices_key: if provided pre-computed foreground indices of `label`, will ignore above `image_key` and\n `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices_key`\n and `bg_indices_key` together, expect to be 1 dim array of spatial indices after flattening.\n a typical usage is to call `FgBgToIndicesd` transform first and cache the results.\n bg_indices_key: if provided pre-computed background indices of `label`, will ignore above `image_key` and\n `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices_key`\n and `bg_indices_key` together, expect to be 1 dim array of spatial indices after flattening.\n a typical usage is to call `FgBgToIndicesd` transform first and cache the results.\n meta_key_postfix: use `key_{postfix}` to to fetch the meta data according to the key data,\n default is `meta_dict`, the meta data is a dictionary object.\n used to add `patch_index` to the meta dict.\n allow_missing_keys: don't raise exception if key is missing.\n batch_size: used to force foreground or background sampling, if not set batch_size is assumed to be num_samples\n\n Raises:\n ValueError: When ``pos`` or ``neg`` are negative.\n ValueError: When ``pos=0`` and ``neg=0``. 
Incompatible values.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n spatial_size: Union[Sequence[int], int],\n label_key: str,\n labels: Optional[List] = None,\n pos: float = 1.0,\n neg: float = 1.0,\n num_samples: int = 1,\n image_key: Optional[str] = None,\n image_threshold: float = 0.0,\n fg_indices_key: Optional[str] = None,\n bg_indices_key: Optional[str] = None,\n meta_key_postfix: str = \"meta_dict\",\n allow_missing_keys: bool = False,\n probabilistic_pos_neg: bool = True,\n batch_size: int = 0,\n ) -> None:\n MapTransform.__init__(self, keys, allow_missing_keys)\n self.label_key = label_key\n self.labels = labels\n self.spatial_size: Union[Tuple[int, ...], Sequence[int], int] = spatial_size\n if pos < 0 or neg < 0:\n raise ValueError(f\"pos and neg must be nonnegative, got pos={pos} neg={neg}.\")\n if pos + neg == 0:\n raise ValueError(\"Incompatible values: pos=0 and neg=0.\")\n self.pos_ratio = pos / (pos + neg)\n self.num_samples = num_samples\n self.image_key = image_key\n self.image_threshold = image_threshold\n self.fg_indices_key = fg_indices_key\n self.bg_indices_key = bg_indices_key\n self.meta_key_postfix = meta_key_postfix\n self.centers: Optional[List[List[np.ndarray]]] = None\n self.probabilistic_pos_neg = probabilistic_pos_neg\n if batch_size <= 0:\n self.batch_size = num_samples\n else:\n self.batch_size = batch_size\n self.batch_idx = 0\n\n def generate_pos_neg_label_crop_centers(\n self,\n spatial_size: Union[Sequence[int], int],\n num_samples: int,\n pos_ratio: float,\n label_spatial_shape: Sequence[int],\n fg_indices: np.ndarray,\n bg_indices: np.ndarray,\n rand_state: np.random.RandomState = np.random,\n ) -> List[List[np.ndarray]]:\n \"\"\"\n Generate valid sample locations based on the label with option for specifying foreground ratio\n Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W]\n\n Args:\n spatial_size: spatial size of the ROIs to be sampled.\n num_samples: total sample centers to be generated.\n pos_ratio: ratio of total locations generated that have center being foreground.\n label_spatial_shape: spatial shape of the original label data to unravel selected centers.\n fg_indices: pre-computed foreground indices in 1 dimension.\n bg_indices: pre-computed background indices in 1 dimension.\n rand_state: numpy randomState object to align with other modules.\n\n Raises:\n ValueError: When the proposed roi is larger than the image.\n ValueError: When the foreground and background indices lengths are 0.\n\n \"\"\"\n spatial_size = fall_back_tuple(spatial_size, default=label_spatial_shape)\n if not (np.subtract(label_spatial_shape, spatial_size) >= 0).all():\n raise ValueError(\"The proposed roi is larger than the image.\")\n\n # Select subregion to assure valid roi\n valid_start = np.floor_divide(spatial_size, 2)\n # add 1 for random\n valid_end = np.subtract(label_spatial_shape + np.array(1), spatial_size / np.array(2)).astype(np.uint16)\n # int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range\n # from being too high\n for i in range(len(valid_start)): # need this because np.random.randint does not work with same start and end\n if valid_start[i] == valid_end[i]:\n valid_end[i] += 1\n\n def _correct_centers(\n center_ori: List[np.ndarray], valid_start: np.ndarray, valid_end: np.ndarray\n ) -> List[np.ndarray]:\n for i, c in enumerate(center_ori):\n center_i = c\n if c < valid_start[i]:\n center_i = valid_start[i]\n if c >= valid_end[i]:\n 
center_i = valid_end[i] - 1\n center_ori[i] = center_i\n return center_ori\n\n centers = []\n fg_indices, bg_indices = np.asarray(fg_indices), np.asarray(bg_indices)\n if fg_indices.size == 0 and bg_indices.size == 0:\n raise ValueError(\"No sampling location available.\")\n\n if fg_indices.size == 0 or bg_indices.size == 0:\n warnings.warn(\n f\"N foreground {len(fg_indices)}, N background {len(bg_indices)}, \"\n \"unable to generate class balanced samples.\"\n )\n pos_ratio = 0 if fg_indices.size == 0 else 1\n\n for _ in range(num_samples):\n if self.probabilistic_pos_neg:\n indices_to_use = fg_indices if rand_state.rand() < pos_ratio else bg_indices\n else:\n indices_to_use = fg_indices if self.batch_idx < round(self.batch_size * pos_ratio) else bg_indices\n random_int = rand_state.randint(len(indices_to_use))\n center = np.unravel_index(indices_to_use[random_int], label_spatial_shape)\n # shift center to range of valid centers\n center_ori = list(center)\n centers.append(_correct_centers(center_ori, valid_start, valid_end))\n\n self.batch_idx += 1\n if self.batch_idx == self.batch_size:\n self.batch_idx = 0\n\n return centers\n\n def randomize(\n self,\n label: np.ndarray,\n fg_indices: Optional[np.ndarray] = None,\n bg_indices: Optional[np.ndarray] = None,\n image: Optional[np.ndarray] = None,\n ) -> None:\n self.spatial_size = fall_back_tuple(self.spatial_size, default=label.shape[1:])\n if fg_indices is None or bg_indices is None:\n fg_indices_, bg_indices_ = map_binary_to_indices(label, image, self.image_threshold)\n else:\n fg_indices_ = fg_indices\n bg_indices_ = bg_indices\n self.centers = self.generate_pos_neg_label_crop_centers(\n self.spatial_size, self.num_samples, self.pos_ratio, label.shape[1:], fg_indices_, bg_indices_, self.R\n )\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, np.ndarray]]:\n d = dict(data)\n label = d[self.label_key].copy()\n\n if self.labels:\n unique_labels = np.unique(label)\n for label_id in unique_labels:\n if label_id not in self.labels:\n label[label == label_id] = 0\n\n image = d[self.image_key] if self.image_key else None\n fg_indices = d.get(self.fg_indices_key) if self.fg_indices_key is not None else None\n bg_indices = d.get(self.bg_indices_key) if self.bg_indices_key is not None else None\n\n self.randomize(label, fg_indices, bg_indices, image)\n if not isinstance(self.spatial_size, tuple):\n raise AssertionError\n if self.centers is None:\n raise AssertionError\n results: List[Dict[Hashable, np.ndarray]] = [{} for _ in range(self.num_samples)]\n\n for i, center in enumerate(self.centers):\n for key in self.key_iterator(d):\n img = d[key]\n cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size) # type: ignore\n results[i][key] = cropper(img)\n # fill in the extra keys with unmodified data\n for key in set(data.keys()).difference(set(self.keys)):\n results[i][key] = data[key]\n # add `patch_index` to the meta data\n for key in self.key_iterator(d):\n meta_data_key = f\"{key}_{self.meta_key_postfix}\"\n if meta_data_key not in results[i]:\n results[i][meta_data_key] = {} # type: ignore\n results[i][meta_data_key][Key.PATCH_INDEX] = i\n\n return results" ]
[ [ "numpy.unique", "numpy.asarray", "numpy.floor_divide", "numpy.subtract", "numpy.array", "numpy.unravel_index" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
patrikperssonmath/MDPN
[ "b7ffa649118c9b882c9e5050d7b784abefeb99ed" ]
[ "Converters/sfm_colmap_converter.py" ]
[ "# MIT License\n\n# Copyright (c) 2020 Patrik Persson and Linn Öström\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import division\nimport Converters.colmap_reader as cm\nimport numpy as np\nfrom glob import glob\nimport os\nimport imageio\nimport scipy.io\nimport csv\nimport uuid\nfrom skimage.io import imread, imsave\nfrom skimage.transform import resize\nimport numpy as np\nimport pickle\nfrom PIL import Image\nimport shutil\nimport argparse\nimport yaml\n\n# Own packages\nfrom Sfm.sfm_image import sfm_image\n\n\nclass sfm_colmap_converter:\n def __init__(self, config, input_folder, datasets):\n self.dataset_dir = config[\"sfm_dataset\"][\"root_dir\"]\n self.dataset_colmap_dir = input_folder\n self.dataset_name = datasets.split(\" \")\n self.shape = (config['dataset']['image_height'],\n config['dataset']['image_width'], 3)\n self.output_folder = os.path.join(self.dataset_dir, \"processed\")\n\n def create_camera_files(self, dataset):\n\n cameras, images, points3D = cm.read_model(os.path.join(\n self.dataset_colmap_dir, dataset, \"dense/sparse\"), \".bin\")\n\n image_path = os.path.join(\n self.dataset_colmap_dir, dataset, \"dense/images\")\n\n uuids = dict()\n\n sfm_images = []\n\n for key, image in images.items():\n uuid_val = dataset+\"_\"+image.name # str(uuid.uuid4())\n uuids.update({key: uuid_val})\n\n for key, image in images.items():\n name = os.path.join(image_path, image.name)\n\n camera = cameras[image.camera_id]\n\n params = camera.params\n\n K = np.array([[params[0], 0.0, params[2]],\n [0.0, params[1], params[3]],\n [0.0, 0.0, 1.0]], dtype=np.float32)\n\n R = cm.qvec2rotmat(image.qvec)\n\n t = np.expand_dims(image.tvec, axis=1)\n\n P = np.concatenate((R, t), axis=1)\n\n nbr_points = 0\n\n covisible = dict()\n\n uuid_val = uuids[key]\n\n for obj in image.point3D_ids:\n if obj > -1:\n point = points3D[obj]\n\n for im_id in point.image_ids:\n id = uuids[im_id]\n\n if id in covisible:\n covisible[id] += 1\n else:\n covisible.update({id: 1})\n\n nbr_points += 1\n\n covisible.update({uuid_val: nbr_points})\n\n covisible = [(k, v/nbr_points) for k, v in covisible.items()]\n\n covisible.sort(key=lambda x: x[1], reverse=True)\n\n U = np.zeros((3, nbr_points), dtype=np.float32)\n\n nbr_points = 0\n\n for obj in image.point3D_ids:\n if obj > -1:\n point = points3D[obj]\n\n U[:, nbr_points] = point.xyz\n\n nbr_points += 1\n\n sfm_images.append(\n sfm_image(name, covisible, P, K, uuid_val, U))\n\n return sfm_images\n\n def process(self):\n\n for dir_ in 
self.dataset_name:\n #path = os.path.join(self.dataset_dir, dir_)\n self.convert(self.create_camera_files(dir_), dir_)\n\n def convert(self, sfm_images, dir):\n\n dict_file = {}\n\n output_folder_dataset = os.path.join(self.output_folder, dir)\n\n if os.path.exists(output_folder_dataset):\n shutil.rmtree(output_folder_dataset)\n\n for i, im in enumerate(sfm_images):\n\n image = Image.open(im.getFileName())\n image = np.array(image)\n # image = imread(im.getFileName())\n\n dim = image.shape\n\n sy = self.shape[0]/dim[0]\n sx = self.shape[1]/dim[1]\n\n image_r = resize(image, self.shape)\n\n path = im.getFileName().split(\"/\")\n\n name = path[-1]\n\n im.rescale(sx, sy)\n\n im.setFileName(os.path.join(output_folder_dataset, \"image\", name))\n\n image_folder_path = os.path.join(output_folder_dataset, \"image\")\n\n if not os.path.exists(image_folder_path):\n\n os.makedirs(image_folder_path)\n\n imsave(im.getFileName(), (image_r*255.0).astype(np.uint8))\n\n im.image = None\n im.depth = None\n\n dict_file.update({im.getId(): im.__dict__})\n\n print(\"Done %i out of %i\" % (i+1, len(sfm_images)), end=\"\\r\")\n\n print(\"\")\n\n with open(os.path.join(output_folder_dataset, \"sfm_images.pickle\"), 'wb') as file:\n pickle.dump(dict_file, file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('config', type=str)\n parser.add_argument('--input_folder', \"--i\", default=\"/data/colmap\",\n type=str, required=False)\n parser.add_argument('--datasets', \"--d\", default=\"Kallerum\",\n type=str, required=False)\n\n args = parser.parse_args()\n\n with open(args.config, 'r') as stream:\n try:\n config = yaml.safe_load(stream)\n\n sfm_colmap_converter(config, args.input_folder,\n args.datasets).process()\n\n except yaml.YAMLError as exc:\n print(exc)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.expand_dims", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
plageon/TSP
[ "8f0680bfc341728921a285ae13b3063a72c85f55" ]
[ "Genetic Algorithm.py" ]
[ "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport random\n\npos_list = np.array([[530.0, 209.0], [1126.0, 1229.0], [1492.0, 901.0], [692.0, 1332.0],\n [1485.0, 163.0], [1960.0, 710.0], [1602.0, 922.0], [1692.0, 1134.0],\n [733.0, 1444.0], [570.0, 1280.0], [1243.0, 705.0], [664.0, 1318.0],\n [217.0, 156.0], [1319.0, 579.0], [933.0, 1323.0], [375.0, 93.0],\n [295.0, 1368.0], [1417.0, 1243.0], [1833.0, 933.0], [1980.0, 238.0],\n [1136.0, 1320.0], [123.0, 999.0], [1022.0, 496.0], [19.0, 492.0],\n [261.0, 1022.0], [846.0, 115.0], [216.0, 612.0], [570.0, 905.0],\n [1854.0, 516.0], [1041.0, 332.0], [17.0, 825.0], [1790.0, 1203.0],\n [1715.0, 307.0], [1357.0, 8.0], [348.0, 20.0], [371.0, 1176.0],\n [171.0, 1067.0], [1230.0, 1052.0], [368.0, 549.0], [23.0, 1329.0],\n [1823.0, 1343.0], [1178.0, 893.0], [590.0, 842.0], [339.0, 766.0],\n [1826.0, 1437.0], [1751.0, 849.0], [322.0, 1379.0], [1858.0, 232.0],\n [348.0, 1112.0], [1136.0, 898.0], [242.0, 1154.0], [1598.0, 680.0],\n [288.0, 530.0], [1808.0, 328.0], [1742.0, 249.0], [1935.0, 65.0],\n [503.0, 1199.0], [36.0, 452.0], [1400.0, 970.0], [128.0, 1388.0],\n [976.0, 230.0], [666.0, 1444.0], [1430.0, 1456.0], [1643.0, 425.0]])\n\ncity_num = pos_list.shape[0]\ncity_list = list()\ncolony_size = 300\nlast_generation = list()\nnext_generation = list()\n\nmax_iter = 481\n\niter_aver_list = np.zeros((max_iter))\niter_min_list = np.zeros((max_iter))\nbest_path_history = np.zeros((max_iter, city_num))\n\nswitch_possibility =1\nmutate_possibility = 0.5\n\n\nclass City():\n def __init__(self, x, y, id):\n self.x = x\n self.y = y\n self.phero_density = np.zeros((city_num))\n self.dis_count = np.zeros((city_num))\n\n\nclass Path():\n def __init__(self):\n self.id = id\n self.path = np.zeros((city_num))\n self.fitness = 0\n self.length = 0\n\n def gen_random_path(self):\n self.path = np.random.permutation(range(city_num))\n\n def cal_length(self):\n self.length = 0\n for i in range(city_num - 1):\n m, n = int(self.path[i]), int(self.path[i + 1])\n self.length += city_list[m].dis_count[n]\n m, n = int(self.path[city_num - 1]), int(self.path[0])\n self.length += city_list[m].dis_count[n]\n self.fitness = 1 / self.length\n return self.length\n\n\ndef cal_dis(c1: City, c2: City) -> float:\n dis = math.sqrt(math.pow(c1.x - c2.x, 2) + math.pow(c1.y - c2.y, 2))\n return dis\n\n\ndef crossover(p1: Path, p2: Path):\n start = random.randint(0, city_num)\n finish = random.randint(0, city_num)\n if finish < start:\n start, finish = finish, start\n new_path1 = np.zeros((city_num))\n new_path2 = np.zeros((city_num))\n swap1 = p1.path[start:finish]\n swap2 = p2.path[start:finish]\n new_path1[start:finish] = swap2\n new_path2[start:finish] = swap1\n j = 0\n for i in p1.path:\n if i not in swap2:\n if j == start:\n j = finish\n new_path1[j] = i\n j += 1\n j = 0\n for i in p2.path:\n if i not in swap1:\n if j == start:\n j = finish\n new_path2[j] = i\n j += 1\n c1 = Path()\n c2 = Path()\n c1.path = new_path1\n c2.path = new_path2\n return c1, c2\n\n\ndef mutate(p: Path):\n m = random.randint(0, city_num - 1)\n n = random.randint(0, city_num - 1)\n new_path = p.path\n new_path[m], new_path[n] = new_path[n], new_path[m]\n c = Path()\n c.path = new_path\n return c\n\n\nif __name__ == \"__main__\":\n best_iter = 0\n global_min_lenth = 30000\n # initialize cities\n\n for i in range(city_num):\n new_city = City(pos_list[i][0], pos_list[i][1], i)\n dis_map = np.zeros(city_num)\n city_list.append(new_city)\n for i in range(city_num):\n for j in range(city_num):\n 
city_list[i].dis_count[j] = cal_dis(city_list[i], city_list[j])\n city_list[j].dis_count[i] = cal_dis(city_list[i], city_list[j])\n\n # initialize the colony\n for i in range(colony_size):\n new_path = Path()\n new_path.gen_random_path()\n last_generation.append(new_path)\n\n for iter in range(max_iter):\n iter_len = np.zeros((colony_size))\n iter_best_path = np.zeros((city_num))\n\n next_generation.clear()\n\n for i in range(int(colony_size / 2 - 1)):\n if random.random() < switch_possibility:\n c1, c2 = crossover(last_generation[i], last_generation[-i])\n next_generation.append(c1)\n next_generation.append(c2)\n\n for i in range(colony_size):\n next_generation.append(last_generation[i])\n if random.random() < mutate_possibility:\n c = mutate(last_generation[i])\n next_generation.append(c)\n # choose\n iter_min_len = last_generation[0].cal_length()\n gen_size = len(next_generation)\n lenth_list = np.zeros((gen_size))\n for i in range(gen_size):\n length = next_generation[i].cal_length()\n lenth_list[i] = length\n if length < iter_min_len:\n iter_best_path = next_generation[i].path\n iter_min_len = length\n lenth_list.sort()\n iter_len = lenth_list[0:colony_size]\n threshold = lenth_list[colony_size - 1]\n last_generation.clear()\n for p in next_generation:\n if p.length <= threshold:\n last_generation.append(p)\n last_generation = last_generation[0:colony_size]\n\n iter_aver_len = np.average(iter_len)\n iter_aver_list[iter] = iter_aver_len\n iter_min_list[iter] = iter_min_len\n best_path_history[iter] = iter_best_path\n if iter_min_len < global_min_lenth:\n global_min_lenth = iter_min_len\n best_iter = iter\n print(\"iteration: \", iter, \"aver:\", iter_aver_len, \"min:\", iter_min_len)\n\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(12, 10))\n axes[0].plot(iter_aver_list, 'k', marker='*')\n axes[0].set_title('Average Length')\n axes[0].set_xlabel(u'iteration')\n\n axes[1].plot(iter_min_list, 'k', marker='<')\n axes[1].set_title('Best Length')\n axes[1].set_xlabel(u'iteration')\n fig.savefig('Average_Best.png', dpi=500, bbox_inches='tight')\n plt.close()\n plt.show()\n\n k = 0\n while k < max_iter:\n finalpath = best_path_history[k]\n plt.plot(pos_list[:, 0], pos_list[:, 1], 'r.', marker='>')\n plt.xlim([-100, 2000])\n plt.ylim([-100, 1500])\n for i in range(city_num - 1):\n m, n = int(finalpath[i]), int(finalpath[i + 1])\n plt.plot([pos_list[m][0], pos_list[n][0]], [pos_list[m][1], pos_list[n][1]], \"k\")\n m, n = int(finalpath[city_num - 1]), int(finalpath[0])\n plt.plot([pos_list[m][0], pos_list[n][0]], [pos_list[m][1], pos_list[n][1]], \"k\")\n ax = plt.gca()\n ax.set_title(\"Best Path\")\n ax.set_xlabel('X_axis')\n ax.set_ylabel('Y_axis')\n\n plt.savefig(str(k) + 'path.png', dpi=500, bbox_inches='tight')\n plt.close()\n plt.show()\n if k <= 40:\n k += 20\n elif k<300:\n k+=60\n else:\n k += 180\n\n finalpath = best_path_history[best_iter]\n print(\"Best iter:\", best_iter, \"minimum length:\", global_min_lenth)\n print(\"best path:\", finalpath)\n plt.plot(pos_list[:, 0], pos_list[:, 1], 'r.', marker='>')\n plt.xlim([-100, 2100])\n plt.ylim([-100, 1600])\n for i in range(city_num - 1):\n m, n = int(finalpath[i]), int(finalpath[i + 1])\n plt.plot([pos_list[m][0], pos_list[n][0]], [pos_list[m][1], pos_list[n][1]], \"k\")\n m, n = int(finalpath[city_num - 1]), int(finalpath[0])\n plt.plot([pos_list[m][0], pos_list[n][0]], [pos_list[m][1], pos_list[n][1]], \"k\")\n ax = plt.gca()\n ax.set_title(\"Best Path \" + \" iter: \" + str(best_iter) + \" minlen: \" + 
str(global_min_lenth))\n ax.set_xlabel('X_axis')\n ax.set_ylabel('Y_axis')\n\n plt.savefig('Bestpath.png', dpi=500, bbox_inches='tight')\n plt.close()\n plt.show()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.close", "numpy.average", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lenas95/pyunicorn
[ "cf02dabd14108cebbeaa36112e644f95b00630f2" ]
[ "tests/test_climate/test_tsonis.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# This file is part of pyunicorn.\n# Copyright (C) 2008--2019 Jonathan F. Donges and pyunicorn authors\n# URL: <http://www.pik-potsdam.de/members/donges/software>\n# License: BSD (3-clause)\n#\n# Please acknowledge and cite the use of this software and its authors\n# when results are used in publications or published elsewhere.\n#\n# You can use the following reference:\n# J.F. Donges, J. Heitzig, B. Beronov, M. Wiedermann, J. Runge, Q.-Y. Feng,\n# L. Tupikina, V. Stolbova, R.V. Donner, N. Marwan, H.A. Dijkstra,\n# and J. Kurths, \"Unified functional network and nonlinear time series analysis\n# for complex systems science: The pyunicorn package\"\n\"\"\"\nTests for the TsonisClimateNetwork class.\n\"\"\"\nimport numpy as np\n\nfrom pyunicorn.climate.climate_data import ClimateData\nfrom pyunicorn.climate.tsonis import TsonisClimateNetwork\n\ndef test_str(capsys):\n print(TsonisClimateNetwork.SmallTestNetwork())\n out, err = capsys.readouterr()\n out_ref = \"TsonisClimateNetwork:\\n\" + \\\n \"ClimateNetwork:\\n\" + \\\n \"GeoNetwork:\\n\" + \\\n \"SpatialNetwork:\\n\" + \\\n \"Network: undirected, 6 nodes, 6 links, link density 0.400.\" + \\\n \"\\nGeographical boundaries:\\n\" + \\\n \" time lat lon\\n\" + \\\n \" min 0.0 0.00 2.50\\n\" + \\\n \" max 9.0 25.00 15.00\\n\" + \\\n \"Threshold: 0.5\\n\" + \\\n \"Local connections filtered out: False\\n\" + \\\n \"Use only data points from winter months: False\\n\"\n assert out == out_ref\n\ndef test_SmallTestNetwork():\n res = TsonisClimateNetwork.SmallTestNetwork().adjacency\n exp = np.array([[0, 0, 1, 0, 1, 0], [0, 0, 0, 1, 0, 1],\n [1, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1],\n [1, 0, 1, 0, 0, 0], [0, 1, 0, 1, 0, 0]])\n assert np.allclose(res, exp, atol=1e-04)\n\ndef test_calculate_similarity_measure():\n res = TsonisClimateNetwork.SmallTestNetwork().calculate_similarity_measure(\n anomaly=ClimateData.SmallTestData().anomaly())\n exp = np.array([[1., -0.2538, -1., 0.2538, 1., -0.2538],\n [-0.2538, 1., 0.2538, -1., -0.2538, 1.],\n [-1., 0.2538, 1., -0.2538, -1., 0.2538],\n [0.2538, -1., -0.2538, 1., 0.2538, -1.],\n [1., -0.2538, -1., 0.2538, 1., -0.2538],\n [-0.2538, 1., 0.2538, -1., -0.2538, 1.]],\n dtype=np.float32)\n assert np.allclose(res, exp, atol=1e-04)\n\ndef test_correlation():\n res = TsonisClimateNetwork.SmallTestNetwork().correlation()\n exp = np.array([[1., 0.25377226, 1., 0.25377226, 1., 0.25377226],\n [0.25377226, 1., 0.25377226, 1., 0.25377226, 1.],\n [1., 0.25377226, 1., 0.25377226, 1., 0.25377226],\n [0.25377226, 1., 0.25377226, 1., 0.25377226, 1.],\n [1., 0.25377226, 1., 0.25377226, 1., 0.25377226],\n [0.25377226, 1., 0.25377226, 1., 0.25377226, 1.]],\n dtype=np.float32)\n assert np.allclose(res, exp, atol=1e-04)\n\ndef test_winter_only():\n res = TsonisClimateNetwork.SmallTestNetwork().winter_only()\n exp = False\n assert res == exp\n\ndef test_set_winter_only():\n net = TsonisClimateNetwork.SmallTestNetwork()\n net.set_winter_only(winter_only=False)\n\n res = net.n_links\n exp = 6\n assert res == exp\n\ndef test_correlation_weighted_average_path_length():\n res = TsonisClimateNetwork.SmallTestNetwork().\\\n correlation_weighted_average_path_length()\n exp = 1.0\n assert np.isclose(res, exp, atol=1e-04)\n\ndef test_correlation_weighted_closeness():\n res = TsonisClimateNetwork.SmallTestNetwork().\\\n correlation_weighted_closeness()\n exp = np.array([0.25, 0.25, 0.25, 0.25, 0.25, 0.25])\n assert np.allclose(res, exp, atol=1e-04)\n\ndef 
test_local_correlation_weighted_vulnerability():\n res = TsonisClimateNetwork.SmallTestNetwork().\\\n local_correlation_weighted_vulnerability()\n exp = np.array([0., 0., 0., 0., 0., 0.])\n assert np.allclose(res, exp, atol=1e-04)\n" ]
[ [ "numpy.array", "numpy.allclose", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
basiralab/MONAI
[ "5b36d0227c1326701b86dbdfc0f209845240b96f" ]
[ "monai/metrics/utils.py" ]
[ "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.transforms.croppad.array import SpatialCrop\nfrom monai.transforms.utils import generate_spatial_bounding_box\nfrom monai.utils import MetricReduction, look_up_option, optional_import\n\nbinary_erosion, _ = optional_import(\"scipy.ndimage.morphology\", name=\"binary_erosion\")\ndistance_transform_edt, _ = optional_import(\"scipy.ndimage.morphology\", name=\"distance_transform_edt\")\ndistance_transform_cdt, _ = optional_import(\"scipy.ndimage.morphology\", name=\"distance_transform_cdt\")\n\n__all__ = [\"ignore_background\", \"do_metric_reduction\", \"get_mask_edges\", \"get_surface_distance\"]\n\n\ndef ignore_background(y_pred: Union[np.ndarray, torch.Tensor], y: Union[np.ndarray, torch.Tensor]):\n \"\"\"\n This function is used to remove background (the first channel) for `y_pred` and `y`.\n Args:\n y_pred: predictions. As for classification tasks,\n `y_pred` should has the shape [BN] where N is larger than 1. As for segmentation tasks,\n the shape should be [BNHW] or [BNHWD].\n y: ground truth, the first dim is batch.\n\n \"\"\"\n y = y[:, 1:] if y.shape[1] > 1 else y\n y_pred = y_pred[:, 1:] if y_pred.shape[1] > 1 else y_pred\n return y_pred, y\n\n\ndef do_metric_reduction(f: torch.Tensor, reduction: Union[MetricReduction, str] = MetricReduction.MEAN):\n \"\"\"\n This function is to do the metric reduction for calculated `not-nan` metrics of each sample's each class.\n The function also returns `not_nans`, which counts the number of not nans for the metric.\n\n Args:\n f: a tensor that contains the calculated metric scores per batch and\n per class. The first two dims should be batch and class.\n reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,\n available reduction modes: {``\"none\"``, ``\"mean\"``, ``\"sum\"``, ``\"mean_batch\"``, ``\"sum_batch\"``,\n ``\"mean_channel\"``, ``\"sum_channel\"``}, default to ``\"mean\"``.\n if \"none\", return the input f tensor and not_nans.\n Define the mode to reduce computation result of 1 batch data. 
Defaults to ``\"mean\"``.\n\n Raises:\n ValueError: When ``reduction`` is not one of\n [\"mean\", \"sum\", \"mean_batch\", \"sum_batch\", \"mean_channel\", \"sum_channel\" \"none\"].\n \"\"\"\n\n # some elements might be Nan (if ground truth y was missing (zeros))\n # we need to account for it\n nans = torch.isnan(f)\n not_nans = (~nans).float()\n\n t_zero = torch.zeros(1, device=f.device, dtype=f.dtype)\n reduction = look_up_option(reduction, MetricReduction)\n if reduction == MetricReduction.NONE:\n return f, not_nans\n\n f[nans] = 0\n if reduction == MetricReduction.MEAN:\n # 2 steps, first, mean by channel (accounting for nans), then by batch\n not_nans = not_nans.sum(dim=1)\n f = torch.where(not_nans > 0, f.sum(dim=1) / not_nans, t_zero) # channel average\n\n not_nans = (not_nans > 0).float().sum(dim=0)\n f = torch.where(not_nans > 0, f.sum(dim=0) / not_nans, t_zero) # batch average\n\n elif reduction == MetricReduction.SUM:\n not_nans = not_nans.sum(dim=[0, 1])\n f = torch.sum(f, dim=[0, 1]) # sum over the batch and channel dims\n elif reduction == MetricReduction.MEAN_BATCH:\n not_nans = not_nans.sum(dim=0)\n f = torch.where(not_nans > 0, f.sum(dim=0) / not_nans, t_zero) # batch average\n elif reduction == MetricReduction.SUM_BATCH:\n not_nans = not_nans.sum(dim=0)\n f = f.sum(dim=0) # the batch sum\n elif reduction == MetricReduction.MEAN_CHANNEL:\n not_nans = not_nans.sum(dim=1)\n f = torch.where(not_nans > 0, f.sum(dim=1) / not_nans, t_zero) # channel average\n elif reduction == MetricReduction.SUM_CHANNEL:\n not_nans = not_nans.sum(dim=1)\n f = f.sum(dim=1) # the channel sum\n elif reduction != MetricReduction.NONE:\n raise ValueError(\n f\"Unsupported reduction: {reduction}, available options are \"\n '[\"mean\", \"sum\", \"mean_batch\", \"sum_batch\", \"mean_channel\", \"sum_channel\" \"none\"].'\n )\n return f, not_nans\n\n\ndef get_mask_edges(\n seg_pred: Union[np.ndarray, torch.Tensor],\n seg_gt: Union[np.ndarray, torch.Tensor],\n label_idx: int = 1,\n crop: bool = True,\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Do binary erosion and use XOR for input to get the edges. This\n function is helpful to further calculate metrics such as Average Surface\n Distance and Hausdorff Distance.\n The input images can be binary or labelfield images. If labelfield images\n are supplied, they are converted to binary images using `label_idx`.\n\n `scipy`'s binary erosion is used to to calculate the edges of the binary\n labelfield.\n\n In order to improve the computing efficiency, before getting the edges,\n the images can be cropped and only keep the foreground if not specifies\n ``crop = False``.\n\n We require that images are the same size, and assume that they occupy the\n same space (spacing, orientation, etc.).\n\n Args:\n seg_pred: the predicted binary or labelfield image.\n seg_gt: the actual binary or labelfield image.\n label_idx: for labelfield images, convert to binary with\n `seg_pred = seg_pred == label_idx`.\n crop: crop input images and only keep the foregrounds. In order to\n maintain two inputs' shapes, here the bounding box is achieved\n by ``(seg_pred | seg_gt)`` which represents the union set of two\n images. 
Defaults to ``True``.\n \"\"\"\n\n # Get both labelfields as np arrays\n if isinstance(seg_pred, torch.Tensor):\n seg_pred = seg_pred.detach().cpu().numpy()\n if isinstance(seg_gt, torch.Tensor):\n seg_gt = seg_gt.detach().cpu().numpy()\n\n if seg_pred.shape != seg_gt.shape:\n raise ValueError(\"seg_pred and seg_gt should have same shapes.\")\n\n # If not binary images, convert them\n if seg_pred.dtype != bool:\n seg_pred = seg_pred == label_idx\n if seg_gt.dtype != bool:\n seg_gt = seg_gt == label_idx\n\n if crop:\n if not np.any(seg_pred | seg_gt):\n return np.zeros_like(seg_pred), np.zeros_like(seg_gt)\n\n seg_pred, seg_gt = np.expand_dims(seg_pred, 0), np.expand_dims(seg_gt, 0)\n box_start, box_end = generate_spatial_bounding_box(np.asarray(seg_pred | seg_gt))\n cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)\n seg_pred, seg_gt = np.squeeze(cropper(seg_pred)), np.squeeze(cropper(seg_gt))\n\n # Do binary erosion and use XOR to get edges\n edges_pred = binary_erosion(seg_pred) ^ seg_pred\n edges_gt = binary_erosion(seg_gt) ^ seg_gt\n\n return edges_pred, edges_gt\n\n\ndef get_surface_distance(seg_pred: np.ndarray, seg_gt: np.ndarray, distance_metric: str = \"euclidean\") -> np.ndarray:\n \"\"\"\n This function is used to compute the surface distances from `seg_pred` to `seg_gt`.\n\n Args:\n seg_pred: the edge of the predictions.\n seg_gt: the edge of the ground truth.\n distance_metric: : [``\"euclidean\"``, ``\"chessboard\"``, ``\"taxicab\"``]\n the metric used to compute surface distance. Defaults to ``\"euclidean\"``.\n\n - ``\"euclidean\"``, uses Exact Euclidean distance transform.\n - ``\"chessboard\"``, uses `chessboard` metric in chamfer type of transform.\n - ``\"taxicab\"``, uses `taxicab` metric in chamfer type of transform.\n\n Note:\n If seg_pred or seg_gt is all 0, may result in nan/inf distance.\n\n \"\"\"\n\n if not np.any(seg_gt):\n dis = np.inf * np.ones_like(seg_gt)\n else:\n if not np.any(seg_pred):\n dis = np.inf * np.ones_like(seg_gt)\n return np.asarray(dis[seg_gt])\n if distance_metric == \"euclidean\":\n dis = distance_transform_edt(~seg_gt)\n elif distance_metric in {\"chessboard\", \"taxicab\"}:\n dis = distance_transform_cdt(~seg_gt, metric=distance_metric)\n else:\n raise ValueError(f\"distance_metric {distance_metric} is not implemented.\")\n\n return np.asarray(dis[seg_pred])\n" ]
[ [ "numpy.expand_dims", "numpy.ones_like", "torch.isnan", "torch.zeros", "numpy.asarray", "torch.sum", "numpy.zeros_like", "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
saadabbasi/LatEstimator
[ "f0397f54d73226511bd2925c9a9e83e6cfff98d6" ]
[ "hw_nas_bench_api/nas_201_models/shape_searchs/generic_size_tiny_cell_model.py" ]
[ "#####################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #\n#####################################################\nfrom typing import List, Text, Any\nimport random, torch\nimport torch.nn as nn\n\nfrom models.cell_operations import ResNetBasicblock\nfrom models.cell_infers.cells import InferCell\nfrom models.shape_searchs.SoftSelect import select2withP, ChannelWiseInter\n\n\nclass GenericNAS301Model(nn.Module):\n\n def __init__(self, candidate_Cs: List[int], max_num_Cs: int, genotype: Any, num_classes: int, affine: bool, track_running_stats: bool):\n super(GenericNAS301Model, self).__init__()\n self._max_num_Cs = max_num_Cs\n self._candidate_Cs = candidate_Cs\n if max_num_Cs % 3 != 2:\n raise ValueError('invalid number of layers : {:}'.format(max_num_Cs))\n self._num_stage = N = max_num_Cs // 3\n self._max_C = max(candidate_Cs)\n\n stem = nn.Sequential(\n nn.Conv2d(3, self._max_C, kernel_size=3, padding=1, bias=not affine),\n nn.BatchNorm2d(self._max_C, affine=affine, track_running_stats=track_running_stats))\n\n layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N\n\n c_prev = self._max_C\n self._cells = nn.ModuleList()\n self._cells.append(stem)\n for index, reduction in enumerate(layer_reductions):\n if reduction : cell = ResNetBasicblock(c_prev, self._max_C, 2, True)\n else : cell = InferCell(genotype, c_prev, self._max_C, 1, affine, track_running_stats)\n self._cells.append(cell)\n c_prev = cell.out_dim\n self._num_layer = len(self._cells)\n\n self.lastact = nn.Sequential(nn.BatchNorm2d(c_prev, affine=affine, track_running_stats=track_running_stats), nn.ReLU(inplace=True))\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(c_prev, num_classes)\n # algorithm related\n self.register_buffer('_tau', torch.zeros(1))\n self._algo = None\n\n def set_algo(self, algo: Text):\n # used for searching\n assert self._algo is None, 'This functioin can only be called once.'\n assert algo in ['fbv2', 'tunas', 'tas'], 'invalid algo : {:}'.format(algo)\n self._algo = algo\n self._arch_parameters = nn.Parameter(1e-3*torch.randn(self._max_num_Cs, len(self._candidate_Cs)))\n if algo == 'fbv2' or algo == 'tunas':\n self.register_buffer('_masks', torch.zeros(len(self._candidate_Cs), max(self._candidate_Cs)))\n for i in range(len(self._candidate_Cs)):\n self._masks.data[i, :self._candidate_Cs[i]] = 1\n \n @property\n def tau(self):\n return self._tau\n\n def set_tau(self, tau):\n self._tau.data[:] = tau\n\n @property\n def weights(self):\n xlist = list(self._cells.parameters())\n xlist+= list(self.lastact.parameters())\n xlist+= list(self.global_pooling.parameters())\n xlist+= list(self.classifier.parameters())\n return xlist\n\n @property\n def alphas(self):\n return [self._arch_parameters]\n\n def show_alphas(self):\n with torch.no_grad():\n return 'arch-parameters :\\n{:}'.format(nn.functional.softmax(self._arch_parameters, dim=-1).cpu())\n\n @property\n def random(self):\n cs = []\n for i in range(self._max_num_Cs):\n index = random.randint(0, len(self._candidate_Cs)-1)\n cs.append(str(self._candidate_Cs[index]))\n return ':'.join(cs)\n \n @property\n def genotype(self):\n cs = []\n for i in range(self._max_num_Cs):\n with torch.no_grad():\n index = self._arch_parameters[i].argmax().item()\n cs.append(str(self._candidate_Cs[index]))\n return ':'.join(cs)\n\n def get_message(self) -> Text:\n string = self.extra_repr()\n for i, cell in enumerate(self._cells):\n string += '\\n {:02d}/{:02d} :: 
{:}'.format(i, len(self._cells), cell.extra_repr())\n return string\n\n def extra_repr(self):\n return ('{name}(candidates={_candidate_Cs}, num={_max_num_Cs}, N={_num_stage}, L={_num_layer})'.format(name=self.__class__.__name__, **self.__dict__))\n\n def forward(self, inputs):\n feature = inputs\n\n log_probs = []\n for i, cell in enumerate(self._cells):\n feature = cell(feature)\n # apply different searching algorithms\n idx = max(0, i-1)\n if self._algo == 'fbv2':\n weights = nn.functional.gumbel_softmax(self._arch_parameters[idx:idx+1], tau=self.tau, dim=-1)\n mask = torch.matmul(weights, self._masks).view(1, -1, 1, 1)\n feature = feature * mask\n elif self._algo == 'tas':\n selected_cs, selected_probs = select2withP(self._arch_parameters[idx:idx+1], self.tau, num=2)\n with torch.no_grad():\n i1, i2 = selected_cs.cpu().view(-1).tolist()\n c1, c2 = self._candidate_Cs[i1], self._candidate_Cs[i2]\n out_channel = max(c1, c2)\n out1 = ChannelWiseInter(feature[:, :c1], out_channel)\n out2 = ChannelWiseInter(feature[:, :c2], out_channel)\n out = out1 * selected_probs[0, 0] + out2 * selected_probs[0, 1]\n if feature.shape[1] == out.shape[1]:\n feature = out\n else:\n miss = torch.zeros(feature.shape[0], feature.shape[1]-out.shape[1], feature.shape[2], feature.shape[3], device=feature.device)\n feature = torch.cat((out, miss), dim=1)\n elif self._algo == 'tunas':\n prob = nn.functional.softmax(self._arch_parameters[idx:idx+1], dim=-1)\n dist = torch.distributions.Categorical(prob)\n action = dist.sample()\n log_probs.append(dist.log_prob(action))\n mask = self._masks[action.item()].view(1, -1, 1, 1)\n feature = feature * mask\n else:\n raise ValueError('invalid algorithm : {:}'.format(self._algo))\n\n out = self.lastact(feature)\n out = self.global_pooling(out)\n out = out.view(out.size(0), -1)\n logits = self.classifier(out)\n\n return out, logits, log_probs\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.functional.gumbel_softmax", "torch.zeros", "torch.cat", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.matmul", "torch.distributions.Categorical", "torch.nn.AdaptiveAvgPool2d", "torch.no_grad", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
an2018x/trafficAI
[ "8dcb32bc23b665dc3d624879f1b08916ac528a5e" ]
[ "lightTest.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets,models,transforms\nimport torchvision\nimport os\nimport cv2\nfrom PIL import Image\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net,self).__init__()\n # 1 input image channel 6 output channels 5*5 square convolution\n #kernal\n # 60*32*3->56*28*6\n self.conv1=nn.Conv2d(3,6,5)\n \n self.conv2=nn.Conv2d(6,16,5)\n # an affine operation y=wx+b\n self.fc1=nn.Linear(12*5*16,120)\n self.fc2=nn.Linear(120,84)\n self.fc3=nn.Linear(84,3)\n \n def forward(self,x):\n # Max pooling over a (2,2) window\n # 56*28*6->28*14*6\n x=F.max_pool2d(F.relu(self.conv1(x)),(2,2))\n # 24*10*16->12*5*16\n x=F.max_pool2d(F.relu(self.conv2(x)),2)\n x=x.view(-1,16*5*12)\n x=F.relu(self.fc1(x))\n x=F.relu(self.fc2(x))\n x=self.fc3(x)\n x=F.log_softmax(x,dim=1)\n return x\n \n def num_flat_features(self,x):\n size=x.size()[1:] # all dimensions except the batch dimension\n num_features=1\n for s in size:\n num_features*=s\n return num_features\n\n\nimage_size=(60,32)\n\nprocess_transform=transforms.Compose([transforms.Resize(image_size),\ntransforms.ToTensor(),\ntransforms.Normalize([0.5,0.5,0.5], [0.5, 0.5, 0.5])])\nclasses=[\n \"red\",\"green\",\"yellow\"\n]\n\ndef detectLight(imgPath):\n net=Net()\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n net.to(device)\n net.eval()\n net.load_state_dict(torch.load(\"lightWeight.pth\"))\n img=Image.open(imgPath)\n img_tensor=process_transform(img)\n img_tensor.unsqueeze_(0)\n img_tensor=img_tensor.to(device)\n out=net(img_tensor)\n _,predicted=torch.max(out,1)\n #print(classes[predicted[0]])\n \ndef detectImg(img):\n img=Image.fromarray(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))\n #img=Image.fromarray(img)\n net=Net()\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n net.to(device)\n net.eval()\n net.load_state_dict(torch.load(\"lightWeight.pth\"))\n img_tensor=process_transform(img)\n img_tensor.unsqueeze_(0)\n img_tensor=img_tensor.to(device)\n out=net(img_tensor)\n _,predicted=torch.max(out,1)\n #print(classes[predicted[0]])\n return classes[predicted[0]]\n\ndef detectImg2(img):\n img=Image.fromarray(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))\n net=Net()\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n net.to(device)\n net.eval()\n net.load_state_dict(torch.load(\"lightWeight_res.pth\"))\n img_tensor=process_transform(img)\n img_tensor.unsqueeze_(0)\n img_tensor=img_tensor.to(device)\n out=net(img_tensor)\n _,predicted=torch.max(out,1)\n #print(classes[predicted[0]])\n return classes[predicted[0]]\n\n\n\n# detectLight(\"test03.jpg\")\n\n# img=cv2.imread(\"test01.jpg\")\n# detectImg(img)\n\n" ]
[ [ "torch.max", "torch.nn.functional.log_softmax", "torch.load", "torch.nn.Conv2d", "torch.nn.Linear", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mahdinobar/V2V-PoseNet-pytorch
[ "90045b61c45f18dc20b410e2de14bd22be55fe0e" ]
[ "integral-pose/show_acc.py" ]
[ "import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom accuracy import *\nfrom plot import *\n\n\ngt_file = r'./test_s3_gt.txt'\npred_file = r'./test_res.txt'\n\n\ngt = np.loadtxt(gt_file)\ngt = gt.reshape(gt.shape[0], -1, 3)\n\npred = np.loadtxt(pred_file)\npred = pred.reshape(pred.shape[0], -1, 3)\n\nprint('gt: ', gt.shape)\nprint('pred: ', pred.shape)\n\n\nkeypoints_num = 21\nnames = ['joint'+str(i+1) for i in range(keypoints_num)]\n\n\ndist, acc = compute_dist_acc_wrapper(pred, gt, max_dist=100, num=100)\n\nfig, ax = plt.subplots()\nplot_acc(ax, dist, acc, names)\nfig.savefig('msra_s3_joint_acc.png')\nplt.show()\n\n\nmean_err = compute_mean_err(pred, gt)\nfig, ax = plt.subplots()\nplot_mean_err(ax, mean_err, names)\nfig.savefig('msra_s3_joint_acc.png')\nplt.show()\n\n\nprint('mean_err: {}'.format(mean_err))\nmean_err_all = compute_mean_err(pred.reshape((-1, 1, 3)), gt.reshape((-1, 1,3)))\nprint('mean_err_all: ', mean_err_all)\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cooparation/detection_pytorch
[ "fea3b5a04a13eba060406845a65ac07f177b6112" ]
[ "ssd/modeling/resnet_ssd.py" ]
[ "import torch.nn as nn\nfrom ssd.modeling.ssd import SSD\n\n\n# borrowed from https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py\ndef add_vgg(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'C':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n layers += [pool5, conv6,\n nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n return layers\n\n\ndef add_extras(cfg, i, size=300):\n # Extra layers added to VGG for feature scaling\n layers = []\n in_channels = i\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n if size == 512:\n layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))\n layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))\n return layers\n\n\ndef add_header(vgg, extra_layers, boxes_per_location, num_classes):\n regression_headers = []\n classification_headers = []\n vgg_source = [21, -2]\n for k, v in enumerate(vgg_source):\n regression_headers += [nn.Conv2d(vgg[v].out_channels,\n boxes_per_location[k] * 4, kernel_size=3, padding=1)]\n classification_headers += [nn.Conv2d(vgg[v].out_channels,\n boxes_per_location[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n regression_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]\n * 4, kernel_size=3, padding=1)]\n classification_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]\n * num_classes, kernel_size=3, padding=1)]\n return regression_headers, classification_headers\n\n\ndef build_resnet_ssd_model(cfg, is_test=False):\n num_classes = cfg.MODEL.NUM_CLASSES\n size = cfg.INPUT.IMAGE_SIZE\n vgg_base = {\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n '512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n }\n extras_base = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],\n }\n\n boxes_per_location = cfg.MODEL.PRIORS.BOXES_PER_LOCATION\n\n vgg_config = vgg_base[str(size)]\n extras_config = extras_base[str(size)]\n\n vgg = nn.ModuleList(add_vgg(vgg_config))\n extras = nn.ModuleList(add_extras(extras_config, i=1024, size=size))\n\n regression_headers, classification_headers = add_header(vgg, extras, boxes_per_location, num_classes=num_classes)\n regression_headers = nn.ModuleList(regression_headers)\n classification_headers = nn.ModuleList(classification_headers)\n\n return SSD(cfg=cfg,\n vgg=vgg,\n extras=extras,\n classification_headers=classification_headers,\n regression_headers=regression_headers,\n is_test=is_test)\n" ]
[ [ "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
theodoho/udkm1Dsim
[ "1d2c10bc052f0b6ebb8f65c45cf8ce3f32dddddb" ]
[ "udkm1Dsim/helpers.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# The MIT License (MIT)\n# Copyright (c) 2020 Daniel Schick\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n\n__all__ = ['make_hash_md5', 'make_hashable', 'm_power_x',\n 'm_times_n', 'finderb', 'multi_gauss']\n\n__docformat__ = 'restructuredtext'\n\nimport hashlib\nimport numpy as np\n\n\ndef make_hash_md5(obj):\n \"\"\"make_hash_md5\n\n\n Args:\n obj (any): anything that can be hashed.\n\n Returns:\n hash (str): hash from object.\n\n \"\"\"\n hasher = hashlib.md5()\n hasher.update(repr(make_hashable(obj)).encode())\n return hasher.hexdigest()\n\n\ndef make_hashable(obj):\n \"\"\"make_hashable\n\n Recursive calls to elements of tuples, lists, dicts, set, and frozensets.\n\n Args:\n obj (any): anything that can be hashed..\n\n Returns:\n obj (tuple): hashable object.\n\n \"\"\"\n if isinstance(obj, (tuple, list)):\n return tuple((make_hashable(e) for e in obj))\n\n if isinstance(obj, dict):\n return tuple(sorted((k, make_hashable(v)) for k, v in obj.items()))\n\n if isinstance(obj, (set, frozenset)):\n return tuple(sorted(make_hashable(e) for e in obj))\n\n return obj\n\n\ndef m_power_x(m, x):\n \"\"\"m_power_x\n\n Apply ``numpy.linalg.matrix_power`` to last 2 dimensions of 4-dimensional\n input matrix.\n\n Args:\n m (ndarray[float, complex]): 4-dimensional input matrix.\n x (float): exponent.\n\n Returns:\n m (ndarray[float, complex]): resulting matrix.\n\n \"\"\"\n if x > 1:\n for i in range(np.size(m, 0)):\n for j in range(np.size(m, 1)):\n m[i, j, :, :] = np.linalg.matrix_power(m[i, j, :, :], x)\n return m\n\n\ndef m_times_n(m, n):\n \"\"\"m_times_n\n\n Matrix multiplication of last 2 dimensions for two 4-dimensional input\n matrices.\n\n Args:\n m (ndarray[float, complex]): 4-dimensional input matrix.\n n (ndarray[float, complex]): 4-dimensional input matrix.\n\n Returns:\n res (ndarray[float, complex]): 4-dimensional multiplication result.\n\n \"\"\"\n return np.einsum(\"lmij,lmjk->lmik\", m, n)\n\n\ndef finderb(key, array):\n \"\"\"finderb\n\n Binary search algorithm for sorted array. Searches for the first index\n ``i`` of array where ``key`` >= ``array[i]``. ``key`` can be a scalar or\n a np.ndarray of keys. 
``array`` must be a sorted np.ndarray.\n\n Author: André Bojahr.\n Licence: BSD.\n\n Args:\n key (float, ndarray[float]): single or multiple sorted keys.\n array (ndarray[float]): sorted array.\n\n Returns:\n i (ndarray[float]): position indices for each key in the array.\n\n \"\"\"\n key = np.array(key, ndmin=1)\n n = len(key)\n i = np.zeros([n], dtype=int)\n\n for m in range(n):\n i[m] = finderb_nest(key[m], array)\n return i\n\n\ndef finderb_nest(key, array):\n \"\"\"finderb_nest\n\n Nested sub-function of :func:`.finderb` for one single key.\n\n Author: André Bojahr.\n Licence: BSD.\n\n Args:\n key (float): single key.\n array (ndarray[float]): sorted array.\n\n Returns:\n a (float): position index of key in the array.\n\n \"\"\"\n a = 0 # start of intervall\n b = len(array) # end of intervall\n\n # if the key is smaller than the first element of the\n # vector we return 1\n if key < array[0]:\n return 0\n\n while (b-a) > 1: # loop until the intervall is larger than 1\n c = int(np.floor((a+b)/2)) # center of intervall\n if key < array[c]:\n # the key is in the left half-intervall\n b = c\n else:\n # the key is in the right half-intervall\n a = c\n\n return a\n\n\ndef multi_gauss(x, s=[1], x0=[0], A=[1]):\n \"\"\"multi_gauss\n\n Multiple gauss functions with width ``s`` given as FWHM and area normalized\n to input ``A`` and maximum of gauss at ``x0``.\n\n Args:\n x (ndarray[float]): argument of multi_gauss.\n s (ndarray[float], optional): FWHM of Gaussians. Defaults to 1.\n x0 (ndarray[float], optional): centers of Gaussians. Defaults to 0.\n A (ndarray[float], optional): amplitudes of Gaussians. Defaults to 1.\n\n Returns:\n y (ndarray[float]): multiple Gaussians.\n\n \"\"\"\n s = np.asarray(s)/(2*np.sqrt(2*np.log(2)))\n a = np.asarray(A)/np.sqrt(2*np.pi*s**2) # normalize area to 1\n x0 = np.asarray(x0)\n\n y = np.zeros_like(x)\n for i in range(len(s)):\n y = y + a[i] * np.exp(-((x-x0[i])**2)/(2*s[i]**2))\n return y\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.einsum", "numpy.asarray", "numpy.linalg.matrix_power", "numpy.size", "numpy.zeros_like", "numpy.floor", "numpy.exp", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Geosyntec/pybmp
[ "9264c3d569d66f55d97f9287acb7d147dfc948ea" ]
[ "pybmpdb/bmpdb.py" ]
[ "import os\r\nimport logging\r\nfrom pkg_resources import resource_filename\r\nfrom functools import partial\r\nfrom pathlib import Path\r\n\r\nimport numpy\r\nimport pandas\r\nfrom bulwark import checks\r\n\r\nfrom . import info, utils\r\n\r\nimport wqio\r\n\r\n\r\n__all__ = [\"load_data\", \"transform_parameters\", \"paired_qual\"]\r\n\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _handle_ND_factors(df, qualcol=\"qual\", rescol=\"res\", dlcol=\"DL\", quals=None, nd_correction=2):\r\n \"\"\"Determines the scaling factor to be applied to the water quality result\r\n based on the result qualifiers in the BMP Database.\r\n\r\n Parameters\r\n ----------\r\n df : pandas.DataFrame\r\n qualcol : str, optional (default = 'qual')\r\n The column in *df* that contain the qualifiers.\r\n rescol : str, optional (default = 'res')\r\n The column in *df* that contain the results.\r\n dlcol : str, optional (default = 'DL')\r\n The column in *df* that contain the detection limts.\r\n quals : list of str, optional.\r\n A list of qualifiers that signify that a result is non-detect. Falls\r\n back to ``['U', 'UK', 'UA', 'UC', 'K']`` when not provided.\r\n nd_correction : float, optional (default = 2.0)\r\n The factor by which non-detect results will be multiplied.\r\n\r\n Returns\r\n -------\r\n factors : numpy.array\r\n\r\n Notes\r\n -----\r\n The underlying assumption here is that the BMP Database reports non-detects\r\n at half of their detection limit. So we need to double the reported value\r\n to get the upper limit of the result for ROS/Kaplan-Meier imputation.\r\n\r\n Also note that there are some weird cases where UJ-flagged data should be\r\n given a different. This occurs when the reported result is greater than the\r\n reported DL. Lastly, UJ-flagged data where the result is less than the DL\r\n should be scaled by the ratio of the result to the DL, such that\r\n result * factor = DL.\r\n\r\n \"\"\"\r\n\r\n quals = wqio.validate.at_least_empty_list(quals)\r\n if not quals:\r\n quals.extend([\"U\", \"UK\", \"UA\", \"UC\", \"K\"])\r\n\r\n normal_ND = [df[qualcol].isin(quals), float(nd_correction)]\r\n weird_UJ = [\r\n (df[qualcol] == \"UJ\") & (df[rescol] < df[dlcol]),\r\n df[dlcol] / df[rescol],\r\n ]\r\n return wqio.utils.selector(1, normal_ND, weird_UJ)\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _handle_ND_qualifiers(df, qualcol=\"qual\", rescol=\"res\", dlcol=\"DL\", quals=None):\r\n \"\"\"Determines final qualifier to be applied to the water quality result\r\n based on the result qualifiers in the BMP Database. Non-detects get \"ND\",\r\n detected values get \"=\".\r\n\r\n Parameters\r\n ----------\r\n df : pandas.DataFrame\r\n qualcol : str, optional (default = 'qual')\r\n The column in *df* that contain the qualifiers.\r\n rescol : str, optional (default = 'res')\r\n The column in *df* that contain the results.\r\n dlcol : str, optional (default = 'DL')\r\n The column in *df* that contain the detection limts.\r\n quals : list of str, optional.\r\n A list of qualifiers that signify that a result is non-detect. 
Falls\r\n back to ``['U', 'UA', 'UI', 'UC', 'UK', 'K']`` when not provided.\r\n\r\n Returns\r\n -------\r\n qualifiers : numpy.array\r\n\r\n See also\r\n --------\r\n _handle_ND_factors\r\n\r\n Notes\r\n -----\r\n Same basic premise as _handle_ND_factors, but different qualifiers count\r\n as ND compared to what we used to determine the ND-scaling factors.\r\n\r\n \"\"\"\r\n\r\n quals = wqio.validate.at_least_empty_list(quals)\r\n if not quals:\r\n quals.extend([\"U\", \"UA\", \"UI\", \"UC\", \"UK\", \"K\"])\r\n\r\n is_ND = df[qualcol].isin(quals) | ((df[qualcol] == \"UJ\") & (df[rescol] <= df[dlcol]))\r\n return numpy.where(is_ND, \"ND\", \"=\")\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _process_screening(df, screencol):\r\n yes = df[screencol].str.lower().isin([\"inc\", \"yes\", \"y\"])\r\n no = df[screencol].str.lower().isin([\"exc\", \"no\", \"n\"])\r\n return wqio.utils.selector(\"invalid\", [yes, \"yes\"], [no, \"no\"])\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _process_sampletype(df, sampletype):\r\n grab = [df[sampletype].str.lower().str.contains(\"grab\"), \"grab\"]\r\n composite = [\r\n df[sampletype].str.lower().str.contains(\"emc\") | df[sampletype].str.lower().str.contains(\"comp\"),\r\n \"composite\",\r\n ]\r\n return wqio.utils.selector(\"unknown\", grab, composite)\r\n\r\n\r\ndef _check_levelnames(levels):\r\n good_levels = [\r\n \"category\",\r\n \"site\",\r\n \"bmp\",\r\n \"parameter\",\r\n \"sampletype\",\r\n \"epazone\",\r\n \"state\",\r\n \"paramgroup\",\r\n ]\r\n msg = \"valid levels are {}\".format(good_levels)\r\n\r\n for lvl in levels:\r\n if lvl not in good_levels:\r\n raise ValueError(msg)\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef transform_parameters(\r\n df,\r\n existingparams,\r\n newparam,\r\n newunits,\r\n resfxn,\r\n qualfxn,\r\n indexMods=None,\r\n paramlevel=\"parameter\",\r\n):\r\n \"\"\"Apply an arbitrary transformation to a parameter in the data\r\n\r\n Parameters\r\n ----------\r\n df : pandas.DataFrame\r\n existingparams : list of strings\r\n List of the existing parameters that will be used to compute\r\n the new values\r\n newparam : string\r\n Name of the new parameter to be generated\r\n newunits : string\r\n Units of the newly computed values\r\n resfxn : callable\r\n Function (or lambda) that will determine the result of\r\n ``newparam`` based on the values of ``existingparams``.\r\n Function must assume to be operating on a row of\r\n ``self.data`` with the elements of ``existingparams`` stored\r\n as columns.\r\n qualfxn : function\r\n Same as ``resfxn``, but for determining the final qualifier\r\n of the ``newparam`` results.\r\n indexMods : dict, optional (keys = index level names)\r\n Dictionary of index level name whose values are the new\r\n values of those levels where ``parameter == newparam``.\r\n\r\n Returns\r\n -------\r\n transformed : pandas.DataFrame\r\n\r\n \"\"\"\r\n\r\n index_name_cache = df.index.names\r\n existingparams = wqio.validate.at_least_empty_list(existingparams)\r\n\r\n transformed = (\r\n df.query(\"{} in @existingparams\".format(paramlevel))\r\n .pipe(utils.refresh_index)\r\n .unstack(level=paramlevel)\r\n .pipe(wqio.utils.assign_multilevel_column, qualfxn, \"qual\", newparam)\r\n .pipe(wqio.utils.assign_multilevel_column, resfxn, \"res\", newparam)\r\n .xs(newparam, level=paramlevel, axis=\"columns\", drop_level=False)\r\n .stack(level=paramlevel)\r\n )\r\n\r\n indexMods = wqio.validate.at_least_empty_dict(indexMods, units=newunits)\r\n # add the units into indexMod, apply all 
changes\r\n indexMods[\"units\"] = newunits\r\n for levelname, value in indexMods.items():\r\n transformed = wqio.utils.redefine_index_level(transformed, levelname, value, criteria=None, dropold=True)\r\n\r\n # return the *full* dataset (preserving original params)\r\n result = pandas.concat([df.reset_index(), transformed.reset_index()], sort=False).set_index(index_name_cache)\r\n return result\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef paired_qual(df, qualin=\"qual_inflow\", qualout=\"qual_outflow\"):\r\n ND_neither = [(df[qualin] == \"=\") & (df[qualout] == \"=\"), \"Pair\"]\r\n ND_in = [(df[qualin] == \"ND\") & (df[qualout] == \"=\"), \"Influent ND\"]\r\n ND_out = [(df[qualin] == \"=\") & (df[qualout] == \"ND\"), \"Effluent ND\"]\r\n ND_both = [(df[qualin] == \"ND\") & (df[qualout] == \"ND\"), \"Both ND\"]\r\n return wqio.utils.selector(\"=\", ND_neither, ND_in, ND_out, ND_both)\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _pick_non_null(df, maincol, preferred, secondary):\r\n return df[(maincol, preferred)].combine_first(df[(maincol, secondary)])\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _pick_best_station(df):\r\n def best_col(df, mainstation, backupstation, valcol):\r\n for sta in [mainstation, backupstation]:\r\n if (sta, valcol) not in df.columns:\r\n df = wqio.utils.assign_multilevel_column(df, numpy.nan, sta, valcol)\r\n\r\n return df[(mainstation, valcol)].combine_first(df[(backupstation, valcol)])\r\n\r\n orig_index = df.index.names\r\n data = (\r\n df.pipe(utils.refresh_index)\r\n .unstack(level=\"station\")\r\n .pipe(wqio.utils.swap_column_levels, 0, 1)\r\n .pipe(\r\n wqio.utils.assign_multilevel_column,\r\n lambda df: best_col(df, \"outflow\", \"subsurface\", \"res\"),\r\n \"final_outflow\",\r\n \"res\",\r\n )\r\n .pipe(\r\n wqio.utils.assign_multilevel_column,\r\n lambda df: best_col(df, \"outflow\", \"subsurface\", \"qual\"),\r\n \"final_outflow\",\r\n \"qual\",\r\n )\r\n .pipe(\r\n wqio.utils.assign_multilevel_column,\r\n lambda df: best_col(df, \"inflow\", \"reference outflow\", \"res\"),\r\n \"final_inflow\",\r\n \"res\",\r\n )\r\n .pipe(\r\n wqio.utils.assign_multilevel_column,\r\n lambda df: best_col(df, \"inflow\", \"reference outflow\", \"qual\"),\r\n \"final_inflow\",\r\n \"qual\",\r\n )\r\n .loc[:, lambda df: df.columns.map(lambda c: \"final_\" in c[0])]\r\n .rename(columns=lambda col: col.replace(\"final_\", \"\"))\r\n .stack(level=\"station\")\r\n )\r\n\r\n return data\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _pick_best_sampletype(df):\r\n orig_cols = df.columns\r\n xtab = df.pipe(utils.refresh_index).unstack(level=\"sampletype\")\r\n for col in orig_cols:\r\n grabvalues = numpy.where(xtab[(col, \"composite\")].isnull(), xtab[(col, \"grab\")], numpy.nan)\r\n xtab = wqio.utils.assign_multilevel_column(xtab, grabvalues, col, \"grab\")\r\n\r\n data = xtab.loc[:, xtab.columns.map(lambda c: c[1] != \"unknown\")].stack(level=[\"sampletype\"])\r\n return data\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _maybe_filter_onesided_BMPs(df, balanced_only):\r\n grouplevels = [\"site\", \"bmp\", \"parameter\", \"category\"]\r\n pivotlevel = \"station\"\r\n\r\n if balanced_only:\r\n return (\r\n df.unstack(level=pivotlevel)\r\n .groupby(level=grouplevels)\r\n .filter(lambda g: numpy.all(g[\"res\"].describe().loc[\"count\"] > 0))\r\n .stack(level=pivotlevel)\r\n )\r\n else:\r\n return df\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _filter_by_storm_count(df, minstorms):\r\n # filter out all monitoring stations with less 
than /N/ storms\r\n grouplevels = [\"site\", \"bmp\", \"parameter\", \"station\"]\r\n\r\n data = df.groupby(level=grouplevels).filter(lambda g: g.count()[\"res\"] >= minstorms)\r\n return data\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _filter_by_BMP_count(df, minbmps):\r\n grouplevels = [\"category\", \"parameter\", \"station\"]\r\n\r\n data = df.groupby(level=grouplevels).filter(lambda g: g.index.get_level_values(\"bmp\").unique().shape[0] >= minbmps)\r\n return data\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _maybe_combine_WB_RP(df, combine_WB_RP, catlevel=\"category\"):\r\n if combine_WB_RP:\r\n # merge Wetland Basins and Retention ponds, keeping\r\n # the original records\r\n wbrp_indiv = [\"Retention Pond\", \"Wetland Basin\"]\r\n wbrp_combo = \"Wetland Basin/Retention Pond\"\r\n level_pos = utils.get_level_position(df, catlevel)\r\n return wqio.utils.redefine_index_level(\r\n df,\r\n catlevel,\r\n wbrp_combo,\r\n dropold=False,\r\n criteria=lambda row: row[level_pos] in wbrp_indiv,\r\n ).pipe(\r\n checks.custom_check,\r\n lambda df: df.index.get_level_values(catlevel) == wbrp_combo,\r\n )\r\n else:\r\n return df\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _maybe_combine_nox(\r\n df,\r\n combine_nox,\r\n paramlevel=\"parameter\",\r\n rescol=\"res\",\r\n qualcol=\"qual\",\r\n finalunits=\"mg/L\",\r\n):\r\n if combine_nox:\r\n # combine NO3+NO2 and NO3 into NOx\r\n nitro_components = [\r\n \"Nitrogen, Nitrite (NO2) + Nitrate (NO3) as N\",\r\n \"Nitrogen, Nitrate (NO3) as N\",\r\n ]\r\n nitro_combined = \"Nitrogen, NOx as N\"\r\n\r\n picker = partial(_pick_non_null, preferred=nitro_components[0], secondary=nitro_components[1])\r\n\r\n return transform_parameters(\r\n df,\r\n nitro_components,\r\n nitro_combined,\r\n finalunits,\r\n partial(picker, maincol=rescol),\r\n partial(picker, maincol=qualcol),\r\n ).pipe(\r\n checks.custom_check,\r\n lambda df: df.index.get_level_values(paramlevel) == nitro_combined,\r\n )\r\n else:\r\n return df\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _maybe_fix_PFCs(df, fix_PFCs, catlevel=\"category\", typelevel=\"bmptype\"):\r\n if fix_PFCs:\r\n PFC = \"Permeable Friction Course\"\r\n type_level_pos = utils.get_level_position(df, typelevel)\r\n return wqio.utils.redefine_index_level(\r\n df,\r\n catlevel,\r\n PFC,\r\n dropold=True,\r\n criteria=lambda row: row[type_level_pos] == \"PF\",\r\n ).pipe(checks.custom_check, lambda df: df.index.get_level_values(catlevel) == PFC)\r\n else:\r\n return df\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _maybe_remove_grabs(df, remove_grabs, grab_ok_bmps=\"default\"):\r\n if remove_grabs:\r\n if grab_ok_bmps.lower() == \"default\":\r\n grab_ok_bmps = [\r\n \"Retention Pond\",\r\n \"Wetland Basin\",\r\n \"Wetland Basin/Retention Pond\",\r\n ]\r\n\r\n grab_ok_bmps = wqio.validate.at_least_empty_list(grab_ok_bmps)\r\n\r\n querytxt = (\r\n \"(sampletype == 'composite') | \"\r\n \"(((category in @grab_ok_bmps) | (paramgroup == 'Biological')) & \"\r\n \" (sampletype != 'unknown'))\"\r\n )\r\n return df.query(querytxt)\r\n return df\r\n\r\n\r\ndef _load_raw_data(csvfile=None):\r\n csvfile = Path(csvfile or wqio.download(\"bmpdata\"))\r\n return pandas.read_csv(csvfile, parse_dates=[\"sampledate\"], encoding=\"utf-8\")\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _clean_raw_data(raw_df, nd_correction=2):\r\n _row_headers = [\r\n \"category\",\r\n \"epazone\",\r\n \"state\",\r\n \"site\",\r\n \"bmp\",\r\n \"station\",\r\n \"storm\",\r\n \"sampletype\",\r\n \"watertype\",\r\n 
\"paramgroup\",\r\n \"units\",\r\n \"parameter\",\r\n \"fraction\",\r\n \"wq_initialscreen\",\r\n \"ms_indivscreen\",\r\n \"wq_catscreen\",\r\n \"bmptype\",\r\n \"ws_id\",\r\n \"site_id\",\r\n \"bmp_id\",\r\n \"dot_type\",\r\n ]\r\n\r\n units_norm = {u[\"unicode\"]: info.getNormalization(u[\"name\"]) for u in info.units}\r\n\r\n target_units = {p[\"name\"].lower(): info.getUnitsFromParam(p[\"name\"], attr=\"unicode\") for p in info.parameters}\r\n\r\n expected_rows = raw_df.loc[:, \"res\"].groupby(lambda x: x > 0).count().loc[True]\r\n\r\n drop_columns = [\"ms\", \"_parameter\"]\r\n prepped = (\r\n raw_df.fillna({\"qual\": \"=\"})\r\n .dropna(subset=[\"res\"])\r\n .assign(qual=lambda df: df[\"qual\"].str.strip())\r\n .assign(res=lambda df: df[\"res\"] * _handle_ND_factors(df, nd_correction=nd_correction))\r\n .assign(qual=lambda df: _handle_ND_qualifiers(df))\r\n .assign(wq_initialscreen=lambda df: _process_screening(df, \"wq_initialscreen\"))\r\n .assign(ms_indivscreen=lambda df: _process_screening(df, \"ms_indivscreen\"))\r\n .assign(wq_catscreen=lambda df: _process_screening(df, \"wq_catscreen\"))\r\n .assign(station=lambda df: df[\"station\"].str.lower())\r\n .assign(sampletype=lambda df: _process_sampletype(df, \"sampletype\"))\r\n .assign(sampledatetime=lambda df: df.apply(wqio.utils.makeTimestamp, axis=1))\r\n .assign(units=lambda df: df[\"units\"].map(lambda u: info.getUnits(u, attr=\"unicode\")))\r\n .assign(_parameter=lambda df: df[\"parameter\"].str.lower().str.strip())\r\n .assign(fraction=lambda df: numpy.where(df[\"_parameter\"].str.contains(\"dissolved\"), \"dissolved\", \"total\"))\r\n .pipe(\r\n wqio.utils.normalize_units,\r\n units_norm,\r\n target_units,\r\n paramcol=\"_parameter\",\r\n rescol=\"res\",\r\n unitcol=\"units\",\r\n napolicy=\"raise\",\r\n )\r\n .drop(drop_columns, axis=1)\r\n .query(\"res > 0\")\r\n .pipe(\r\n checks.multi_check,\r\n [\r\n lambda df: checks.has_no_nans(df, columns=_row_headers),\r\n lambda df: checks.has_no_nones(df, columns=_row_headers),\r\n ],\r\n )\r\n .groupby(by=_row_headers)\r\n .agg({\"res\": \"mean\", \"qual\": \"min\", \"sampledatetime\": \"min\"})\r\n .set_index(\"sampledatetime\", append=True)\r\n .pipe(checks.unique_index)\r\n )\r\n return prepped\r\n\r\n\r\[email protected]_df_shape(_logger)\r\ndef _prepare_for_summary(\r\n df,\r\n minstorms=3,\r\n minbmps=3,\r\n combine_nox=True,\r\n combine_WB_RP=True,\r\n remove_grabs=True,\r\n grab_ok_bmps=\"default\",\r\n balanced_only=True,\r\n fix_PFCs=True,\r\n excluded_bmps=None,\r\n excluded_params=None,\r\n):\r\n \"\"\"Prepare data for categorical summaries\r\n\r\n Parameter\r\n ---------\r\n df : pandas.DataFrame\r\n minstorms : int (default = 3)\r\n Minimum number of storms (monitoring events) for a BMP study to be included\r\n minbmps : int (default = 3)\r\n Minimum number of BMP studies for a parameter to be included\r\n combine_nox : bool (default = True)\r\n Toggles combining NO3 and NO2+NO3 into as new parameter NOx, giving\r\n preference to NO2+NO3 when both parameters are observed for an event.\r\n The underlying assuption is that NO2 concentrations are typically much\r\n smaller than NO3, thus NO2+NO3 ~ NO3.\r\n combine_WB_RP : bool (default = True)\r\n Toggles combining Retention Pond and Wetland Basin data into a new\r\n BMP category: Retention Pond/Wetland Basin.\r\n remove_grabs : bool (default = True)\r\n Toggles removing grab samples from the dataset except for:\r\n - biological parameters\r\n - BMPs categories that are whitelisted via *grab_ok_bmps*\r\n grab_ok_bmps 
: sequence of str, optional\r\n BMP categories for which grab data should be included. By default, this\r\n inclues Retention Ponds, Wetland Basins, and the combined\r\n Retention Pond/Wetland Basin category created when *combine_WB_RP* is\r\n True.\r\n balanced_only : bool (default = True)\r\n Toggles removing BMP studies which have only influent or effluent data,\r\n exclusively.\r\n fix_PFCs : bool (default = True)\r\n Makes correction to the category of Permeable Friction Course BMPs\r\n excluded_bmps, excluded_params : sequence of str, optional\r\n List of BMPs studies and parameters to exclude from the data.\r\n\r\n Returns\r\n -------\r\n summarizable : pandas.DataFrame\r\n\r\n \"\"\"\r\n\r\n excluded_bmps = wqio.validate.at_least_empty_list(excluded_bmps)\r\n excluded_params = wqio.validate.at_least_empty_list(excluded_params)\r\n\r\n return (\r\n df.pipe(_maybe_combine_WB_RP, combine_WB_RP)\r\n .pipe(_maybe_combine_nox, combine_nox)\r\n .pipe(_maybe_fix_PFCs, fix_PFCs)\r\n .pipe(_maybe_remove_grabs, remove_grabs, grab_ok_bmps)\r\n .query(\"bmp not in @excluded_bmps\")\r\n .query(\"parameter not in @excluded_params\")\r\n .pipe(_pick_best_sampletype)\r\n .pipe(_pick_best_station)\r\n .pipe(_maybe_filter_onesided_BMPs, balanced_only)\r\n .pipe(_filter_by_storm_count, minstorms)\r\n .pipe(_filter_by_BMP_count, minbmps)\r\n )\r\n\r\n\r\ndef load_data(\r\n datapath=None,\r\n minstorms=3,\r\n minbmps=3,\r\n combine_nox=True,\r\n combine_WB_RP=True,\r\n remove_grabs=True,\r\n grab_ok_bmps=\"default\",\r\n balanced_only=True,\r\n fix_PFCs=True,\r\n excluded_bmps=None,\r\n excluded_params=None,\r\n as_dataframe=False,\r\n **dc_kwargs\r\n):\r\n \"\"\"Prepare data for categorical summaries\r\n\r\n Parameter\r\n ---------\r\n datapath : Path-like, optional\r\n Path to the raw data CSV. If not provided, the latest data will be\r\n downloaded.\r\n minstorms : int (default = 3)\r\n Minimum number of storms (monitoring events) for a BMP study to be included\r\n minbmps : int (default = 3)\r\n Minimum number of BMP studies for a parameter to be included\r\n combine_nox : bool (default = True)\r\n Toggles combining NO3 and NO2+NO3 into as new parameter NOx, giving\r\n preference to NO2+NO3 when both parameters are observed for an event.\r\n The underlying assuption is that NO2 concentrations are typically much\r\n smaller than NO3, thus NO2+NO3 ~ NO3.\r\n combine_WB_RP : bool (default = True)\r\n Toggles combining Retention Pond and Wetland Basin data into a new\r\n BMP category: Retention Pond/Wetland Basin.\r\n remove_grabs : bool (default = True)\r\n Toggles removing grab samples from the dataset except for:\r\n - biological parameters\r\n - BMPs categories that are whitelisted via *grab_ok_bmps*\r\n grab_ok_bmps : sequence of str, optional\r\n BMP categories for which grab data should be included. 
By default, this\r\n inclues Retention Ponds, Wetland Basins, and the combined\r\n Retention Pond/Wetland Basin category created when *combine_WB_RP* is\r\n True.\r\n balanced_only : bool (default = True)\r\n Toggles removing BMP studies which have only influent or effluent data,\r\n exclusively.\r\n fix_PFCs : bool (default = True)\r\n Makes correction to the category of Permeable Friction Course BMPs\r\n excluded_bmps, excluded_params : sequence of str, optional\r\n List of BMPs studies and parameters to exclude from the data.\r\n as_dataframe : bool (default = False)\r\n When False, a wqio.DataCollection is returned\r\n\r\n Additional Parameters\r\n ---------------------\r\n Any additional keword arguments will be passed to wqio.DataCollection.\r\n\r\n Returns\r\n -------\r\n bmp : pandas.DataFrame or wqio.DataCollection\r\n\r\n \"\"\"\r\n othergroups = dc_kwargs.pop(\"othergroups\", [\"category\", \"units\"])\r\n pairgroups = dc_kwargs.pop(\"pairgroups\", [\"category\", \"units\", \"bmp_id\", \"site_id\", \"storm\"])\r\n rescol = dc_kwargs.pop(\"rescol\", \"res\")\r\n qualcol = dc_kwargs.pop(\"qualcol\", \"qual\")\r\n ndval = dc_kwargs.pop(\"ndval\", [\"ND\", \"<\"])\r\n stationcol = dc_kwargs.pop(\"stationcol\", \"station\")\r\n paramcol = dc_kwargs.pop(\"paramcol\", \"parameter\")\r\n bmp = (\r\n _load_raw_data(datapath)\r\n .pipe(_clean_raw_data)\r\n .pipe(\r\n _prepare_for_summary,\r\n minstorms=minstorms,\r\n minbmps=minbmps,\r\n combine_nox=combine_nox,\r\n combine_WB_RP=combine_WB_RP,\r\n remove_grabs=remove_grabs,\r\n grab_ok_bmps=grab_ok_bmps,\r\n balanced_only=balanced_only,\r\n fix_PFCs=fix_PFCs,\r\n excluded_bmps=excluded_bmps,\r\n excluded_params=excluded_params,\r\n )\r\n )\r\n if as_dataframe:\r\n return bmp\r\n return wqio.DataCollection(\r\n bmp,\r\n rescol=rescol,\r\n qualcol=qualcol,\r\n ndval=ndval,\r\n stationcol=stationcol,\r\n paramcol=paramcol,\r\n othergroups=othergroups,\r\n pairgroups=pairgroups,\r\n **dc_kwargs\r\n )\r\n" ]
[ [ "pandas.read_csv", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mohan-chinnappan-n/tensorflow
[ "91546f51184b92bbc4eb453b96cf1838f43b9350" ]
[ "tensorflow/contrib/data/python/framework/function.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An experimental fork of the Python TensorFlow-function library.\n\nNOTE: functions are currently experimental and subject to change!\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.util import tf_inspect\n\n# NOTE(mrry): This is an experimental extension of a core class that wasn't\n# designed to be extended, so we disable protected access checks for the\n# whole file.\n# pylint: disable=protected-access\n\n\nclass _ExperimentalFuncGraph(function._FuncGraph):\n \"\"\"A helper for construction a function (supporting capture-by-value).\n\n _ExperimentalFuncGraph overrides ops.Graph's create_op() so that we can keep\n track of every inputs into every op created inside the function. If\n any input is from other graphs, we keep track of it in self.capture\n and substitute the input with a place holder.\n\n Each captured input's corresponding place holder is converted into a\n function argument and the caller passes in the captured tensor.\n \"\"\"\n\n def __init__(self, capture_by_value, *args, **kwargs):\n super(_ExperimentalFuncGraph, self).__init__(*args, **kwargs)\n self._capture_by_value = capture_by_value\n self._building_function = True\n self._outer_graph = ops.get_default_graph()\n self._vscope = vs.get_variable_scope()\n self._old_custom_getter = self._vscope.custom_getter\n self._captured = {}\n self.extra_inputs = []\n self.extra_args = []\n self.extra_vars = []\n\n def create_op(self, op_type, inputs, data_types, **kwargs):\n for i, x in enumerate(inputs):\n if x.graph is not self:\n # Referring to a tensor from other graph.\n if x in self._captured:\n # Captured already.\n inputs[i] = self._captured[x]\n elif self._capture_by_value:\n inputs[i] = self._add_tensor_and_parents(x)\n else:\n # Substitute with a placeholder.\n self.extra_inputs.append(x)\n ph = array_ops.placeholder(x.dtype, shape=x.get_shape())\n # pylint: disable=protected-access\n ph._handle_data = x._handle_data\n # pylint: enable=protected-access\n inputs[i] = ph\n self._captured[x] = ph\n self.extra_args.append(ph)\n return super(_ExperimentalFuncGraph, self).create_op(op_type, inputs,\n data_types, **kwargs)\n\n def _add_tensor_and_parents(self, tensor):\n op = self._add_op_and_parents(tensor.op)\n return op.outputs[tensor.value_index]\n\n def _add_op_and_parents(self, op):\n op_def = function._get_op_def(op)\n if op_def.is_stateful:\n raise ValueError(\"Cannot capture a stateful node by value.\")\n elif op.type in (\"Placeholder\", \"PlaceholderV2\"):\n raise ValueError(\"Cannot capture a placeholder by value.\")\n\n 
captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]\n\n captured_op = self.create_op(op.type, captured_inputs,\n [o.dtype for o in op.outputs],\n name=op.name, attrs=op.node_def.attr,\n op_def=op_def)\n\n for t, captured_t in zip(op.outputs, captured_op.outputs):\n self._captured[t] = captured_t\n\n return captured_op\n\n\nclass _ExperimentalDefinedFunction(function._DefinedFunction):\n \"\"\"Overrides _DefinedFunction with support for capture-by-value.\"\"\"\n\n def __init__(self,\n func,\n argnames,\n input_types,\n func_name=None,\n grad_func=None,\n python_grad_func=None,\n out_names=None,\n shape_func=None,\n capture_by_value=False,\n **kwargs):\n \"\"\"Creates an _ExperimentalDefinedFunction.\n\n Args:\n func: A python callable which constructs a tf function body.\n argnames: A list of strings for function argument names.\n input_types: The function's argument types. Can be a tuple, list of\n tf data types.\n func_name: The function name. Defaults to None, in which derives from\n 'func'.\n grad_func: This function's gradient function, if not None. Defaults\n to None.\n python_grad_func: A python callable implementing the gradient of\n the function python-side.\n out_names: An optional list of strings for the function return value\n names.\n shape_func: An optional function mapping an op to a list of static\n output shapes.\n capture_by_value: Boolean (defaults to False). If True, captured values\n will be copied into the function body.\n **kwargs: The keyword arguments. **kwargs is passed to every call\n site of this function.\n\n Raises:\n ValueError: The function definition is invalid.\n \"\"\"\n super(_ExperimentalDefinedFunction, self).__init__(\n func, argnames, input_types, func_name, grad_func, python_grad_func,\n out_names, shape_func, **kwargs)\n self._capture_by_value = capture_by_value\n\n def _create_definition_if_needed(self):\n \"\"\"Creates the function definition if it's not created yet.\"\"\"\n\n if self._definition is not None:\n return\n\n # Create the func_def object.\n temp_graph = _ExperimentalFuncGraph(capture_by_value=self._capture_by_value)\n with temp_graph.as_default():\n # List of placeholders for the function_def.\n inputs = []\n for (argname, argtype) in self._args:\n argholder = array_ops.placeholder(argtype, name=argname)\n inputs.append(argholder)\n # Call func and gather the output tensors.\n with vs.variable_scope(\"\", custom_getter=temp_graph.getvar):\n outputs = self._func(*inputs)\n # If func only returned one value, make it a tuple.\n if not isinstance(outputs, (list, tuple)):\n outputs = (outputs,)\n if any([_ is None for _ in outputs]):\n raise ValueError(\"Function can not return None.\")\n # Ensures each output is a Tensor.\n outputs = [ops.convert_to_tensor(_) for _ in outputs]\n self._extra_inputs = temp_graph.extra_inputs\n inputs.extend(temp_graph.extra_args)\n self._sub_functions = temp_graph._functions\n\n # Build the FunctionDef\n self._definition = function._graph_to_function_def(\n temp_graph, temp_graph.get_operations(), inputs, outputs,\n out_names=self._out_names)\n\n # Extra kwargs are treated as attrs on the function def.\n sig_pre_func_name = self._func_name or function._get_func_name(self._func)\n kwargs_attr = function._parse_kwargs_as_attrs(\n sig_pre_func_name, **self._extra_kwargs)\n for k in kwargs_attr:\n self._definition.attr[k].CopyFrom(kwargs_attr[k])\n\n # Hash the definition and its dependencies.\n self._hash_str = self._create_hash_str(\n self._definition.signature.input_arg,\n 
self._definition.signature.output_arg,\n self._definition.node_def)\n\n # Finally, we decide the function name to use. If not specified,\n # make up something which is almost certainly unique (but deterministic).\n if not self._func_name:\n self._func_name = \"_\".join([function._get_func_name(self._func),\n self._hash_str])\n self._definition.signature.name = self._func_name\n if self._func.__doc__:\n self._definition.signature.description = self._func.__doc__\n\n\nclass Defun(function.Defun):\n \"\"\"Experimental version of Defun supporting capture-by-value.\"\"\"\n\n def __init__(self, *input_types, **kwargs):\n \"\"\"Create an experimental `Defun` decorator.\n\n Args:\n *input_types: A list of `tf.DType`\n **kwargs: Optional keyword arguments (see `function.Defun`) plus:\n capture_by_value - Boolean (defaults to False). If True, captured values\n will be copied into the function body.\n \"\"\"\n super(Defun, self).__init__(*input_types, **kwargs)\n\n def __call__(self, func):\n # Various sanity checks on the callable func.\n if not callable(func):\n raise ValueError(\"func %s must be callable\" % func)\n\n # Func should not use kwargs and defaults.\n argspec = tf_inspect.getargspec(func)\n if argspec.keywords or argspec.defaults:\n raise ValueError(\"Functions with argument defaults or keyword \"\n \"arguments are not supported.\")\n\n # Computes how many arguments 'func' has.\n min_args = len(argspec.args)\n max_args = min_args\n if argspec.varargs:\n max_args = 1000000\n argnames = argspec.args\n if tf_inspect.ismethod(func):\n # 1st argument is the \"class\" type.\n min_args -= 1\n argnames = argnames[1:]\n\n if self._input_types:\n # If Defun is given a list of types for the inputs, the number\n # of input types should be compatible with 'func'.\n num = len(self._input_types)\n if num < min_args or num > max_args:\n raise ValueError(\n \"The function has fewer arguments than the number of specified \"\n \"input types.\")\n return _ExperimentalDefinedFunction(\n func, argnames, self._input_types, self._func_name, self._grad_func,\n self._python_grad_func, out_names=self._out_names,\n **self._extra_kwargs)\n\n # 'func' expects no arguments and input types is an empty list.\n if min_args == 0 and max_args == 0:\n return _ExperimentalDefinedFunction(\n func, [], [], self._func_name, self._grad_func,\n self._python_grad_func, out_names=self._out_names,\n **self._extra_kwargs)\n\n # Input types are unknown. It's an overloaded function and hence\n # its definition needs to be deferred until it's called.\n return function._OverloadedFunction(\n func, argnames, self._func_name, self._grad_func,\n self._python_grad_func, out_names=self._out_names, **self._extra_kwargs)\n" ]
[ [ "tensorflow.python.util.tf_inspect.ismethod", "tensorflow.python.framework.function._parse_kwargs_as_attrs", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.framework.function._get_op_def", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.util.tf_inspect.getargspec", "tensorflow.python.framework.function._OverloadedFunction", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.function._get_func_name" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
autonlab/fastlvm
[ "05e77da39ca525eacf1a1e3aa2cc551e6cf18dcd" ]
[ "fastlvm/utils.py" ]
[ "import copy\nimport numpy as np\nimport utilsc\nimport typing\nfrom d3m.metadata import base as metadata_base\nfrom d3m import container\nfrom common_primitives import utils\n\n_stirling = utilsc.new_stirling()\n\n\ndef read_corpus(fname, vocab=[], stopwords=[]):\n return utilsc.read_corpus(fname, vocab, stopwords)\n\n\ndef get_ref_count(var):\n return utilsc.ref_count(var)\n\n\ndef kmeanspp(k, points):\n seed_idx = utilsc.kmeanspp(k, points)\n seeds = points[seed_idx]\n return seeds\n\n\ndef log_stirling_num(n, m):\n return utilsc.log_stirling_num(_stirling, n, m)\n\n\ndef uratio(n, m):\n return utilsc.uratio(_stirling, n, m)\n\n\ndef vratio(n, m):\n return utilsc.vratio(_stirling, n, m)\n\n\ndef wratio(n, m):\n return utilsc.wratio(_stirling, n, m)\n\n\n# Copied from common_primitives.utils\ndef list_columns_with_semantic_types(metadata: metadata_base.DataMetadata,\n semantic_types: typing.Sequence[str], *,\n at: metadata_base.Selector = ()) -> typing.Sequence[int]:\n \"\"\"\n This is similar to ``get_columns_with_semantic_type``, but it returns all column indices\n for a dimension instead of ``ALL_ELEMENTS`` element.\n\n Moreover, it operates on a list of semantic types, where a column is returned\n if it matches any semantic type on the list.\n \"\"\"\n\n columns = []\n\n for element in metadata.get_elements(list(at) + [metadata_base.ALL_ELEMENTS]):\n metadata_semantic_types = metadata.query(list(at) + [metadata_base.ALL_ELEMENTS, element]).get('semantic_types',\n ())\n # TODO: Should we handle inheritance between semantic types here?\n if any(semantic_type in metadata_semantic_types for semantic_type in semantic_types):\n if element is metadata_base.ALL_ELEMENTS:\n return list(range(\n metadata.query(list(at) + [metadata_base.ALL_ELEMENTS]).get('dimension', {}).get('length', 0)))\n else:\n columns.append(typing.cast(int, element))\n\n return columns\n\n\ndef split_inputs(tokenized, frac, seed=None):\n \"\"\"Uniformly split the data to training and validation\n :returns a tuple of training and validation\n \"\"\"\n num_training = int(round((1 - frac) * len(tokenized)))\n num_training = 1 if num_training == 0 else num_training\n random_generator = np.random.default_rng(seed=seed)\n permutation = random_generator.permutation(np.arange(len(tokenized)))\n training = tokenized[permutation[:num_training]]\n if num_training == len(tokenized): # self._frac == 0\n validation = training\n else:\n validation = tokenized[permutation[(num_training + 1):]]\n return training, validation\n\n\ndef get_documents(training_inputs, non_text=False):\n \"\"\"Extract the text columns and concatenate them row-wise\n\n non_text: True to return both text and non-text columns. False to only return text attribute.\n\n returns: if non_text == False, returns a Series of strings.\n If non_text == True, returns a tuple of a Series and a Data frame. Each element in the Series is a string.\n The Data Frame contains any non text columns. 
It could be empty.\n None if the `training_inputs` contain no text columns\n \"\"\"\n # Adapted from https://github.com/brekelma/dsbox_corex/blob/master/corex_text.py\n\n # Get the text columns\n text_attributes = list_columns_with_semantic_types(\n metadata=training_inputs.metadata,\n semantic_types=[\"http://schema.org/Text\"])\n all_attributes = list_columns_with_semantic_types(\n metadata=training_inputs.metadata,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/Attribute\"])\n categorical_attributes = list_columns_with_semantic_types(\n metadata=training_inputs.metadata,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/CategoricalData\"])\n\n # want text columns that are attributes\n text_columns = set(all_attributes).intersection(text_attributes)\n\n # but, don't want to edit categorical columns\n text_columns = set(text_columns) - set(categorical_attributes)\n\n non_text_columns = set(all_attributes) - set(text_columns)\n\n # and, we want the text columns as a list\n text_columns = list(text_columns)\n\n # if no text columns are present don't do anything\n if len(text_columns) == 0:\n return None\n\n # concatenate the columns row-wise\n raw_documents = None\n for column_index in text_columns:\n if raw_documents is not None:\n raw_documents = raw_documents.str.cat(training_inputs.iloc[:, column_index], sep=\" \")\n else:\n raw_documents = copy.deepcopy(training_inputs.iloc[:, column_index])\n\n if non_text:\n # data frame of non-text columns\n if len(non_text_columns) > 0:\n non_text_features = training_inputs.iloc[:, list(non_text_columns)].copy()\n\n # remove text_columns in the metadata\n non_text_features.metadata = non_text_features.metadata.remove_columns(text_columns)\n else:\n non_text_features = container.DataFrame(generate_metadata=True)\n\n return raw_documents, non_text_features\n else:\n return raw_documents\n\n\ndef tokenize(raw_documents, vocabulary, analyze):\n \"\"\"Tokenize the raw documents\n\n Returns a ndarray. Each element is an ndarray of items of unit32 type. 
The ndarray can be of different length.\n \"\"\"\n if vocabulary is None or analyze is None:\n return np.array()\n\n tokenized = []\n for doc in raw_documents:\n row = []\n for feature in analyze(doc):\n try:\n feature_idx = vocabulary[feature]\n row.append(feature_idx)\n except KeyError:\n # Ignore out-of-vocabulary items for fixed_vocab=True\n continue\n tokenized.append(np.array(row, dtype=np.uint32))\n\n return np.array(tokenized)\n\n\ndef _tpd(zs, k):\n \"\"\" Convert to feature vector\n Returns a 2D ndarray\n \"\"\"\n tpdm = np.zeros((len(zs), k))\n for i, doc in enumerate(zs):\n for z in doc:\n if z < k:\n tpdm[i, z] += 1\n if len(doc) > 0:\n tpdm[i] /= len(doc)\n return tpdm\n\n\ndef mk_text_features(prediction, ntopics):\n \"\"\" Convert to feature\n\n Returns a DataFrame with metadata\n \"\"\"\n tpdm = _tpd(prediction, ntopics)\n\n # create metadata for the text feature columns\n features = container.DataFrame(tpdm, generate_metadata=True)\n for column_index in range(features.shape[1]):\n col_dict = dict(features.metadata.query((metadata_base.ALL_ELEMENTS, column_index)))\n col_dict['structural_type'] = type(1.0)\n # FIXME: assume we apply fastlvm only once per template, otherwise column names might duplicate\n col_dict['name'] = 'fastlvm_' + str(column_index)\n col_dict['semantic_types'] = ('http://schema.org/Float',\n 'https://metadata.datadrivendiscovery.org/types/Attribute')\n features.metadata = features.metadata.update((metadata_base.ALL_ELEMENTS, column_index), col_dict)\n\n return features\n" ]
[ [ "numpy.array", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
emillj/gekkoJaponicus
[ "d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7" ]
[ "chart_web.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport glob\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport json\nimport os\n\nimport quantmod as qm\n\nimport flask\nimport dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom flask_caching import Cache\n\n#from plotInfo import plotEvolutionSummary\n\nimport gekkoWrapper\nimport Settings\nimport coreFunctions\nimport evolution_bayes\n\ngsettings = Settings.getSettings()['global']\nsettings = Settings.getSettings()['bayesian']\n\nMA_SMA, MA_EMA, MA_WMA, MA_DEMA, MA_TEMA, MA_TRIMA, MA_KAMA, MA_MAMA, MA_T3 = range(9)\nrename = {\n \"DEMA\": {\n \"long\": \"timeperiod\",\n },\n \"MACD\": {\n \"short\": \"fastperiod\",\n \"long\": \"slowperiod\",\n \"signal\": \"signalperiod\",\n },\n \"PPO\": {\n \"short\": \"fastperiod\",\n \"long\": \"slowperiod\",\n \"signal\": \"signalperiod\",\n },\n \"RSI\": {\n \"interval\": \"timeperiod\",\n },\n \"StochRSI\": {\n \"interval\": \"timeperiod\",\n },\n \"CCI\": {\n \"interval\": \"timeperiod\",\n },\n }\nindicators = rename.keys()\n\ndef talib_dict(params):\n # dict key rename\n newparams = {}\n for k in rename.keys():\n newparams[k] = {}\n if k == \"STOCHRSI\":\n k = \"StochRSI\"\n for old, new in rename[k.upper()].items():\n newparams[k.upper()][new] = params[k].pop(old)\n # add matype\n newparams[\"PPO\"][\"matype\"] = MA_EMA\n #newparams[\"STOCHRSI\"][\"matype\"] = MA_EMA\n\n return newparams\n\ndef run_server():\n\n # Setup the app\n server = flask.Flask(__name__)\n #server.secret_key = os.environ.get('secret_key', 'secret')\n app = dash.Dash(__name__, server=server, csrf_protect=False)\n\n app.scripts.config.serve_locally = False\n dcc._js_dist[0]['external_url'] = 'https://cdn.plot.ly/plotly-finance-1.28.0.min.js'\n\n # Setup config\n responses, configs = get_json()\n def setup_config(filename=None):\n if filename != None and filename in responses:\n config_filename = filename.replace(\"response\", \"config\")\n res = load_json(filename)\n gekko_config = load_json(config_filename)\n else:\n res = load_json(responses[-1])\n gekko_config = load_json(configs[-1])\n filename = gsettings['configFilename']\n configjs = Settings.get_configjs(filename)\n config = {k:v for k,v in configjs.items() if k in indicators}\n config2 = {k:v for k,v in gekko_config[\"gekkoConfig\"].items() if k in indicators}\n config.update(config2.copy())\n strategy = gekko_config[\"gekkoConfig\"][\"tradingAdvisor\"][\"method\"]\n return strategy, config, res\n\n # Setup chart\n def setup_chart(res):\n candles = pd.DataFrame.from_dict(res['candles'])\n candles[\"start\"] = pd.to_datetime(candles[\"start\"])\n candles.index = candles[\"start\"]\n trades = pd.DataFrame.from_dict(res['trades'])\n trades[\"start\"] = pd.to_datetime(trades[\"date\"])\n trades[\"color\"] = 'rgba(0, 0, 0, 0.)'\n trades[\"symbol\"] = 'triangle-down'\n trades.loc[trades.action.str.match(\"buy\"), \"color\"] = 'rgba(255, 182, 193, .5)'\n trades.loc[trades.action.str.match(\"sell\"), \"color\"] = 'rgba(182, 193, 255, .5)'\n trades.loc[trades.action.str.match(\"buy\"), \"symbol\"] = 'triangle-up'\n trade_scatter = dict(\n x=trades[\"start\"],\n y=trades[\"price\"],\n name=trades[\"action\"],\n mode=\"markers\",\n marker = dict(\n symbol = trades[\"symbol\"],\n size = 15,\n color = trades[\"color\"],\n showscale=True,\n )\n )\n return candles, trade_scatter\n\n strategy, config, res = setup_config()\n candles, trade_scatter = setup_chart(res)\n\n # Add caching\n 
cache = Cache(app.server, config={'CACHE_TYPE': 'simple'})\n timeout = 60 * 60 # 1 hour\n\n # Controls\n src = dict(\n index = 'start',\n op = 'open',\n hi = 'high',\n lo = 'low',\n cl = 'close',\n aop = None,\n ahi = None,\n alo = None,\n acl = None,\n vo = 'volume',\n di = None,\n )\n\n logs = responses\n logs = [dict(label=str(log), value=str(log))\n for log in logs]\n\n # Dynamic binding\n functions = dir(qm.ta)[9:-4]\n functions = [dict(label=str(function[4:]), value=str(function))\n for function in functions]\n\n # Layout\n app.layout = html.Div(\n [\n html.Div([\n html.H2(\n 'gekkoJaponicus Charts',\n style={'padding-top': '20', 'text-align': 'center'}\n ),\n html.Div([\n html.Label('Select log:'),\n dcc.Dropdown(\n id='dropdown',\n options=logs,\n value=str(responses[0]),\n )],\n style={\n 'width': '510', 'display': 'inline-block',\n 'padding-left': '40', 'margin-bottom': '20'}\n ),\n html.Div([\n html.Label('Select technical indicators:'),\n dcc.Dropdown(\n id='multi',\n options=functions,\n multi=True,\n value=[\"add_\"+strategy.upper()],\n )],\n style={\n 'width': '510', 'display': 'inline-block',\n 'padding-right': '40', 'margin-bottom': '20'}\n ),\n ]),\n html.Div([\n html.Label('Specify parameters of technical indicators:'),\n dcc.Input(\n id='arglist',\n style={'height': '32', 'width': '1020'},\n value=json.dumps(config),\n )],\n id='arg-controls',\n style={'display': 'none'}\n ),\n dcc.Graph(id='output')\n ],\n style={\n 'width': '1100',\n 'margin-left': 'auto',\n 'margin-right': 'auto',\n 'font-family': 'overpass',\n 'background-color': '#F3F3F3'\n }\n )\n\n\n @app.callback(Output('arg-controls', 'style'), [Input('multi', 'value')])\n def display_control(multi):\n if not multi:\n return {'display': 'none'}\n else:\n return {'margin-bottom': '20', 'padding-left': '40'}\n\n\n @cache.memoize(timeout=timeout)\n @app.callback(Output('output', 'figure'), [Input('dropdown', 'value'),\n Input('multi', 'value'),\n Input('arglist', 'value')])\n def update_graph_from_dropdown(dropdown, multi, arglist):\n\n # Get Quantmod Chart\n print('Loading')\n strategy, config, res = setup_config(dropdown)\n candles, trade_scatter = setup_chart(res)\n ch = qm.Chart(candles, src=src)\n\n # Get functions and arglist for technical indicators\n if arglist:\n for function in multi:\n try:\n config = talib_dict(json.loads(arglist))\n indicator = function.split(\"_\")[1]\n newargs = config[indicator]\n # Dynamic calling\n fn = getattr(qm, function)\n fn(ch, **newargs)\n except Exception as e:\n print(e)\n getattr(qm, function)(ch)\n pass\n else:\n for function in multi:\n # Dynamic calling\n getattr(qm, function)(ch)\n\n fig = ch.to_figure(width=1100)\n\n # hack figure\n index = 0\n for i in range(len(fig[\"layout\"].keys())):\n axis = \"yaxis\"+str(i)\n if axis in fig[\"layout\"]:\n index = i + 1\n yrange = [candles[\"low\"].min(), candles[\"high\"].max()]\n fig[\"layout\"][\"yaxis\"][\"range\"] = yrange\n fig[\"layout\"][\"yaxis\"+str(index)] = fig[\"layout\"][\"yaxis2\"]\n fig[\"layout\"][\"plot_bgcolor\"] = 'rgba(0, 0, 0, 0.00)'\n trade_scatter[\"yaxis\"] = \"y1\"\n fig[\"data\"].append(trade_scatter)\n\n return fig\n\n\n # External css\n\n external_css = [\"https://fonts.googleapis.com/css?family=Overpass:400,400i,700,700i\",\n \"https://cdn.rawgit.com/plotly/dash-app-stylesheets/c6a126a684eaaa94a708d41d6ceb32b28ac78583/dash-technical-charting.css\"]\n\n for css in external_css:\n app.css.append_css({\"external_url\": css})\n\n # Run the Dash app\n if __name__ == '__main__':\n 
app.server.run(debug=True)\n #app.server.run()\n\ndef get_json():\n files1 = os.path.join(gsettings[\"save_dir\"], '*_response.json')\n files2 = os.path.join(gsettings[\"save_dir\"], '*_config.json')\n response_files = list(filter(os.path.isfile, glob.glob(files1)))\n response_files.sort(key=lambda x: -os.path.getmtime(x))\n config_file = list(filter(os.path.isfile, glob.glob(files2)))\n config_file.sort(key=lambda x: -os.path.getmtime(x))\n return response_files, config_file\n\ndef load_json(filename):\n f = open(filename, \"r\")\n result = json.loads(f.read())\n f.close()\n return result\n\ndef create_first_chart():\n print(\"log file not found: try to fetch\")\n strategy = settings[\"Strategy\"]\n deltaDays = settings['deltaDays']\n filename = gsettings['configFilename']\n configjs = Settings.get_configjs(filename)\n watch = settings[\"watch\"]\n dateset = gekkoWrapper.getAvailableDataset(watch)\n daterange = coreFunctions.getRandomDateRange(dateset, deltaDays=deltaDays)\n config = evolution_bayes.compressing_flatten_dict(configjs[strategy], strategy)\n config[\"watch\"] = watch\n gekko_config = gekkoWrapper.createConfig(config, daterange)\n res = evolution_bayes.EvaluateRaw(watch, daterange, configjs[strategy], strategy)\n score = res['report']['relativeProfit']\n\n filename = \"_\".join([watch[\"exchange\"], watch[\"currency\"], watch[\"asset\"], strategy, datetime.datetime.now().strftime('%Y%m%d_%H%M%S'), str(score)])\n save_dir = gsettings[\"save_dir\"]\n json_filename = os.path.join(save_dir, filename) + \"_config.json\"\n json2_filename = os.path.join(save_dir, filename) + \"_response.json\"\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n f = open(json_filename, \"w\")\n f.write(json.dumps(gekko_config, indent=2))\n f.close()\n print(\"Saved: \" + json_filename)\n f = open(json2_filename, \"w\")\n f.write(json.dumps(res, indent=2))\n f.close()\n print(\"Saved: \" + json2_filename)\n\nif __name__ == '__main__':\n res, config = get_json()\n if len(res) > 0 and len(config) > 0:\n run_server()\n else:\n create_first_chart()\n run_server()\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
fdibaldassarre/waifu2x-tensorflow
[ "aa170c306d655047a7d6b13f588d13b6bdd28736" ]
[ "src/Waifu2x.py" ]
[ "#!/usr/bin/env python3\n\nimport json\nimport os\n\nfrom PIL import Image\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras import layers\n\nfrom src.Places import MODELS_FOLDER\n\nOP_SCALE = 'scale'\nOP_NOISE = 'noise'\nOP_NOISE_SCALE = 'noise_scale'\n\nLEAKY_ALPHA = tf.constant(0.1)\n\n\ndef leaky_relu(x):\n return tf.where(tf.greater(0.0, x), tf.multiply(x, LEAKY_ALPHA), x)\n\n\ndef save_image_to(data, path):\n data = np.minimum(np.maximum(0., data[0]), 1.)\n data = np.uint8(np.round(data * 255.))\n image = Image.fromarray(data)\n image.save(path)\n\n\ndef load_weights(config):\n weights = np.asarray(config[\"weight\"], dtype=np.float32).transpose(2, 3, 1, 0)\n bias = np.asarray(config[\"bias\"], dtype=np.float32)\n return [weights, bias]\n\n\ndef create_conv2D_layer(config, activation=None):\n weights = load_weights(config)\n layer = layers.Conv2D(config[\"nOutputPlane\"],\n strides=(config[\"dH\"], config[\"dW\"]),\n kernel_size=(config[\"kH\"], config[\"kW\"]),\n activation=activation,\n weights=weights)\n return layer\n\n\ndef create_conv2Dtranspose_layer(config):\n weights = load_weights(config)\n layer = layers.Conv2DTranspose(config[\"nOutputPlane\"],\n strides=(config[\"dH\"], config[\"dW\"]),\n kernel_size=(config[\"kH\"], config[\"kW\"]),\n padding='same',\n weights=weights)\n return layer\n\n\ndef pad_image(img, padding):\n h, w = img.size\n size = (h + 2 * padding, w + 2 * padding)\n result = Image.new('RGB', size, (0, 0, 0))\n result.paste(img, (padding, padding))\n return result\n\n\nclass Waifu2x:\n\n def __init__(self, operation, noise_level=0):\n self._operation = operation\n self._noise_level = noise_level\n self.img = None\n\n def load_image(self, path):\n self.img = Image.open(path)\n if self.img.mode != 'RGB':\n # All images are either B/W (mode = 'L') or 'RGB'\n self.img = self.img.convert('RGB')\n\n def _get_model_path(self):\n if self._operation == OP_NOISE:\n model_name = 'vgg_7/art/noise%d_model.json' % self._noise_level\n elif self._operation == OP_SCALE:\n model_name = 'upconv_7/art/scale2.0x_model.json'\n elif self._operation == OP_NOISE_SCALE:\n model_name = 'upconv_7/art/noise%d_scale2.0x_model.json' % self._noise_level\n return os.path.join(MODELS_FOLDER, model_name)\n\n def _load_layers(self):\n model_path = self._get_model_path()\n decoder = json.JSONDecoder()\n with open(model_path, 'r') as hand:\n data = hand.read().strip()\n return decoder.decode(data)\n\n def _build_model(self):\n if self._operation == OP_NOISE:\n return self._build_vgg7()\n else:\n return self._build_upconv()\n\n def _build_vgg7(self):\n layers = self._load_layers()\n model = Sequential()\n for i in range(0, 6):\n model.add(create_conv2D_layer(layers[i], activation=leaky_relu))\n model.add(create_conv2D_layer(layers[6]))\n return model\n\n def _build_upconv(self):\n layers = self._load_layers()\n model = Sequential()\n for i in range(0, 6):\n model.add(create_conv2D_layer(layers[i], activation=leaky_relu))\n model.add(create_conv2Dtranspose_layer(layers[6]))\n return model\n\n def _get_input_tensor(self):\n if self._operation == OP_NOISE:\n padding = 7\n else:\n padding = 6\n img = pad_image(self.img, padding)\n data = np.asarray(img, dtype=np.float32) / 255.\n return np.expand_dims(data, axis=0)\n\n def run(self, input_path, output_path):\n self.load_image(input_path)\n model = self._build_model()\n input_data = self._get_input_tensor()\n result = model.predict(input_data)\n save_image_to(result, output_path)\n\n\ndef 
scale(input_path, output_path):\n waifu2x = Waifu2x(OP_SCALE)\n waifu2x.run(input_path, output_path)\n\n\ndef denoise(input_path, output_path, noise_level):\n waifu2x = Waifu2x(OP_NOISE, noise_level)\n waifu2x.run(input_path, output_path)\n\n\ndef denoise_scale(input_path, output_path, noise_level):\n waifu2x = Waifu2x(OP_NOISE_SCALE, noise_level)\n waifu2x.run(input_path, output_path)\n" ]
[ [ "tensorflow.multiply", "numpy.maximum", "tensorflow.constant", "numpy.expand_dims", "tensorflow.greater", "numpy.asarray", "tensorflow.keras.Sequential", "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
gma-coretechs/hccpy
[ "59d23e91a927f8c90ffadb067e9d804b5505d503" ]
[ "hccpy/adhoc/X_NW_diag_diff.py" ]
[ "from itertools import chain\nimport logging\nimport sys\n\nfrom pyspark.sql import functions as f\nfrom pyspark.sql.session import SparkSession\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.types import ArrayType, StringType\n\nfrom hccV2421.hcc_2421 import HCCEngine\nimport pandas as pd\nimport numpy as np\npd.set_option('display.max_colwidth', 1000)\npd.set_option('display.max_columns', 500)\n\n\nspark = SparkSession.builder.getOrCreate()\n# spark.conf.set(\"spark.sql.execution.arrow.enabled\", \"true\")\n\nlogger = logging.getLogger(__name__)\nlog_handler = logging.StreamHandler(sys.stdout)\nlog_handler.setFormatter(logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))\nlog_handler.setLevel(logging.DEBUG)\nlogger.addHandler(log_handler)\nlogger.setLevel(logging.DEBUG)\n\n\ninput_path = '/data/raw/'\noutput_path = '/data/data_science/raf/'\n\n\ndef load_datasets():\n cclf1 = spark.read.parquet(input_path + 'cclf1.parquet')\n cclf4 = spark.read.parquet(input_path + 'cclf4.parquet')\n cclf5 = spark.read.parquet(input_path + 'cclf5.parquet')\n cclf8 = spark.read.parquet(input_path + 'cclf8.parquet')\n return cclf1, cclf4, cclf5, cclf8\n\n\ndef write_output(df):\n logger.info(\"CREATING MASTER DIFF DATASET\")\n logger.info(\"WRITING: {}\".format(output_path + \"NW_diag_HCC_raf.parquet\"))\n df.write.mode('overwrite').parquet(output_path + 'NW_diag_HCC_raf.parquet')\n return df\n\n\ndef main():\n cclf1, cclf4, cclf5, cclf8 = load_datasets()\n\n cclf1 = cclf1.withColumn(\"file_year\", f.split(f.col(\"source_file\"), \"/\").getItem(6)).withColumn('source_year', f.substring(f.col('file_year'), -4, 4))\n cclf4 = cclf4.withColumn(\"file_year\", f.split(f.col(\"source_file\"), \"/\").getItem(6)).withColumn('source_year', f.substring(f.col('file_year'), -4, 4))\n cclf5 = cclf5.withColumn(\"file_year\", f.split(f.col(\"source_file\"), \"/\").getItem(6)).withColumn('source_year', f.substring(f.col('file_year'), -4, 4))\n cclf8 = cclf8.withColumn(\"file_year\", f.split(f.col(\"source_file\"), \"/\").getItem(6)).withColumn('source_year', f.substring(f.col('file_year'), -4, 4))\n\n cclf1 = cclf1.withColumn('claim_year', f.substring(f.col('CLM_THRU_DT'), 1, 4))\n cclf4 = cclf4.withColumn('claim_year', f.substring(f.col('CLM_THRU_DT'), 1, 4))\n cclf5 = cclf5.withColumn('claim_year', f.substring(f.col('CLM_THRU_DT'), 1, 4))\n \n cclf1 = cclf1.select('BENE_MBI_ID', 'PRNCPL_DGNS_CD', 'claim_year', 'source_year')\n cclf1 = cclf1.withColumn('PRNCPL_DGNS_CD', f.when(f.col('PRNCPL_DGNS_CD') == '', None).otherwise(f.col('PRNCPL_DGNS_CD')))\n cclf1 = cclf1.groupBy('BENE_MBI_ID', 'source_year', 'claim_year').agg(f.array_distinct(f.collect_list('PRNCPL_DGNS_CD')))\n cclf1 = cclf1.dropna()\n cclf1 = cclf1.drop_duplicates()\n cclf1 = cclf1.withColumnRenamed('array_distinct(collect_list(PRNCPL_DGNS_CD))', 'PRNCPL_DGNS_CD')\n\n\n cclf4 = cclf4.select('BENE_MBI_ID', 'CLM_DGNS_CD', 'source_year', 'claim_year')\n cclf4 = cclf4.withColumn('CLM_DGNS_CD', f.when(f.col('CLM_DGNS_CD') == '', None).otherwise(f.col('CLM_DGNS_CD')))\n cclf4 = cclf4.groupBy('BENE_MBI_ID', 'source_year', 'claim_year').agg(f.array_distinct(f.collect_list('CLM_DGNS_CD')))\n cclf4 = cclf4.dropna()\n cclf4 = cclf4.drop_duplicates()\n cclf4 = cclf4.withColumnRenamed('array_distinct(collect_list(CLM_DGNS_CD))', 'CLM_DGNS_CD')\n\n cclf8 = cclf8.select('BENE_MBI_ID', 'BENE_SEX_CD', 'BENE_AGE', 'BENE_MDCR_STUS_CD', 'BENE_DUAL_STUS_CD', 'BENE_ORGNL_ENTLMT_RSN_CD', 'source_year' )\n cclf8 = cclf8.withColumn('concat_elig', 
f.concat(cclf8.BENE_DUAL_STUS_CD, cclf8.BENE_MDCR_STUS_CD))\n\n elig_comb = \\\n {'0210' : 'CFA', '0410' : 'CFA', '0810' : 'CFA', '0211' : 'CFA', '0411' : 'CFA', '0811' : 'CFA',\n '0220' : 'CFD', '0420' : 'CFD', '0820': 'CFD', '0221': 'CFD', '0421': 'CFD', '0821': 'CFD',\n 'NA10': 'CNA', 'NA11': 'CNA',\n 'NA20': 'CND', 'NA21': 'CND',\n '0110' : 'CPA', '0310': 'CPA', '0510': 'CPA', '0610': 'CPA', '0111': 'CPA', '0311': 'CPA', '0511': 'CPA', '0611': 'CPA',\n '0120': 'CPD', '0320': 'CPD', '0520': 'CPD', '0620': 'CPD', '0121': 'CPD', '0321': 'CPD', '0521': 'CPD', '0621': 'CPD'}\n\n mapping_expr = f.create_map([f.lit(x) for x in chain(*elig_comb.items())])\n cclf8 = cclf8.replace(to_replace=elig_comb, subset=['concat_elig'])\n cclf8 = cclf8.select('BENE_MBI_ID', 'BENE_AGE', 'BENE_SEX_CD', 'BENE_ORGNL_ENTLMT_RSN_CD', 'concat_elig', 'source_year')\n cclf8 = cclf8.dropna()\n cclf8 = cclf8.drop_duplicates()\n w2 = Window.partitionBy(\"BENE_MBI_ID\", 'source_year').orderBy(f.col(\"BENE_AGE\"))\n cclf8 = cclf8.withColumn(\"row\", f.row_number().over(w2)).filter(f.col(\"row\") == 1).drop(\"row\").orderBy(f.col('BENE_MBI_ID'))\n\n cclf5 = cclf5.select('BENE_MBI_ID', 'CLM_DGNS_1_CD', 'CLM_DGNS_2_CD', 'CLM_DGNS_3_CD', 'CLM_DGNS_4_CD', 'CLM_DGNS_5_CD', 'CLM_DGNS_6_CD',\n 'CLM_DGNS_7_CD', 'CLM_DGNS_8_CD', 'CLM_DGNS_9_CD', 'CLM_DGNS_10_CD', 'CLM_DGNS_11_CD', 'CLM_DGNS_12_CD', 'source_year', \n 'claim_year')\n\n diag_cds = ['CLM_DGNS_1_CD', 'CLM_DGNS_2_CD', 'CLM_DGNS_3_CD', 'CLM_DGNS_4_CD', 'CLM_DGNS_5_CD', 'CLM_DGNS_6_CD', 'CLM_DGNS_7_CD',\n 'CLM_DGNS_8_CD', 'CLM_DGNS_9_CD', 'CLM_DGNS_10_CD', 'CLM_DGNS_11_CD', 'CLM_DGNS_12_CD']\n\n cols = [f.when(~f.col(x).isin(\"~\"), f.col(x)).alias(x) for x in cclf5.columns]\n cclf5 = cclf5.select(*cols)\n cclf5 = cclf5.withColumn('DIAG_ARRAY', f.concat_ws(',', *diag_cds)) # concat diags and aggregate\n cclf5 = cclf5.select('BENE_MBI_ID', 'source_year', 'claim_year', 'DIAG_ARRAY')\n cclf5 = cclf5.dropna()\n cclf5 = cclf5.drop_duplicates()\n cclf5 = cclf5.groupBy('BENE_MBI_ID', 'source_year', 'claim_year').agg(f.array_distinct(f.collect_list('DIAG_ARRAY')))\n cclf5 = cclf5.withColumnRenamed('array_distinct(collect_list(DIAG_ARRAY))', 'DIAG_ARRAY')\n cclf5 = cclf5.withColumn(\"DIAG_ARRAY\",f.concat_ws(\",\",f.col(\"DIAG_ARRAY\")))\n cclf5 = cclf5.withColumn(\"DIAG_ARRAY\", f.split(f.col(\"DIAG_ARRAY\"), \",\\s*\").cast(ArrayType(StringType())).alias(\"DIAG_ARRAY\"))\n\n master = cclf8.join(cclf1, on=['BENE_MBI_ID', 'source_year'], how='left')\n master = master.join(cclf4, on=['BENE_MBI_ID','source_year', 'claim_year'], how='left')\n master = master.join(cclf5, on =['BENE_MBI_ID','source_year', 'claim_year'], how ='left')\n # logger.info('final columns after joins:' + str(master.columns))\n # logger.info('final row count:' + str(master.count()))\n\n\n master = master.withColumn('diagnosis_list', f.array_distinct(f.concat(master.DIAG_ARRAY, master.PRNCPL_DGNS_CD, master.CLM_DGNS_CD)))\n master = master.drop('DIAG_ARRAY', 'PRNCPL_DGNS_CD', 'CLM_DGNS_CD')\t\n\n master = master.select('BENE_MBI_ID', 'BENE_AGE', 'BENE_SEX_CD', 'concat_elig', 'diagnosis_list', 'BENE_ORGNL_ENTLMT_RSN_CD', \n 'source_year', 'claim_year')\n master = master.withColumn('BENE_AGE', f.col('BENE_AGE').cast('int'))\n master = master.withColumn('BENE_SEX_CD', f.when(f.col('BENE_SEX_CD')=='1', f.lit('M')).otherwise(f.lit('F')))\n master = master.withColumnRenamed('BENE_ORGNL_ENTLMT_RSN_CD', 'oerc')\n master = master.filter(f.col('claim_year') >= '2018')\n \n # master = 
master.withColumn('diag_lag',f.lag(master['diagnosis_list']).over(Window.partitionBy(\"BENE_MBI_ID\").orderBy('BENE_MBI_ID','source_year', 'claim_year')))\n # master = master.withColumn('diff_diag', f.array_except(f.col('diagnosis_list'), f.col('diag_lag')))\t\n \n # replace nulls wit []\n # master= master.withColumn('diff_diag', f.coalesce(master['diff_diag'], f.array())) \n # window_member = (Window.partitionBy('BENE_MBI_ID').orderBy('BENE_MBI_ID', 'source_year', 'claim_year').rangeBetween(Window.unboundedPreceding, 0))\n # master = master.withColumn('cum_diff_diag',f.flatten(f.collect_list('diff_diag').over(window_member)))\n \n master_18 = master.filter((f.col('source_year')=='2018') & (f.col('claim_year')=='2018'))\n master_19 = master.filter((f.col('source_year')=='2019') & (f.col('claim_year')=='2019'))\n master_20 = master.filter((f.col('source_year')=='2020') & (f.col('claim_year')=='2020'))\n master_21 = master.filter((f.col('source_year')=='2021') & (f.col('claim_year')=='2021'))\n\n # logger.info('master18 row count:' + str(master_18.count()))\n # logger.info('master19 row count:' + str(master_19.count()))\n # logger.info('master20 row count:' + str(master_20.count()))\n # logger.info('master21 row count:' + str(master_21.count()))\n\n master_18 = master_18.toPandas()\n master_19 = master_19.toPandas()\n master_20 = master_20.toPandas()\n master_21 = master_21.toPandas()\n\n # master_18['diagnosis_list'] = [ [] if x is np.NaN else x for x in master_18['diagnosis_list'] ]\n\n master_18 = master_18[master_18['diagnosis_list'].notna()]\n he = HCCEngine(version=\"23\")\n # master_18 = master_18.dropna()\n master_18['risk_profile'] = master_18.apply(lambda row: he.profile(row['diagnosis_list'], row['BENE_AGE'], row['BENE_SEX_CD'], row['concat_elig'], row['oerc']), axis=1)\n \n he = HCCEngine(version=\"24_19\")\n master_19 = master_19[master_19['diagnosis_list'].notna()]\n master_19['risk_profile'] = master_19.apply(lambda row: he.profile(row['diagnosis_list'], row['BENE_AGE'], row['BENE_SEX_CD'], row['concat_elig'], row['oerc']), axis=1)\n\n he = HCCEngine(version=\"24_19\")\n master_20 = master_20[master_20['diagnosis_list'].notna()]\n master_20['risk_profile'] = master_20.apply(lambda row: he.profile(row['diagnosis_list'], row['BENE_AGE'], row['BENE_SEX_CD'], row['concat_elig'], row['oerc']), axis=1)\n\n he = HCCEngine(version=\"24_21\")\n master_21 = master_21[master_21['diagnosis_list'].notna()]\n master_21['risk_profile'] = master_21.apply(lambda row: he.profile(row['diagnosis_list'], row['BENE_AGE'], row['BENE_SEX_CD'], row['concat_elig'], row['oerc']), axis=1)\n\n\n result_master = pd.concat([master_18, master_19, master_20, master_21 ], axis=0, ignore_index=True)\n df_result = pd.DataFrame(result_master['risk_profile'].values.tolist())\n result_master = pd.concat([result_master, df_result['risk_score'], df_result['hcc_lst'], df_result['hcc_map'], df_result['details']], axis=1)\n result_master = result_master[['BENE_MBI_ID', 'BENE_AGE', 'BENE_SEX_CD', 'concat_elig', 'diagnosis_list', 'oerc', 'source_year', 'claim_year', 'hcc_lst', 'hcc_map', 'risk_score', 'details']]\n fields = result_master.columns\n for field in fields:\n result_master[field] = [str(x) for x in result_master[field]]\n\n # logger.info('final row count:' + str(result_master.count()))\n result_master = spark.createDataFrame(result_master)\n # drop risk profile column\n # write_output(result_master)\n\n # df = spark.read.parquet('/data/data_science/raf/NW_master_raf.parquet')\n # df = 
df.withColumn('diagnosis_list', f.array(f.col('diagnosis_list')))\n # df = df.withColumn('hcc_lst',f.array(f.col('hcc_lst')))\n\n # df = df.withColumn('diag_lag',f.lag(df['diagnosis_list']).over(Window.partitionBy(\"BENE_MBI_ID\").orderBy('BENE_MBI_ID','year')))\n # df = df.withColumn('hcc_lag',f.lag(df['hcc_lst']).over(Window.partitionBy(\"BENE_MBI_ID\").orderBy('BENE_MBI_ID','year')))\n\n # df = df.withColumn('diff_diag', f.array_except(f.col('diagnosis_list'), f.col('diag_lag')))\n # df = df.withColumn('diff_hcc', f.array_except(f.col('hcc_lst'), f.col('hcc_lag')))\n write_output(result_master)\n\n\nif __name__ == \"__main__\":\n\n logger.info('START')\n main()\n logger.info('END')\n" ]
[ [ "pandas.set_option", "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
SlipknotTN/HyperGAN
[ "bd39759521d52a706f6f0f561e0c8355a3ef427e" ]
[ "examples/static.py" ]
[ "import argparse\nimport os\nimport uuid\nimport tensorflow as tf\nimport hypergan as hg\nimport hyperchamber as hc\nimport numpy as np\nfrom hypergan.generators import *\nfrom hypergan.viewer import GlobalViewer\nfrom common import *\nfrom hypergan.search.random_search import RandomSearch\n\nfrom hypergan.samplers.random_walk_sampler import RandomWalkSampler\nfrom hypergan.samplers.static_batch_sampler import StaticBatchSampler\n\narg_parser = ArgumentParser(\"Feed static values into X/Z and memorize them\")\narg_parser.add_image_arguments()\nargs = arg_parser.parse_args()\n\nwidth, height, channels = parse_size(args.size)\n\nconfig = lookup_config(args)\n\nsave_file = \"save/model.ckpt\"\n\nif args.action == 'search':\n config = RandomSearch({}).random_config()\n\n if args.config_list is not None:\n config = random_config_from_list(args.config_list)\n random_config = RandomSearch({}).random_config()\n\n config[\"generator\"]=random_config[\"generator\"]\n config[\"discriminator\"]=random_config[\"discriminator\"]\n # TODO Other search terms?\n\ninputs = hg.inputs.image_loader.ImageLoader(args.batch_size)\ninputs.create(args.directory,\n channels=channels, \n format=args.format,\n crop=args.crop,\n width=width,\n height=height,\n resize=True)\n\nsave_file = \"save/model.ckpt\"\n\ndef setup_gan(config, inputs, args):\n gan = hg.GAN(config, inputs=inputs)\n\n gan.create()\n\n if(args.action != 'search' and os.path.isfile(save_file+\".meta\")):\n gan.load(save_file)\n\n tf.train.start_queue_runners(sess=gan.session)\n\n config_name = args.config\n title = \"[hypergan] static \" + config_name\n GlobalViewer.title = title\n GlobalViewer.enabled = args.viewer\n\n return gan\n\ndef train(config, inputs, args):\n gan = setup_gan(config, inputs, args)\n static_x, static_z = gan.session.run([gan.inputs.x, gan.encoder.sample])\n\n accuracy_x_to_g=accuracy(static_x, gan.generator.sample)\n diversity_g = batch_diversity(gan.generator.sample)\n\n metrics = [accuracy_x_to_g, diversity_g]\n sum_metrics = [0 for metric in metrics]\n sampler = lookup_sampler(args.sampler or StaticBatchSampler)(gan)\n for i in range(args.steps):\n gan.step({gan.inputs.x: static_x, gan.encoder.sample: static_z})\n\n if i % args.sample_every == 0:\n print(\"sampling \"+str(i))\n sample_file = \"samples/\"+str(i)+\".png\"\n sampler.sample(sample_file, args.save_samples)\n\n if args.action == 'train' and i % args.save_every == 0 and i > 0:\n print(\"saving \" + save_file)\n gan.save(save_file)\n\n if i > args.steps * 9.0/10:\n for k, metric in enumerate(gan.session.run(metrics)):\n print(\"Metric \"+str(k)+\" \"+str(metric))\n sum_metrics[k] += metric \n return sum_metrics\n\ndef sample(config, inputs, args):\n gan = setup_gan(config, inputs, args)\n sampler = lookup_sampler(args.sampler or RandomWalkSampler)(gan)\n for i in range(args.steps):\n sample_file = \"samples/\"+str(i)+\".png\"\n sampler.sample(sample_file, args.save_samples)\n\ndef search(config, inputs, args):\n metrics = train(config, inputs, args)\n config_filename = \"static-\"+str(uuid.uuid4())+'.json'\n hc.Selector().save(config_filename, config)\n\n with open(args.search_output, \"a\") as myfile:\n myfile.write(config_filename+\",\"+\",\".join([str(x) for x in metrics])+\"\\n\")\n\nif args.action == 'train':\n metrics = train(config, inputs, args)\n print(\"Resulting metrics:\", metrics)\nelif args.action == 'sample':\n sample(config, inputs, args)\nelif args.action == 'search':\n search(config, inputs, args)\nelse:\n print(\"Unknown action: 
\"+args.action)\n" ]
[ [ "tensorflow.train.start_queue_runners" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
thorwhalen/spyn
[ "35a9a85a96736764de9ef20f4cedb260b9b7a8c7" ]
[ "spyn/utils/color.py" ]
[ "__author__ = 'thor'\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\ndef shifted_color_map(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):\n '''\n Function to offset the \"center\" of a colormap. Useful for\n data with a negative min and positive max and you want the\n middle of the colormap's dynamic range to be at zero\n\n Input\n -----\n cmap : The matplotlib colormap to be altered\n start : Offset from lowest point in the colormap's range.\n Defaults to 0.0 (no lower ofset). Should be between\n 0.0 and `midpoint`.\n midpoint : The new center of the colormap. Defaults to\n 0.5 (no shift). Should be between 0.0 and 1.0. In\n general, this should be 1 - vmax/(vmax + abs(vmin))\n For example if your data range from -15.0 to +5.0 and\n you want the center of the colormap at 0.0, `midpoint`\n should be set to 1 - 5/(5 + 15)) or 0.75\n stop : Offset from highets point in the colormap's range.\n Defaults to 1.0 (no upper ofset). Should be between\n `midpoint` and 1.0.\n '''\n cdict = {\n 'red': [],\n 'green': [],\n 'blue': [],\n 'alpha': []\n }\n\n # regular index to compute the colors\n reg_index = np.linspace(start, stop, 257)\n\n # shifted index to match the data\n shift_index = np.hstack([\n np.linspace(0.0, midpoint, 128, endpoint=False),\n np.linspace(midpoint, 1.0, 129, endpoint=True)\n ])\n\n for ri, si in zip(reg_index, shift_index):\n r, g, b, a = cmap(ri)\n\n cdict['red'].append((si, r, r))\n cdict['green'].append((si, g, g))\n cdict['blue'].append((si, b, b))\n cdict['alpha'].append((si, a, a))\n\n newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)\n plt.register_cmap(cmap=newcmap)\n plt.imshow\n return newcmap\n\n\ndef get_colorbar_tick_labels_as_floats(cbar):\n return [float(ww.get_text().replace('\\u2212', '-')) for ww in cbar.ax.yaxis.get_majorticklabels()]\n" ]
[ [ "matplotlib.pyplot.register_cmap", "matplotlib.colors.LinearSegmentedColormap", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
olonok69/RL_Stable_baselines
[ "3634cc60e8de8e9dfa7cb50fdc6272284b8a7cc9" ]
[ "algos/Td3.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport gym\nfrom tensorflow.keras.models import load_model\n#!pip3 install box2d-py\n\nprint(tf.config.list_physical_devices('GPU'))\n\nenv= gym.make(\"LunarLanderContinuous-v2\")\nstate_low = env.observation_space.low\nstate_high = env.observation_space.high\naction_low = env.action_space.low \naction_high = env.action_space.high\nprint(state_low)\nprint(state_high)\nprint(action_low)\nprint(action_high)\n\n\nclass RBuffer():\n def __init__(self, maxsize, statedim, naction):\n self.cnt = 0\n self.maxsize = maxsize\n self.state_memory = np.zeros((maxsize, *statedim), dtype=np.float32)\n self.action_memory = np.zeros((maxsize, naction), dtype=np.float32)\n self.reward_memory = np.zeros((maxsize,), dtype=np.float32)\n self.next_state_memory = np.zeros((maxsize, *statedim), dtype=np.float32)\n self.done_memory = np.zeros((maxsize,), dtype= np.bool)\n\n def storexp(self, state, next_state, action, done, reward):\n index = self.cnt % self.maxsize\n self.state_memory[index] = state\n self.action_memory[index] = action\n self.reward_memory[index] = reward\n self.next_state_memory[index] = next_state\n self.done_memory[index] = 1- int(done)\n self.cnt += 1\n\n def sample(self, batch_size):\n max_mem = min(self.cnt, self.maxsize)\n batch = np.random.choice(max_mem, batch_size, replace= False) \n states = self.state_memory[batch]\n next_states = self.next_state_memory[batch]\n rewards = self.reward_memory[batch]\n actions = self.action_memory[batch]\n dones = self.done_memory[batch]\n return states, next_states, rewards, actions, dones\n\n\nclass Critic(tf.keras.Model):\n def __init__(self):\n super(Critic, self).__init__()\n self.f1 = tf.keras.layers.Dense(512, activation='relu')\n self.f2 = tf.keras.layers.Dense(512, activation='relu')\n self.v = tf.keras.layers.Dense(1, activation=None)\n\n def call(self, inputstate, action):\n x = self.f1(tf.concat([inputstate, action], axis=1))\n x = self.f2(x)\n x = self.v(x)\n return x\n\n\nclass Actor(tf.keras.Model):\n def __init__(self, no_action):\n super(Actor, self).__init__() \n self.f1 = tf.keras.layers.Dense(512, activation='relu')\n self.f2 = tf.keras.layers.Dense(512, activation='relu')\n self.mu = tf.keras.layers.Dense(no_action, activation='tanh')\n\n def call(self, state):\n x = self.f1(state)\n x = self.f2(x)\n x = self.mu(x) \n return x\n\n \n\nclass Agent():\n def __init__(self, n_action= len(env.action_space.high)):\n self.actor_main = Actor(n_action)\n self.actor_target = Actor(n_action)\n self.critic_main = Critic()\n self.critic_main2 = Critic()\n self.critic_target = Critic()\n self.critic_target2 = Critic()\n self.batch_size = 64\n self.n_actions = len(env.action_space.high)\n self.a_opt = tf.keras.optimizers.Adam(0.001)\n # self.actor_target = tf.keras.optimizers.Adam(.001)\n self.c_opt1 = tf.keras.optimizers.Adam(0.002)\n self.c_opt2 = tf.keras.optimizers.Adam(0.002)\n # self.critic_target = tf.keras.optimizers.Adam(.002)\n self.memory = RBuffer(1_00_000, env.observation_space.shape, len(env.action_space.high))\n self.trainstep = 0\n #self.replace = 5\n self.gamma = 0.99\n self.min_action = env.action_space.low[0]\n self.max_action = env.action_space.high[0]\n self.actor_update_steps = 2\n self.warmup = 200\n \n\n def act(self, state, evaluate=False):\n if self.trainstep > self.warmup:\n evaluate = True\n state = tf.convert_to_tensor([state], dtype=tf.float32)\n actions = self.actor_main(state)\n if not evaluate:\n actions += tf.random.normal(shape=[self.n_actions], mean=0.0, 
stddev=0.1)\n\n actions = self.max_action * (tf.clip_by_value(actions, self.min_action, self.max_action))\n #print(actions)\n return actions[0]\n\n\n def savexp(self,state, next_state, action, done, reward):\n self.memory.storexp(state, next_state, action, done, reward)\n\n def update_target(self):\n self.actor_target.set_weights(self.actor_main.get_weights())\n self.critic_target.set_weights(self.critic_main.get_weights())\n self.critic_target2.set_weights(self.critic_main2.get_weights())\n\n \n def train(self):\n if self.memory.cnt < self.batch_size:\n return \n\n\n states, next_states, rewards, actions, dones = self.memory.sample(self.batch_size)\n \n states = tf.convert_to_tensor(states, dtype= tf.float32)\n next_states = tf.convert_to_tensor(next_states, dtype= tf.float32)\n rewards = tf.convert_to_tensor(rewards, dtype= tf.float32)\n actions = tf.convert_to_tensor(actions, dtype= tf.float32)\n #dones = tf.convert_to_tensor(dones, dtype= tf.bool)\n\n with tf.GradientTape() as tape1, tf.GradientTape() as tape2:\n \n target_actions = self.actor_target(next_states)\n target_actions += tf.clip_by_value(tf.random.normal(shape=[*np.shape(target_actions)], mean=0.0, stddev=0.2), -0.5, 0.5)\n target_actions = self.max_action * (tf.clip_by_value(target_actions, self.min_action, self.max_action))\n \n \n target_next_state_values = tf.squeeze(self.critic_target(next_states, target_actions), 1)\n target_next_state_values2 = tf.squeeze(self.critic_target2(next_states, target_actions), 1)\n \n critic_value = tf.squeeze(self.critic_main(states, actions), 1)\n critic_value2 = tf.squeeze(self.critic_main2(states, actions), 1)\n \n next_state_target_value = tf.math.minimum(target_next_state_values, target_next_state_values2)\n \n target_values = rewards + self.gamma * next_state_target_value * dones\n critic_loss1 = tf.keras.losses.MSE(target_values, critic_value)\n critic_loss2 = tf.keras.losses.MSE(target_values, critic_value2)\n \n\n\n \n grads1 = tape1.gradient(critic_loss1, self.critic_main.trainable_variables)\n grads2 = tape2.gradient(critic_loss2, self.critic_main2.trainable_variables)\n \n self.c_opt1.apply_gradients(zip(grads1, self.critic_main.trainable_variables))\n self.c_opt2.apply_gradients(zip(grads2, self.critic_main2.trainable_variables))\n \n \n self.trainstep +=1\n \n if self.trainstep % self.actor_update_steps == 0:\n \n with tf.GradientTape() as tape3:\n \n new_policy_actions = self.actor_main(states)\n actor_loss = -self.critic_main(states, new_policy_actions)\n actor_loss = tf.math.reduce_mean(actor_loss)\n \n grads3 = tape3.gradient(actor_loss, self.actor_main.trainable_variables)\n self.a_opt.apply_gradients(zip(grads3, self.actor_main.trainable_variables))\n\n #if self.trainstep % self.replace == 0:\n self.update_target()\n \n \n \n\n\nwith tf.device('GPU:0'):\n tf.random.set_seed(336699)\n agent = Agent(2)\n episods = 20000\n ep_reward = []\n total_avgr = []\n target = False\n\n for s in range(episods):\n if target == True:\n break\n total_reward = 0 \n state = env.reset()\n done = False\n\n while not done:\n env.render()\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n agent.savexp(state, next_state, action, done, reward)\n agent.train()\n state = next_state\n total_reward += reward\n if done:\n ep_reward.append(total_reward)\n avg_reward = np.mean(ep_reward[-100:])\n total_avgr.append(avg_reward)\n print(\"total reward after {} steps is {} and avg reward is {}\".format(s, total_reward, avg_reward))\n if avg_reward == 200:\n target = 
True\n\n\n\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.clip_by_value", "tensorflow.device", "tensorflow.concat", "numpy.random.choice", "tensorflow.random.normal", "tensorflow.keras.layers.Dense", "tensorflow.math.minimum", "tensorflow.keras.losses.MSE", "tensorflow.math.reduce_mean", "tensorflow.keras.optimizers.Adam", "tensorflow.config.list_physical_devices", "numpy.mean", "numpy.shape", "numpy.zeros", "tensorflow.random.set_seed", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
ictnlp/COKD
[ "15074b67c4917f22ef3f6495d96f092835ddfda5" ]
[ "fairseq/data/dictionary.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom collections import Counter\nfrom multiprocessing import Pool\nimport os\n\nimport torch\n\nfrom fairseq.tokenizer import tokenize_line\nfrom fairseq.binarizer import safe_readline\nfrom fairseq.data import data_utils\n\n\nclass Dictionary(object):\n \"\"\"A mapping from symbols to consecutive integers\"\"\"\n\n def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', bos='<s>'):\n self.unk_word, self.pad_word, self.eos_word = unk, pad, eos\n self.symbols = []\n self.count = []\n self.indices = {}\n self.bos_index = self.add_symbol(bos)\n self.pad_index = self.add_symbol(pad)\n self.eos_index = self.add_symbol(eos)\n self.unk_index = self.add_symbol(unk)\n self.nspecial = len(self.symbols)\n\n def __eq__(self, other):\n return self.indices == other.indices\n\n def __getitem__(self, idx):\n if idx < len(self.symbols):\n return self.symbols[idx]\n return self.unk_word\n\n def __len__(self):\n \"\"\"Returns the number of symbols in the dictionary\"\"\"\n return len(self.symbols)\n\n def index(self, sym):\n \"\"\"Returns the index of the specified symbol\"\"\"\n if sym in self.indices:\n return self.indices[sym]\n return self.unk_index\n\n def string(self, tensor, bpe_symbol=None, escape_unk=False):\n \"\"\"Helper for converting a tensor of token indices to a string.\n\n Can optionally remove BPE symbols or escape <unk> words.\n \"\"\"\n if torch.is_tensor(tensor) and tensor.dim() == 2:\n return '\\n'.join(self.string(t) for t in tensor)\n\n def token_string(i):\n if i == self.unk():\n return self.unk_string(escape_unk)\n else:\n return self[i]\n\n sent = ' '.join(token_string(i) for i in tensor if i != self.eos())\n return data_utils.process_bpe_symbol(sent, bpe_symbol)\n\n def unk_string(self, escape=False):\n \"\"\"Return unknown string, optionally escaped as: <<unk>>\"\"\"\n if escape:\n return '<{}>'.format(self.unk_word)\n else:\n return self.unk_word\n\n def add_symbol(self, word, n=1):\n \"\"\"Adds a word to the dictionary\"\"\"\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + n\n return idx\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(n)\n return idx\n\n def update(self, new_dict):\n \"\"\"Updates counts from new dictionary.\"\"\"\n for word in new_dict.symbols:\n idx2 = new_dict.indices[word]\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + new_dict.count[idx2]\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(new_dict.count[idx2])\n\n def finalize(self, threshold=-1, nwords=-1, padding_factor=8):\n \"\"\"Sort symbols by frequency in descending order, ignoring special ones.\n\n Args:\n - threshold defines the minimum word count\n - nwords defines the total number of words in the final dictionary,\n including special symbols\n - padding_factor can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n if nwords <= 0:\n nwords = len(self)\n\n new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))\n new_symbols = self.symbols[:self.nspecial]\n new_count = self.count[:self.nspecial]\n\n 
c = Counter(dict(sorted(zip(self.symbols[self.nspecial:], self.count[self.nspecial:]))))\n for symbol, count in c.most_common(nwords - self.nspecial):\n if count >= threshold:\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(count)\n else:\n break\n\n threshold_nwords = len(new_symbols)\n if padding_factor > 1:\n i = 0\n while threshold_nwords % padding_factor != 0:\n symbol = 'madeupword{:04d}'.format(i)\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(0)\n i += 1\n threshold_nwords += 1\n\n assert len(new_symbols) % padding_factor == 0\n assert len(new_symbols) == len(new_indices)\n\n self.count = list(new_count)\n self.symbols = list(new_symbols)\n self.indices = new_indices\n\n def bos(self):\n \"\"\"Helper to get index of beginning-of-sentence symbol\"\"\"\n return self.bos_index\n\n def pad(self):\n \"\"\"Helper to get index of pad symbol\"\"\"\n return self.pad_index\n\n def eos(self):\n \"\"\"Helper to get index of end-of-sentence symbol\"\"\"\n return self.eos_index\n\n def unk(self):\n \"\"\"Helper to get index of unk symbol\"\"\"\n return self.unk_index\n\n @classmethod\n def load(cls, f, ignore_utf_errors=False):\n \"\"\"Loads the dictionary from a text file with the format:\n\n ```\n <symbol0> <count0>\n <symbol1> <count1>\n ...\n ```\n \"\"\"\n if isinstance(f, str):\n try:\n if not ignore_utf_errors:\n with open(f, 'r', encoding='utf-8') as fd:\n return cls.load(fd)\n else:\n with open(f, 'r', encoding='utf-8', errors='ignore') as fd:\n return cls.load(fd)\n except FileNotFoundError as fnfe:\n raise fnfe\n except UnicodeError:\n raise Exception(\"Incorrect encoding detected in {}, please \"\n \"rebuild the dataset\".format(f))\n\n d = cls()\n lines = f.readlines()\n indices_start_line = d._load_meta(lines)\n for line in lines[indices_start_line:]:\n idx = line.rfind(' ')\n if idx == -1:\n raise ValueError(\"Incorrect dictionary format, expected '<token> <cnt>'\")\n word = line[:idx]\n count = int(line[idx + 1:])\n d.indices[word] = len(d.symbols)\n d.symbols.append(word)\n d.count.append(count)\n return d\n\n def _save(self, f, kv_iterator):\n if isinstance(f, str):\n os.makedirs(os.path.dirname(f), exist_ok=True)\n with open(f, 'w', encoding='utf-8') as fd:\n return self.save(fd)\n for k, v in kv_iterator:\n print('{} {}'.format(k, v), file=f)\n\n def _get_meta(self):\n return [], []\n\n def _load_meta(self, lines):\n return 0\n\n def save(self, f):\n \"\"\"Stores dictionary into a text file\"\"\"\n ex_keys, ex_vals = self._get_meta()\n self._save(f, zip(ex_keys + self.symbols[self.nspecial:], ex_vals + self.count[self.nspecial:]))\n\n def dummy_sentence(self, length):\n t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()\n t[-1] = self.eos()\n return t\n\n def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True,\n consumer=None, append_eos=True, reverse_order=False):\n words = line_tokenizer(line)\n if reverse_order:\n words = list(reversed(words))\n nwords = len(words)\n ids = torch.IntTensor(nwords + 1 if append_eos else nwords)\n\n for i, word in enumerate(words):\n if add_if_not_exist:\n idx = self.add_symbol(word)\n else:\n idx = self.index(word)\n if consumer is not None:\n consumer(word, idx)\n ids[i] = idx\n if append_eos:\n ids[nwords] = self.eos_index\n return ids\n\n @staticmethod\n def _add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):\n counter = Counter()\n with open(filename, 'r', 
encoding='utf-8',errors='replace') as f:\n size = os.fstat(f.fileno()).st_size\n chunk_size = size // num_workers\n offset = worker_id * chunk_size\n end = offset + chunk_size\n f.seek(offset)\n if offset > 0:\n safe_readline(f) # drop first incomplete line\n line = f.readline()\n while line:\n for word in tokenize(line):\n counter.update([word])\n counter.update([eos_word])\n if f.tell() > end:\n break\n line = f.readline()\n return counter\n\n @staticmethod\n def add_file_to_dictionary(filename, dict, tokenize, num_workers):\n def merge_result(counter):\n for w, c in sorted(counter.items()):\n dict.add_symbol(w, c)\n\n if num_workers > 1:\n pool = Pool(processes=num_workers)\n results = []\n for worker_id in range(num_workers):\n results.append(pool.apply_async(\n Dictionary._add_file_to_dictionary_single_worker,\n (filename, tokenize, dict.eos_word, worker_id, num_workers)\n ))\n pool.close()\n pool.join()\n for r in results:\n merge_result(r.get())\n else:\n merge_result(Dictionary._add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))\n\n\nclass TruncatedDictionary(object):\n\n def __init__(self, wrapped_dict, length):\n self.__class__ = type(\n wrapped_dict.__class__.__name__,\n (self.__class__, wrapped_dict.__class__),\n {}\n )\n self.__dict__ = wrapped_dict.__dict__\n self.wrapped_dict = wrapped_dict\n self.length = min(len(self.wrapped_dict), length)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, i):\n if i < self.length:\n return self.wrapped_dict[i]\n return self.wrapped_dict.unk()\n" ]
[ [ "torch.is_tensor", "torch.Tensor", "torch.IntTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yiminglin-ai/face_embedding
[ "a96ba31f75cf9a9a2bcebbd47c7bbffecafb73ed" ]
[ "face_embedding_train.py" ]
[ "import argparse\nimport logging\nimport os\nimport time\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn.functional as F\nimport torch.utils.data.distributed\nfrom torch.nn.utils import clip_grad_norm_\n\nimport ibug.face_embedding.backbones.iresnet as iresnet\nimport ibug.face_embedding.backbones.rtnet as rtnet\nimport ibug.face_embedding.utils.losses as losses\nfrom ibug.face_embedding.utils.dataset import MXFaceDataset, DataLoaderX\nfrom ibug.face_embedding.utils.partial_fc import PartialFC\nfrom ibug.face_embedding.utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint\nfrom ibug.face_embedding.utils.utils_logging import AverageMeter, init_logging\nfrom ibug.face_embedding.utils.utils_amp import MaxClipGradScaler\nfrom ibug.face_embedding.utils.misc import get_projection_options\nfrom ibug.face_embedding.utils.train_config import config as cfg\n\ntorch.backends.cudnn.benchmark = True\n\ndef main(args):\n\n world_size = int(os.environ['WORLD_SIZE'])\n rank = int(os.environ['RANK'])\n\n dist_url = \"tcp://{}:{}\".format(os.environ[\"MASTER_ADDR\"], os.environ[\"MASTER_PORT\"])\n\n dist.init_process_group(backend='nccl', init_method=dist_url, rank=rank, world_size=world_size)\n local_rank = args.local_rank\n torch.cuda.set_device(local_rank)\n\n if args.output_dir is not None:\n cfg.output = os.path.join(cfg.output, args.output_dir.strip())\n\n if args.batch_size_per_gpu is not None:\n cfg.batch_size = args.batch_size_per_gpu\n\n if not os.path.exists(cfg.output) and rank is 0:\n os.makedirs(cfg.output)\n else:\n time.sleep(2)\n\n log_root = logging.getLogger()\n init_logging(log_root, rank, cfg.output)\n\n projection_options = get_projection_options(args)\n trainset = MXFaceDataset(\n root_dir=cfg.rec,\n local_rank=local_rank,\n project_to_space=args.project_to_space,\n augment_projection=True,\n **projection_options)\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n trainset, shuffle=True)\n train_loader = DataLoaderX(\n local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,\n sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)\n\n dropout = 0.4 if cfg.dataset is \"webface\" else 0\n\n net_type = args.network.strip()\n if \"resnet\" in net_type: # use iresnet serials as backbone\n backbone = eval(\"iresnet.{}\".format(net_type))(False, dropout=dropout, fp16=cfg.fp16).to(local_rank)\n print(\"Using iRseNet backbone: {}\".format(net_type))\n elif \"rtnet\" in net_type: # use rtnet serials as backbone\n backbone = eval(\"rtnet.{}\".format(net_type))(\n False, dropout=dropout, fp16=cfg.fp16, dilated=False).to(local_rank)\n print(\"Using RTNet backbone: {}\".format(net_type))\n else:\n raise ValueError('Unsupported network options: {}'.format(net_type))\n\n if args.resume:\n try:\n backbone_pth = os.path.join(cfg.output, \"backbone.pth\")\n backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))\n if rank is 0:\n logging.info(\"backbone resume successfully!\")\n except (FileNotFoundError, KeyError, IndexError, RuntimeError):\n logging.info(\"resume fail, backbone init successfully!\")\n\n for ps in backbone.parameters():\n dist.broadcast(ps, 0)\n backbone = torch.nn.parallel.DistributedDataParallel(\n module=backbone, broadcast_buffers=False, device_ids=[local_rank])\n backbone.train()\n\n margin_softmax = eval(\"losses.{}\".format(args.loss))()\n module_partial_fc = PartialFC(\n rank=rank, local_rank=local_rank, world_size=world_size, 
resume=args.resume,\n batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes,\n sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output)\n\n opt_backbone = torch.optim.SGD(\n params=[{'params': backbone.parameters()}],\n lr=cfg.lr / 512 * cfg.batch_size * world_size,\n momentum=0.9, weight_decay=cfg.weight_decay)\n opt_pfc = torch.optim.SGD(\n params=[{'params': module_partial_fc.parameters()}],\n lr=cfg.lr / 512 * cfg.batch_size * world_size,\n momentum=0.9, weight_decay=cfg.weight_decay)\n\n scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(\n optimizer=opt_backbone, lr_lambda=cfg.lr_func)\n scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(\n optimizer=opt_pfc, lr_lambda=cfg.lr_func)\n\n start_epoch = 0\n total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)\n if rank is 0: logging.info(\"Total Step is: %d\" % total_step)\n\n callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec,\n project_to_space=args.project_to_space,\n roi_ratio=projection_options['roi_ratio'],\n keep_aspect_ratio=projection_options['keep_aspect_ratio'],\n backbone_type=net_type)\n callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)\n callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)\n\n loss = AverageMeter()\n global_step = 0\n grad_scaler = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None\n for epoch in range(start_epoch, cfg.num_epoch):\n train_sampler.set_epoch(epoch)\n for step, loader_data in enumerate(train_loader):\n\n global_step += 1\n if len(loader_data) == 2:\n img, label = loader_data\n else:\n img, label, rois = loader_data\n\n if \"resnet\" in net_type:\n # iresent backbone\n features = F.normalize(backbone(img))\n else:\n # rtnetwork requires RoIs as additional input\n features = F.normalize(backbone(img, rois))\n\n x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc)\n if cfg.fp16:\n features.backward(grad_scaler.scale(x_grad))\n grad_scaler.unscale_(opt_backbone)\n clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)\n grad_scaler.step(opt_backbone)\n grad_scaler.update()\n else:\n features.backward(x_grad)\n clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)\n opt_backbone.step()\n\n opt_pfc.step()\n module_partial_fc.update()\n opt_backbone.zero_grad()\n opt_pfc.zero_grad()\n loss.update(loss_v, 1)\n callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler)\n callback_verification(global_step, backbone)\n\n callback_checkpoint(global_step, backbone, module_partial_fc)\n scheduler_backbone.step()\n scheduler_pfc.step()\n dist.destroy_process_group()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='PyTorch ArcFace Training')\n parser.add_argument('--local_rank', type=int, default=0, help='local_rank')\n parser.add_argument('--network', type=str, default='iresnet50', help='backbone network')\n parser.add_argument('--loss', type=str, default='ArcFace', help='loss function')\n parser.add_argument('--resume', type=int, default=0, help='model resuming')\n parser.add_argument('--output_dir', type=str, default=None,\n help='The name of the output directory to save results.')\n parser.add_argument('--batch_size_per_gpu', type=int, default=None, help='the batch size per gpu')\n parser.add_argument('--project_to_space', type=str, default=None,\n help='The space to project facial images into. 
'\n 'Options: roi_tanh_polar, roi_tanh_circular, roi_tanh')\n # options for space projection\n parser.add_argument('--roi_ratio', type=str, default=\"0.8,0.8\",\n help='The ratio of RoI region with respect to the whole image when doing space projection')\n parser.add_argument('--roi_offset_range', type=str, default=\"-0.09,0.09\",\n help='The RoI offset range during space projection')\n parser.add_argument('--angular_offset_range', type=str, default=\"-0.35,0.35\",\n help='The angular offset during space projection')\n parser.add_argument('--ignore_aspect_ratio', default=False, action=\"store_true\",\n help='If specified, will ignore aspect ratio during space projection')\n\n args_ = parser.parse_args()\n main(args_)\n" ]
[ [ "torch.distributed.broadcast", "torch.optim.lr_scheduler.LambdaLR", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.cuda.set_device", "torch.distributed.destroy_process_group", "torch.device", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RJRL12138/LSTM-CRF-Terminology-extraction
[ "4919d7685e2dd1297aa7a682316950a15f0755dd" ]
[ "LSTM_CRF_model.py" ]
[ "from keras.initializers import Constant\nfrom keras.models import Model, Input\nfrom keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional\nfrom keras_contrib.layers import CRF\nfrom sklearn_crfsuite.metrics import flat_classification_report\nimport numpy as np\nfrom config import Config\n\n# Load the configuration\nconfig = Config()\n\n# Building the model\ninput = Input(shape=(config.MAX_LEN,))\nmodel = Embedding(input_dim=config.n_words, output_dim=config.EMBEDDING,\n embeddings_initializer=Constant(config.embedding_matrix), trainable=False,\n input_length=config.MAX_LEN)(input)\nmodel = Bidirectional(LSTM(units=100, return_sequences=True,\n recurrent_dropout=0.1))(model)\nmodel = TimeDistributed(Dense(80, activation=\"relu\"))(model)\nmodel = Dense(40, activation='relu')(model)\ncrf = CRF(config.n_tags + 1)\nout = crf(model)\nmodel = Model(input, out)\n\n# Compile and train the model\nmodel.compile(optimizer=\"rmsprop\", loss=crf.loss_function, metrics=[crf.accuracy])\nmodel.summary()\nhistory = model.fit(config.X_tr, np.array(config.y_tr), batch_size=config.BATCH_SIZE, epochs=config.EPOCHS,\n validation_split=0.1, verbose=2)\n# Evaluate the model and get the summary for prediction\npred_cat = model.predict(config.X_te)\npred = np.argmax(pred_cat, axis=-1)\ny_te_true = np.argmax(config.y_te, -1)\n\npred_tag = [[config.idx2tag[i] for i in row] for row in pred]\ny_te_true_tag = [[config.idx2tag[i] for i in row] for row in y_te_true]\n\nreport = flat_classification_report(y_pred=pred_tag, y_true=y_te_true_tag)\nprint(report)\n\n# Save the model to file\nmodel.save(config.model_save_path)\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
feudalism/spatialmath-python
[ "eac94363822c642d9e360db0d5f86585334473c5" ]
[ "spatialmath/geom3d.py" ]
[ "# Part of Spatial Math Toolbox for Python\n# Copyright (c) 2000 Peter Corke\n# MIT Licence, see details in top-level file: LICENCE\n\nimport numpy as np\nimport math\nfrom collections import namedtuple\nimport matplotlib.pyplot as plt\nimport spatialmath.base as base\nfrom spatialmath import SE3\nfrom spatialmath.baseposelist import BasePoseList\n\n_eps = np.finfo(np.float64).eps\n\n# ======================================================================== #\n\nclass Plane:\n r\"\"\"\n Create a plane object from linear coefficients\n \n :param c: Plane coefficients\n :type c: 4-element array_like\n :return: a Plane object\n :rtype: Plane\n\n Planes are represented by the 4-vector :math:`[a, b, c, d]` which describes\n the plane :math:`\\pi: ax + by + cz + d=0`.\n \"\"\"\n def __init__(self, c):\n\n self.plane = base.getvector(c, 4)\n \n # point and normal\n @classmethod\n def PN(cls, p, n):\n \"\"\"\n Create a plane object from point and normal\n \n :param p: Point in the plane\n :type p: 3-element array_like\n :param n: Normal to the plane\n :type n: 3-element array_like\n :return: a Plane object\n :rtype: Plane\n\n \"\"\"\n n = base.getvector(n, 3) # normal to the plane\n p = base.getvector(p, 3) # point on the plane\n return cls(np.r_[n, -np.dot(n, p)])\n \n # point and normal\n @classmethod\n def P3(cls, p):\n \"\"\"\n Create a plane object from three points\n \n :param p: Three points in the plane\n :type p: numpy.ndarray, shape=(3,3)\n :return: a Plane object\n :rtype: Plane\n \"\"\"\n \n p = base.ismatrix(p, (3,3))\n v1 = p[:,0]\n v2 = p[:,1]\n v3 = p[:,2]\n \n # compute a normal\n n = np.cross(v2-v1, v3-v1)\n \n return cls(n, v1)\n \n # line and point\n # 3 points\n \n @property\n def n(self):\n r\"\"\"\n Normal to the plane\n \n :return: Normal to the plane\n :rtype: 3-element array_like\n \n For a plane :math:`\\pi: ax + by + cz + d=0` this is the vector\n :math:`[a,b,c]`.\n\n \"\"\"\n # normal\n return self.plane[:3]\n \n @property\n def d(self):\n r\"\"\"\n Plane offset\n \n :return: Offset of the plane\n :rtype: float\n \n For a plane :math:`\\pi: ax + by + cz + d=0` this is the scalar\n :math:`d`.\n\n \"\"\"\n return self.plane[3]\n \n def contains(self, p, tol=10*_eps):\n \"\"\"\n \n :param p: A 3D point\n :type p: 3-element array_like\n :param tol: Tolerance, defaults to 10*_eps\n :type tol: float, optional\n :return: if the point is in the plane\n :rtype: bool\n\n \"\"\"\n return abs(np.dot(self.n, p) - self.d) < tol\n \n def __str__(self):\n \"\"\"\n \n :return: String representation of plane\n :rtype: str\n\n \"\"\"\n return str(self.plane)\n\n# ======================================================================== #\n\nclass Plucker(BasePoseList):\n \"\"\"\n Plucker coordinate class\n \n Concrete class to represent a 3D line using Plucker coordinates.\n \n Methods:\n \n Plucker Contructor from points\n Plucker.planes Constructor from planes\n Plucker.pointdir Constructor from point and direction\n \n Information and test methods::\n closest closest point on line\n commonperp common perpendicular for two lines\n contains test if point is on line\n distance minimum distance between two lines\n intersects intersection point for two lines\n intersect_plane intersection points with a plane\n intersect_volume intersection points with a volume\n pp principal point\n ppd principal point distance from origin\n point generate point on line\n \n Conversion methods::\n char convert to human readable string\n double convert to 6-vector\n skew convert to 4x4 skew symmetric 
matrix\n \n Display and print methods::\n display display in human readable form\n plot plot line\n \n Operators:\n * multiply Plucker matrix by a general matrix\n | test if lines are parallel\n ^ test if lines intersect\n == test if two lines are equivalent\n ~= test if lines are not equivalent\n\n Notes:\n \n - This is reference (handle) class object\n - Plucker objects can be used in vectors and arrays\n \n References:\n \n - Ken Shoemake, \"Ray Tracing News\", Volume 11, Number 1\n http://www.realtimerendering.com/resources/RTNews/html/rtnv11n1.html#art3\n - Matt Mason lecture notes http://www.cs.cmu.edu/afs/cs/academic/class/16741-s07/www/lectures/lecture9.pdf\n - Robotics, Vision & Control: Second Edition, P. Corke, Springer 2016; p596-7.\n \n Implementation notes:\n \n - The internal representation is a 6-vector [v, w] where v (moment), w (direction).\n - There is a huge variety of notation used across the literature, as well as the ordering\n of the direction and moment components in the 6-vector.\n \n Copyright (C) 1993-2019 Peter I. Corke\n \"\"\"\n\n # w # direction vector\n # v # moment vector (normal of plane containing line and origin)\n \n def __init__(self, v=None, w=None):\n \"\"\"\n Create a Plucker 3D line object\n \n :param v: Plucker vector, Plucker object, Plucker moment\n :type v: 6-element array_like, Plucker instance, 3-element array_like\n :param w: Plucker direction, optional\n :type w: 3-element array_like, optional\n :raises ValueError: bad arguments\n :return: Plucker line\n :rtype: Plucker\n\n - ``L = Plucker(X)`` creates a Plucker object from the Plucker coordinate vector\n ``X`` = [V,W] where V (3-vector) is the moment and W (3-vector) is the line direction.\n\n - ``L = Plucker(L)`` creates a copy of the Plucker object ``L``.\n \n - ``L = Plucker(V, W)`` creates a Plucker object from moment ``V`` (3-vector) and\n line direction ``W`` (3-vector).\n \n Notes:\n \n - The Plucker object inherits from ``collections.UserList`` and has list-like\n behaviours.\n - A single Plucker object contains a 1D array of Plucker coordinates.\n - The elements of the array are guaranteed to be Plucker coordinates.\n - The number of elements is given by ``len(L)``\n - The elements can be accessed using index and slice notation, eg. ``L[1]`` or\n ``L[2:3]``\n - The Plucker instance can be used as an iterator in a for loop or list comprehension.\n - Some methods support operations on the internal list.\n \n :seealso: Plucker.PQ, Plucker.Planes, Plucker.PointDir\n \"\"\"\n super().__init__() # enable list powers\n\n if w is None:\n # zero or one arguments passed\n if super().arghandler(v, convertfrom=(SE3,)):\n return\n\n else:\n # additional arguments\n assert base.isvector(v, 3) and base.isvector(w, 3), 'expecting two 3-vectors'\n self.data = [np.r_[v, w]]\n \n # needed to allow __rmul__ to work if left multiplied by ndarray\n #self.__array_priority__ = 100 \n\n @property\n def shape(self):\n return (6,)\n\n @staticmethod\n def _identity():\n return np.zeros((6,))\n\n @staticmethod\n def isvalid(x, check=False):\n return x.shape == (6,)\n\n @staticmethod\n def TwoPoints(P=None, Q=None):\n \"\"\"\n Create Plucker line object from two 3D points\n \n :param P: First 3D point\n :type P: 3-element array_like\n :param Q: Second 3D point\n :type Q: 3-element array_like\n :return: Plucker line\n :rtype: Plucker\n\n ``L = Plucker(P, Q)`` create a Plucker object that represents\n the line joining the 3D points ``P`` (3-vector) and ``Q`` (3-vector). 
The direction\n is from ``Q`` to ``P``.\n\n :seealso: Plucker, Plucker.TwoPlanes, Plucker.PointDir\n \"\"\"\n P = base.getvector(P, 3)\n Q = base.getvector(Q, 3)\n # compute direction and moment\n w = P - Q\n v = np.cross(w, P)\n return Plucker(np.r_[v, w])\n \n @staticmethod\n def TwoPlanes(pi1, pi2):\n r\"\"\"\n Create Plucker line from two planes\n \n :param pi1: First plane\n :type pi1: 4-element array_like, or Plane\n :param pi2: Second plane\n :type pi2: 4-element array_like, or Plane\n :return: Plucker line\n :rtype: Plucker\n\n ``L = Plucker.TwoPlanes(PI1, PI2)`` is a Plucker object that represents\n the line formed by the intersection of two planes ``PI1`` and ``PI2``.\n\n Planes are represented by the 4-vector :math:`[a, b, c, d]` which describes\n the plane :math:`\pi: ax + by + cz + d=0`.\n \n :seealso: Plucker, Plucker.TwoPoints, Plucker.PointDir\n \"\"\"\n\n if not isinstance(pi1, Plane):\n pi1 = Plane(base.getvector(pi1, 4))\n if not isinstance(pi2, Plane):\n pi2 = Plane(base.getvector(pi2, 4))\n \n w = np.cross(pi1.n, pi2.n)\n v = pi2.d * pi1.n - pi1.d * pi2.n\n return Plucker(np.r_[v, w])\n\n @staticmethod\n def PointDir(point, dir):\n \"\"\"\n Create Plucker line from point and direction\n \n :param point: A 3D point\n :type point: 3-element array_like\n :param dir: Direction vector\n :type dir: 3-element array_like\n :return: Plucker line\n :rtype: Plucker\n \n ``L = Plucker.PointDir(P, W)`` is a Plucker object that represents the\n line containing the point ``P`` and parallel to the direction vector ``W``.\n\n :seealso: Plucker, Plucker.TwoPlanes, Plucker.TwoPoints\n \"\"\"\n\n p = base.getvector(point, 3)\n w = base.getvector(dir, 3)\n v = np.cross(w, p)\n return Plucker(np.r_[v, w])\n \n def append(self, x):\n \"\"\"\n Append a line to the object\n \n :param x: Plucker object\n :type x: Plucker\n :raises ValueError: Attempt to append a non Plucker object\n :return: Plucker object with new Plucker line appended\n :rtype: Plucker\n\n \"\"\"\n #print('in append method')\n if not type(self) == type(x):\n raise ValueError(\"can only append Plucker object\")\n if len(x) > 1:\n raise ValueError(\"can't append a Plucker sequence - use extend\")\n super().append(x.A)\n\n @property\n def A(self):\n # get the underlying numpy array\n if len(self.data) == 1:\n return self.data[0]\n else:\n return self.data\n\n def __getitem__(self, i):\n # print('getitem', i, 'class', self.__class__)\n return self.__class__(self.data[i])\n \n @property\n def v(self):\n \"\"\"\n Moment vector\n \n :return: the moment vector\n :rtype: numpy.ndarray, shape=(3,)\n\n \"\"\"\n return self.data[0][0:3]\n \n @property\n def w(self):\n \"\"\"\n Direction vector\n \n :return: the direction vector\n :rtype: numpy.ndarray, shape=(3,)\n \n :seealso: Plucker.uw\n\n \"\"\"\n return self.data[0][3:6]\n \n @property\n def uw(self):\n \"\"\"\n Line direction as a unit vector\n \n :return: Line direction\n :rtype: numpy.ndarray, shape=(3,)\n\n ``line.uw`` is a unit-vector parallel to the line.\n \"\"\"\n return base.unitvec(self.w)\n \n @property\n def vec(self):\n \"\"\"\n Line as a Plucker coordinate vector\n \n :return: Coordinate vector\n :rtype: numpy.ndarray, shape=(6,)\n \n ``line.vec`` is the Plucker coordinate vector ``X`` = [V,W] where V (3-vector)\n is the moment and W (3-vector) is the line direction.\n \"\"\"\n return np.r_[self.v, self.w]\n \n @property\n def skew(self):\n r\"\"\"\n Line as a Plucker skew-matrix\n \n :return: Skew-symmetric matrix form of Plucker coordinates\n :rtype: numpy.ndarray, shape=(4,4)\n\n ``M = line.skew`` is the Plucker
matrix, a 4x4 skew-symmetric matrix\n representation of the line.\n\n Notes:\n \n - For two homogeneous points P and Q on the line, :math:`PQ^T-QP^T` is also skew\n symmetric.\n - The projection of Plucker line by a perspective camera is a homogeneous line (3x1)\n given by :math:`\\vee C M C^T` where :math:`C \\in \\mathbf{R}^{3 \\times 4}` is the camera matrix.\n \"\"\"\n \n v = self.v\n w = self.w\n \n # the following matrix is at odds with H&Z pg. 72\n return np.array([\n [ 0, v[2], -v[1], w[0]],\n [-v[2], 0 , v[0], w[1]],\n [ v[1], -v[0], 0, w[2]],\n [-w[0], -w[1], -w[2], 0 ]\n ])\n \n @property\n def pp(self):\n \"\"\"\n Principal point of the line\n\n ``line.pp`` is the point on the line that is closest to the origin.\n\n Notes:\n \n - Same as Plucker.point(0)\n\n :seealso: Plucker.ppd, Plucker.point\n \"\"\"\n \n return np.cross(self.v, self.w) / np.dot(self.w, self.w)\n\n @property\n def ppd(self):\n \"\"\"\n Distance from principal point to the origin\n\n :return: Distance from principal point to the origin\n :rtype: float\n \n ``line.ppd`` is the distance from the principal point to the origin.\n This is the smallest distance of any point on the line\n to the origin.\n\n :seealso: Plucker.pp\n \"\"\"\n return math.sqrt(np.dot(self.v, self.v) / np.dot(self.w, self.w) )\n\n def point(self, lam):\n r\"\"\"\n Generate point on line\n \n :param lam: Scalar distance from principal point\n :type lam: float\n :return: Distance from principal point to the origin\n :rtype: float\n\n ``line.point(LAMBDA)`` is a point on the line, where ``LAMBDA`` is the parametric\n distance along the line from the principal point of the line such\n that :math:`P = P_p + \\lambda \\hat{d}` and :math:`\\hat{d}` is the line\n direction given by ``line.uw``.\n\n :seealso: Plucker.pp, Plucker.closest, Plucker.uw\n \"\"\"\n lam = base.getvector(lam, out='row')\n return self.pp.reshape((3,1)) + self.uw.reshape((3,1)) * lam\n\n def lam(self, point):\n return np.dot( point.flatten() - self.pp, self.uw)\n\n # ------------------------------------------------------------------------- #\n # TESTS ON PLUCKER OBJECTS\n # ------------------------------------------------------------------------- #\n\n def contains(self, x, tol=50*_eps):\n \"\"\"\n Test if points are on the line\n \n :param x: 3D point\n :type x: 3-element array_like, or numpy.ndarray, shape=(3,N)\n :param tol: Tolerance, defaults to 50*_eps\n :type tol: float, optional\n :raises ValueError: Bad argument\n :return: Whether point is on the line\n :rtype: bool or numpy.ndarray(N) of bool\n\n ``line.contains(X)`` is true if the point ``X`` lies on the line defined by\n the Plucker object self.\n \n If ``X`` is an array with 3 rows, the test is performed on every column and\n an array of booleans is returned.\n \"\"\"\n if base.isvector(x, 3):\n x = base.getvector(x)\n return np.linalg.norm( np.cross(x - self.pp, self.w) ) < tol\n elif base.ismatrix(x, (3,None)):\n return [np.linalg.norm(np.cross(_ - self.pp, self.w)) < tol for _ in x.T]\n else:\n raise ValueError('bad argument')\n\n def __eq__(self, l2): # pylint: disable=no-self-argument\n \"\"\"\n Test if two lines are equivalent\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: Plucker\n :return: line equivalence\n :rtype: bool\n\n ``L1 == L2`` is true if the Plucker objects describe the same line in\n space. 
Note that because of the over parameterization, lines can be\n equivalent even if their coordinate vectors are different.\n \"\"\"\n l1 = self\n return abs( 1 - np.dot(base.unitvec(l1.vec), base.unitvec(l2.vec))) < 10*_eps\n \n def __ne__(self, l2): # pylint: disable=no-self-argument\n \"\"\"\n Test if two lines are not equivalent\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: line inequivalence\n :rtype: bool\n\n ``L1 != L2`` is true if the Plucker objects describe different lines in\n space. Note that because of the over parameterization, lines can be\n equivalent even if their coordinate vectors are different.\n \"\"\"\n l1 = self\n return not l1.__eq__(l2)\n \n def isparallel(self, l2, tol=10*_eps): # pylint: disable=no-self-argument\n \"\"\"\n Test if lines are parallel\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: lines are parallel\n :rtype: bool\n\n ``l1.isparallel(l2)`` is true if the two lines are parallel.\n \n ``l1 | l2`` as above but in binary operator form\n\n :seealso: Plucker.or, Plucker.intersects\n \"\"\"\n l1 = self\n return np.linalg.norm(np.cross(l1.w, l2.w) ) < tol\n\n \n def __or__(self, l2): # pylint: disable=no-self-argument\n \"\"\"\n Overloaded ``|`` operator tests for parallelism\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: lines are parallel\n :rtype: bool\n\n ``l1 | l2`` is an operator which is true if the two lines are parallel.\n\n\n .. note:: The ``|`` operator has low precendence.\n\n :seealso: Plucker.isparallel, Plucker.__xor__\n \"\"\"\n l1 = self\n return l1.isparallel(l2)\n\n def __xor__(self, l2): # pylint: disable=no-self-argument\n \n \"\"\"\n Overloaded ``^`` operator tests for intersection\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: lines intersect\n :rtype: bool\n\n ``l1 ^ l2`` is an operator which is true if the two lines intersect at a point.\n\n .. 
note:: \n \n - The ``^`` operator has low precedence.\n - Is ``False`` if the lines are equivalent since they would intersect at\n an infinite number of points.\n\n :seealso: Plucker.intersects, Plucker.isparallel\n \"\"\"\n l1 = self\n return not l1.isparallel(l2) and (abs(l1 * l2) < 10*_eps )\n \n # ------------------------------------------------------------------------- #\n # PLUCKER LINE DISTANCE AND INTERSECTION\n # ------------------------------------------------------------------------- # \n \n \n def intersects(self, l2): # pylint: disable=no-self-argument\n \"\"\"\n Intersection point of two lines\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: 3D intersection point\n :rtype: numpy.ndarray, shape=(3,) or None\n\n ``l1.intersects(l2)`` is the point of intersection of the two lines, or\n ``None`` if the lines do not intersect or are equivalent.\n\n\n :seealso: Plucker.commonperp, Plucker.__eq__, Plucker.__xor__\n \"\"\"\n l1 = self\n if l1^l2:\n # lines do intersect\n return -(np.dot(l1.v, l2.w) * np.eye(3, 3) + \\\n l1.w.reshape((3,1)) @ l2.v.reshape((1,3)) - \\\n l2.w.reshape((3,1)) @ l1.v.reshape((1,3))) * base.unitvec(np.cross(l1.w, l2.w))\n else:\n # lines don't intersect\n return None\n \n def distance(self, l2): # pylint: disable=no-self-argument\n \"\"\"\n Minimum distance between lines\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: Closest distance\n :rtype: float\n\n ``l1.distance(l2)`` is the minimum distance between two lines.\n \n Notes:\n \n - Works for parallel, skew and intersecting lines.\n \"\"\"\n l1 = self\n if l1 | l2:\n # lines are parallel\n l = np.cross(l1.w, l1.v - l2.v * np.dot(l1.w, l2.w) / np.dot(l2.w, l2.w)) / np.linalg.norm(l1.w)\n else:\n # lines are not parallel\n if abs(l1 * l2) < 10*_eps:\n # lines intersect at a point\n l = 0\n else:\n # lines don't intersect, find closest distance\n l = abs(l1 * l2) / np.linalg.norm(np.cross(l1.w, l2.w))**2\n return l\n\n def closest_to_line(self, line):\n \"\"\"\n Point on line closest to another line\n \n :param line: A line\n :type line: Plucker\n :return: Point on this line and distance to the other line\n \"\"\"\n # https://web.cs.iastate.edu/~cs577/handouts/plucker-coordinates.pdf\n # but (20) (21) is the negative of correct answer\n\n p = []\n dist = []\n for line1, line2 in zip(self, line):\n v1 = line1.v\n w1 = line1.w\n v2 = line2.v\n w2 = line2.w\n p1 = (np.cross(v1, np.cross(w2, np.cross(w1, w2))) - np.dot(v2, np.cross(w1, w2)) * w1) \\\n / np.sum(np.cross(w1, w2) ** 2)\n p2 = (np.cross(-v2, np.cross(w1, np.cross(w1, w2))) + np.dot(v1, np.cross(w1, w2)) * w2) \\\n / np.sum(np.cross(w1, w2) ** 2)\n\n p.append(p1)\n dist.append(np.linalg.norm(p1 - p2))\n \n if len(p) == 1:\n return p[0], dist[0]\n else:\n return np.array(p).T, np.array(dist)\n\n def closest_to_point(self, x):\n \"\"\"\n Point on line closest to given point\n \n :param x: An arbitrary 3D point\n :type x: 3-element array_like\n :return: Point on the line and distance to line\n :rtype: collections.namedtuple\n\n - ``line.closest_to_point(x).p`` is the coordinate of a point on the line that is\n closest to ``x``.\n\n - ``line.closest_to_point(x).d`` is the distance between the point on the line and ``x``.\n \n The return value is a named tuple with elements:\n \n - ``.p`` for the point on the line as a numpy.ndarray, shape=(3,)\n - ``.d`` for the distance to the point from ``x``\n - ``.lam`` the `lambda` value for the point on the line.\n\n :seealso: Plucker.point\n \"\"\"\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = base.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam).flatten() # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)\n \n \n def commonperp(self, l2): # pylint: disable=no-self-argument\n \"\"\"\n Common perpendicular to two lines\n \n :param l1: First line\n :type l1: Plucker\n :param l2: Second line\n :type l2: Plucker\n :return: Perpendicular line\n :rtype: Plucker or None\n\n ``l1.commonperp(l2)`` is the common perpendicular line between the two lines.\n Returns ``None`` if the lines are parallel.\n\n :seealso: Plucker.intersects\n \"\"\"\n l1 = self\n if l1 | l2:\n # no common perpendicular if lines are parallel\n return None\n else:\n # lines are skew or intersecting\n w = np.cross(l1.w, l2.w)\n v = np.cross(l1.v, l2.w) - np.cross(l2.v, l1.w) + \\\n (l1 * l2) * np.dot(l1.w, l2.w) * base.unitvec(np.cross(l1.w, l2.w))\n \n return Plucker(v, w)\n\n\n def __mul__(self, right): # pylint: disable=no-self-argument\n r\"\"\"\n Reciprocal product\n \n :param left: Left operand\n :type left: Plucker\n :param right: Right operand\n :type right: Plucker\n :return: reciprocal product\n :rtype: float\n\n ``left * right`` is the scalar reciprocal product :math:`\hat{w}_L \cdot m_R + \hat{w}_R \cdot m_L`.\n\n Notes:\n \n - Multiplication or composition of Plucker lines is not defined.\n - Pre-multiplication by an SE3 object is supported, see ``__rmul__``.\n\n :seealso: Plucker.__rmul__\n \"\"\"\n left = self\n if isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n else:\n raise ValueError('bad arguments')\n \n def __rmul__(self, left): # pylint: disable=no-self-argument\n \"\"\"\n Line transformation\n\n :param left: Rigid-body transform\n :type left: SE3\n :param right: Right operand\n :type right: Plucker\n :return: transformed line\n :rtype: Plucker\n \n ``T * line`` is the line transformed by the rigid body transformation ``T``.\n\n\n :seealso: Plucker.__mul__\n \"\"\"\n right = self\n if isinstance(left, SE3):\n A = np.r_[ np.c_[left.R, base.skew(-left.t) @ left.R],\n np.c_[np.zeros((3,3)), left.R]\n ]\n return Plucker( A @ right.vec) # premultiply by SE3\n else:\n raise ValueError('bad arguments')\n\n # ------------------------------------------------------------------------- #\n # PLUCKER LINE DISTANCE AND INTERSECTION\n # ------------------------------------------------------------------------- # \n\n\n def intersect_plane(self, plane): # pylint: disable=no-self-argument\n r\"\"\"\n Line intersection with a plane\n \n :param line: A line\n :type line: Plucker\n :param plane: A plane\n :type plane: 4-element array_like or Plane\n :return: Intersection point\n :rtype: collections.namedtuple\n\n - ``line.intersect_plane(plane).p`` is the point where the line \n intersects the plane, or None if no intersection.\n \n - ``line.intersect_plane(plane).lam`` is the `lambda` value for the point on the line\n that intersects the plane.\n\n The plane can be specified as:\n \n - a 4-vector :math:`[a, b, c, d]` which describes the plane :math:`\pi: ax + by + cz + d=0`.\n - a ``Plane`` object\n \n The return value is a named tuple with elements:\n \n - ``.p`` for the point on the line as a numpy.ndarray, shape=(3,)\n - ``.lam`` the `lambda` value for the point on the line.\n\n See also Plucker.point.\n \"\"\"\n \n #
Line U, V\n # Plane N n\n # (VxN-nU:U.N)\n # Note that this is in homogeneous coordinates.\n # intersection of plane (n,p) with the line (v,p)\n # returns point and line parameter\n if not isinstance(plane, Plane):\n plane = Plane(base.getvector(plane, 4))\n \n den = np.dot(self.w, plane.n)\n \n if abs(den) > (100*_eps):\n # P = -(np.cross(line.v, plane.n) + plane.d * line.w) / den\n p = (np.cross(self.v, plane.n) - plane.d * self.w) / den\n \n t = self.lam(p)\n return namedtuple('intersect_plane', 'p lam')(p, t)\n else:\n return None\n\n def intersect_volume(self, bounds):\n \"\"\"\n Line intersection with a volume\n \n :param line: A line\n :type line: Plucker\n :param bounds: Bounds of an axis-aligned rectangular cuboid\n :type plane: 6-element array_like\n :return: Intersection point\n :rtype: collections.namedtuple\n \n ``line.intersect_volume(bounds).p`` is a matrix (3xN) with columns\n that indicate where the line intersects the faces of the volume\n specified by ``bounds`` = [xmin xmax ymin ymax zmin zmax]. The number of\n columns N is either:\n \n - 0, when the line is outside the plot volume or,\n - 2 when the line pierces the bounding volume.\n \n ``line.intersect_volume(bounds).lam`` is an array of shape=(N,) where\n N is as above.\n \n The return value is a named tuple with elements:\n \n - ``.p`` for the points on the line as a numpy.ndarray, shape=(3,N)\n - ``.lam`` for the `lambda` values for the intersection points as a\n numpy.ndarray, shape=(N,).\n \n See also Plucker.plot, Plucker.point.\n \"\"\"\n \n intersections = []\n \n # reshape, top row is minimum, bottom row is maximum\n bounds23 = bounds.reshape((3, 2))\n \n for face in range(0, 6):\n # for each face of the bounding volume\n # x=xmin, x=xmax, y=ymin, y=ymax, z=zmin, z=zmax\n\n # planes are:\n # 0 normal in x direction, xmin\n # 1 normal in x direction, xmax\n # 2 normal in y direction, ymin\n # 3 normal in y direction, ymax\n # 4 normal in z direction, zmin\n # 5 normal in z direction, zmax\n \n i = face // 2 # 0, 1, 2\n I = np.eye(3,3)\n p = [0, 0, 0]\n p[i] = bounds[face]\n plane = Plane.PN(n=I[:,i], p=p)\n \n # find where line pierces the plane\n try:\n p, lam = self.intersect_plane(plane)\n except TypeError:\n continue # no intersection with this plane\n \n # print('face %d: n=(%f, %f, %f)' % (face, plane.n[0], plane.n[1], plane.n[2]))\n # print(' : p=(%f, %f, %f) ' % (p[0], p[1], p[2]))\n \n # print('face', face, ' point ', p, ' plane ', plane)\n # print('lamda', lam, self.point(lam))\n # find if intersection point is within the cube face\n # test x,y,z simultaneously\n k = (p >= bounds23[:,0]) & (p <= bounds23[:,1])\n k = np.delete(k, i) # remove the boolean corresponding to current face\n if all(k):\n # if within bounds, add\n intersections.append(lam)\n \n# print(' HIT');\n\n # put them in ascending order\n intersections.sort()\n p = self.point(intersections)\n \n return namedtuple('intersect_volume', 'p lam')(p, intersections)\n\n \n # ------------------------------------------------------------------------- #\n # PLOT AND DISPLAY\n # ------------------------------------------------------------------------- # \n \n def plot(self, *pos, bounds=None, axis=None, **kwargs):\n \"\"\"\n Plot a line\n \n :param line: A line\n :type line: Plucker\n :param bounds: Bounds of an axis-aligned rectangular cuboid as [xmin xmax ymin ymax zmin zmax], optional\n :type plane: 6-element array_like\n :param **kwargs: Extra arguents passed to `Line2D 
<https://matplotlib.org/3.2.2/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_\n :return: Plotted line\n :rtype: Line3D or None\n\n - ``line.plot(bounds)`` adds a line segment to the current axes, and the handle of the line is returned. \n The line segment is defined by the intersection of the line and the given rectangular cuboid. \n If the line does not intersect the plotting volume None is returned.\n \n - ``line.plot()`` as above but the bounds are taken from the axis limits of the current axes.\n \n The line color or style is specified by:\n \n - a MATLAB-style linestyle like 'k--'\n - additional arguments passed to `Line2D <https://matplotlib.org/3.2.2/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_\n \n :seealso: Plucker.intersect_volume\n \"\"\"\n if axis is None:\n ax = plt.gca()\n else:\n ax = axis\n\n if bounds is None:\n bounds = np.r_[ax.get_xlim(), ax.get_ylim(), ax.get_zlim()]\n else:\n bounds = base.getvector(bounds, 6)\n ax.set_xlim(bounds[:2])\n ax.set_ylim(bounds[2:4])\n ax.set_zlim(bounds[4:6])\n\n # print(bounds)\n \n #U = self.Q - self.P;\n #line.p = self.P; line.v = unit(U);\n \n lines = []\n for line in self:\n P, lam = line.intersect_volume(bounds)\n \n if len(lam) > 0:\n l = ax.plot(tuple(P[0,:]), tuple(P[1,:]), tuple(P[2,:]), *pos, **kwargs)\n lines.append(l)\n return lines\n\n def __str__(self):\n \"\"\"\n Convert to a string\n \n :return: String representation of line parameters\n :rtype: str\n\n ``str(line)`` is a string showing Plucker parameters in a compact single\n line format like::\n \n { 0 0 0; -1 -2 -3}\n \n where the first three numbers are the moment, and the last three are the \n direction vector.\n\n \"\"\"\n \n return '\\n'.join(['{{ {:.5g} {:.5g} {:.5g}; {:.5g} {:.5g} {:.5g}}}'.format(*list(base.removesmall(x.vec))) for x in self])\n\n def __repr__(self):\n \"\"\"\n %Twist.display Display parameters\n %\nL.display() displays the twist parameters in compact single line format. If L is a\nvector of Twist objects displays one line per element.\n %\nNotes::\n- This method is invoked implicitly at the command line when the result\n of an expression is a Twist object and the command has no trailing\n semicolon.\n %\nSee also Twist.char.\n \"\"\"\n \n if len(self) == 1:\n return \"Plucker([{:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}])\".format(*list(self.A))\n else:\n return \"Plucker([\\n\" + \\\n ',\\n'.join([\" [{:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}, {:.5g}]\".format(*list(tw)) for tw in self.data]) +\\\n \"\\n])\"\n \n def _repr_pretty_(self, p, cycle):\n \"\"\"\n Pretty string for IPython\n\n :param p: pretty printer handle (ignored)\n :param cycle: pretty printer flag (ignored)\n\n Print colorized output when variable is displayed in IPython, ie. 
on a line by\n itself.\n\n Example::\n\n In [1]: x\n\n \"\"\"\n if len(self) == 1:\n p.text(str(self))\n else:\n for i, x in enumerate(self):\n if i > 0:\n p.break_()\n p.text(f\"{i:3d}: {str(x)}\")\n\n# function z = side(self1, pl2)\n# Plucker.side Plucker side operator\n# \n# # X = SIDE(P1, P2) is the side operator which is zero whenever\n# # the lines P1 and P2 intersect or are parallel.\n# \n# # See also Plucker.or.\n# \n# if ~isa(self2, 'Plucker')\n# error('SMTB:Plucker:badarg', 'both arguments to | must be Plucker objects');\n# end\n# L1 = pl1.line(); L2 = pl2.line();\n# \n# z = L1([1 5 2 6 3 4]) * L2([5 1 6 2 4 3])';\n# end\n\n# \n# function z = intersect(self1, pl2)\n# Plucker.intersect Line intersection\n# \n# PL1.intersect(self2) is zero if the lines intersect. It is positive if PL2\n# passes counterclockwise and negative if PL2 passes clockwise. Defined as\n# looking in direction of PL1\n# \n# ---------->\n# o o\n# ---------->\n# counterclockwise clockwise\n# \n# z = dot(self1.w, pl1.v) + dot(self2.w, pl2.v);\n# end\n \n # Static factory methods for constructors from exotic representations\n\n\n \nif __name__ == '__main__': # pragma: no cover\n\n import pathlib\n import os.path\n \n a = SE3.Exp([2,0,0,0,0,0])\n\n exec(open(pathlib.Path(__file__).parent.parent.absolute() / \"tests\" / \"test_geom3d.py\").read()) # pylint: disable=exec-used" ]
[ [ "numpy.dot", "matplotlib.pyplot.gca", "numpy.eye", "numpy.linalg.norm", "numpy.finfo", "numpy.delete", "numpy.cross", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kohei-us/keras-onnx
[ "fdb9f68f20d60fd7cb244db3100ff1138ab6cc64" ]
[ "keras2onnx/ktf2onnx/tests/backend_test_base.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n\"\"\"Unit Test Base.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import variables as variables_lib\nfrom common import get_test_config\nfrom tf2onnx import utils\nfrom tf2onnx.tfonnx import process_tf_graph, tf_optimize\nfrom tf2onnx import optimizer\n\n\n# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test\n\nclass Tf2OnnxBackendTestBase(unittest.TestCase):\n def setUp(self):\n self.config = get_test_config()\n tf.reset_default_graph()\n # reset name generation on every test\n utils.INTERNAL_NAME = 1\n np.random.seed(1) # Make it reproducible.\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def tearDown(self):\n if not self.config.is_debug_mode:\n utils.delete_directory(self.test_data_directory)\n\n @property\n def test_data_directory(self):\n return os.path.join(self.config.temp_dir, self._testMethodName)\n\n @staticmethod\n def assertAllClose(expected, actual, **kwargs):\n np.testing.assert_allclose(expected, actual, **kwargs)\n\n @staticmethod\n def assertAllEqual(expected, actual, **kwargs):\n np.testing.assert_array_equal(expected, actual, **kwargs)\n\n def run_onnxcaffe2(self, onnx_graph, inputs):\n \"\"\"Run test against caffe2 backend.\"\"\"\n import caffe2.python.onnx.backend\n prepared_backend = caffe2.python.onnx.backend.prepare(onnx_graph)\n results = prepared_backend.run(inputs)\n return results\n\n def run_onnxmsrtnext(self, model_path, inputs, output_names):\n \"\"\"Run test against msrt-next backend.\"\"\"\n import lotus\n m = lotus.InferenceSession(model_path)\n results = m.run(output_names, inputs)\n return results\n\n def run_onnxruntime(self, model_path, inputs, output_names):\n \"\"\"Run test against msrt-next backend.\"\"\"\n import onnxruntime as rt\n m = rt.InferenceSession(model_path)\n results = m.run(output_names, inputs)\n return results\n\n def run_backend(self, g, outputs, input_dict):\n model_proto = g.make_model(\"test\")\n model_path = self.save_onnx_model(model_proto, input_dict)\n\n if self.config.backend == \"onnxmsrtnext\":\n y = self.run_onnxmsrtnext(model_path, input_dict, outputs)\n elif self.config.backend == \"onnxruntime\":\n y = self.run_onnxruntime(model_path, input_dict, outputs)\n elif self.config.backend == \"caffe2\":\n y = self.run_onnxcaffe2(model_proto, input_dict)\n else:\n raise ValueError(\"unknown backend\")\n return y\n\n def run_test_case(self, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-07, atol=1e-5,\n convert_var_to_const=True, constant_fold=True, check_value=True, check_shape=False,\n check_dtype=True, process_args=None, onnx_feed_dict=None, graph_validator=None):\n # optional - passed to process_tf_graph\n if process_args is None:\n process_args = {}\n # optional - pass distinct feed_dict to onnx runtime\n if onnx_feed_dict is None:\n onnx_feed_dict = feed_dict\n\n graph_def = None\n if convert_var_to_const:\n with tf.Session() as sess:\n variables_lib.global_variables_initializer().run()\n output_name_without_port = [n.split(':')[0] for n in output_names_with_port]\n graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,\n output_name_without_port)\n\n tf.reset_default_graph()\n 
tf.import_graph_def(graph_def, name='')\n\n with tf.Session() as sess:\n variables_lib.global_variables_initializer().run()\n output_dict = []\n for out_name in output_names_with_port:\n output_dict.append(sess.graph.get_tensor_by_name(out_name))\n expected = sess.run(output_dict, feed_dict=feed_dict)\n\n if self.config.is_debug_mode:\n if not os.path.exists(self.test_data_directory):\n os.makedirs(self.test_data_directory)\n model_path = os.path.join(self.test_data_directory, self._testMethodName + \"_original.pb\")\n utils.save_protobuf(model_path, sess.graph_def)\n self.logger.debug(\"created file %s\", model_path)\n\n graph_def = tf_optimize(input_names_with_port, output_names_with_port,\n sess.graph_def, constant_fold)\n\n if self.config.is_debug_mode:\n model_path = os.path.join(self.test_data_directory, self._testMethodName + \"_after_tf_optimize.pb\")\n utils.save_protobuf(model_path, graph_def)\n self.logger.debug(\"created file %s\", model_path)\n\n tf.reset_default_graph()\n tf.import_graph_def(graph_def, name='')\n\n with tf.Session() as sess:\n g = process_tf_graph(sess.graph, opset=self.config.opset, output_names=output_names_with_port,\n target=self.config.target, **process_args)\n g = optimizer.optimize_graph(g)\n actual = self.run_backend(g, output_names_with_port, onnx_feed_dict)\n\n for expected_val, actual_val in zip(expected, actual):\n if check_value:\n self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=atol)\n if check_dtype:\n self.assertEqual(expected_val.dtype, actual_val.dtype)\n if check_shape:\n self.assertEqual(expected_val.shape, actual_val.shape)\n\n if graph_validator:\n self.assertTrue(graph_validator(g))\n\n return g\n\n def save_onnx_model(self, model_proto, feed_dict, postfix=\"\"):\n target_path = utils.save_onnx_model(self.test_data_directory, self._testMethodName + postfix, feed_dict,\n model_proto, include_test_data=self.config.is_debug_mode,\n as_text=self.config.is_debug_mode)\n\n self.logger.debug(\"create model file: %s\", target_path)\n return target_path\n" ]
[ [ "tensorflow.graph_util.convert_variables_to_constants", "tensorflow.import_graph_def", "numpy.random.seed", "numpy.testing.assert_array_equal", "tensorflow.reset_default_graph", "tensorflow.Session", "numpy.testing.assert_allclose", "tensorflow.python.ops.variables.global_variables_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
TsingHuasuya/tfsnippet
[ "3f51f704e09c654eaf39e231948efed2868669fd" ]
[ "tfsnippet/examples/utils/multi_gpu.py" ]
[ "import multiprocessing as mp\nimport traceback\nfrom contextlib import contextmanager\n\nimport six\nimport tensorflow as tf\n\nfrom .misc import is_dynamic_tensor, cached\n\n__all__ = ['detect_gpus', 'average_gradients', 'MultiGPU']\n\n\n@cached\ndef detect_gpus():\n \"\"\"\n Detect the GPU devices and their interconnection on current machine.\n\n Returns:\n list[list[str]]: List of GPU groups, each group is a list of\n GPU device names. The GPUs in one group are interconnected.\n \"\"\"\n def worker(q):\n # `device_lib` will not release the memory it took,\n # so we run it in a sub-process.\n try:\n from tensorflow.python.client import device_lib\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n devices = list(device_lib.list_local_devices(config))\n gpus = [\n (device.name, device)\n for device in devices\n if device.device_type == 'GPU'\n ]\n union_set = {i: i for i in range(len(gpus))}\n\n for i, (name, device) in enumerate(gpus):\n assert (device.name == '/device:GPU:{}'.format(i))\n for link in device.locality.links.link:\n if link.device_id != i:\n union_set[i] = union_set[link.device_id]\n\n for i in six.iterkeys(union_set):\n while union_set[i] != union_set[union_set[i]]:\n union_set[i] = union_set[union_set[i]]\n\n root_devices = sorted(set(union_set.values()))\n gpu_groups = [[] for _ in range(len(root_devices))]\n dev_to_group = {j: i for i, j in enumerate(root_devices)}\n for i, (name, device) in enumerate(gpus):\n gpu_groups[dev_to_group[union_set[i]]].append(name)\n\n q.put((1, gpu_groups))\n except Exception:\n q.put((0, traceback.format_exc()))\n\n q = mp.Queue()\n p = mp.Process(target=worker, args=(q,))\n\n try:\n p.start()\n result = q.get()\n if result[0] == 1:\n return result[1]\n else:\n raise RuntimeError(\n 'Failed to retrieve GPU information, the traceback of '\n 'sub-process is:\\n {}'.\n format('\\n '.join(result[1].split('\\n')))\n )\n finally:\n p.terminate()\n p.join()\n\n\ndef average_gradients(tower_grads):\n \"\"\"\n Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n\n Source:\n https://github.com/tensorflow/models/blob/master/tutorials/image/\n cifar10/cifar10_multi_gpu_train.py\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer\n list is over individual gradients. The inner list is over the\n gradient calculation for each tower.\n\n Returns:\n List of pairs of (gradient, variable) where the gradient has been\n averaged across all towers.\n \"\"\"\n if len(tower_grads) == 1:\n return tower_grads[0]\n\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\n\nclass MultiGPU(object):\n \"\"\"\n Class to help build data-paralleled outputs and training operations.\n \"\"\"\n\n def __init__(self, disable_prebuild=False):\n \"\"\"\n Construct a :class:`MultiGPU`.\n\n Args:\n disable_prebuild: Whether or not to disable pre-build on CPU?\n Some operations (e.g., NCHW convolutional kernels) may not be\n supported by CPUs for the time being, thus the pre-building on\n CPUs might need to be disabled.\n \"\"\"\n gpu_groups = detect_gpus()\n if not gpu_groups:\n self._main_device = '/device:CPU:0'\n elif len(gpu_groups) != 1 and not disable_prebuild:\n self._main_device = '/device:CPU:0'\n else:\n self._main_device = gpu_groups[0][0]\n\n self._disable_prebuild = disable_prebuild\n self._gpu_devices = tuple(sum(gpu_groups, []))\n self._work_devices = self._gpu_devices \\\n if self._gpu_devices else [self._main_device]\n\n @property\n def disable_prebuild(self):\n \"\"\"Whether or not to disable pre-build on CPU?\"\"\"\n return self._disable_prebuild\n\n @property\n def main_device(self):\n \"\"\"\n Get the main device name.\n\n Main device is the device for storing variables, and for gathering\n losses / gradients during training. It may not be necessary one\n of the `work_devices`. Do not run the model computation graph on the\n `main_device`, otherwise the `channels_last` parameter for convolutional\n layers might result in undesired behaviors.\n \"\"\"\n return self._main_device\n\n @property\n def work_devices(self):\n \"\"\"\n Get the names of the working devices.\n\n The model computation graph should be run only on these devices.\n Do not run them on the `main_device`, otherwise the `channels_last`\n parameter for convolutional layers might result in undesired behaviors.\n \"\"\"\n return self._work_devices\n\n @property\n def gpu_devices(self):\n \"\"\"Get the names of GPU devices.\"\"\"\n return self._gpu_devices\n\n def is_gpu_device(self, device):\n \"\"\"Check whether or not `device` is a GPU device.\"\"\"\n return device in self._gpu_devices\n\n def channels_last(self, device):\n \"\"\"\n Get the `channels_last` argument for `device`.\n\n It will be :obj:`True` for non-GPU devices, :obj:`False` for GPUs.\n Be careful if you want to build a model on both CPU and GPU devices,\n with ``channels_last = multi_gpu.channels_last(device)``.\n The convolutional layers will work as desired, but the dense layers\n after or before a convolutional layer will not work properly, unless\n special treatment is taken.\n \"\"\"\n return device not in self._gpu_devices\n\n def data_parallel(self, batch_size, inputs):\n \"\"\"\n Iterate through all devices and build the data-paralleled model.\n\n Args:\n batch_size (int or tf.Tensor): The size of each mini-batch.\n inputs (Iterable[tf.Tensor]): Input placeholders to be sliced\n for data parallelism. 
The input placeholders will be sliced\n through the first dimension.\n\n Yields:\n str, bool, tuple[tf.Tensor]: ``(dev, pre_build, inputs)``,\n the device name, a flag indicating whether this is a\n pre-building pass for creating variables on CPU, and the\n tuple of sliced input placeholders.\n \"\"\"\n inputs = list(inputs)\n\n # quick path: only one device, do not slice\n if len(self.work_devices) == 1:\n assert(self.main_device == self.work_devices[0])\n yield self.main_device, False, tuple(inputs)\n\n # slow path: multi-GPUs\n else:\n # the GPUs are not in the same group, place variables on CPU\n if self.main_device not in self.work_devices:\n yield self.main_device, True, tuple(inputs)\n\n # build the paralleled computation graph for each device\n k = len(self.work_devices)\n for i, device in enumerate(self.work_devices):\n dev_inputs = []\n for inp in inputs:\n slice_len = (batch_size + k - 1) // k\n low, high = slice_len * i, slice_len * (i + 1)\n dev_inputs.append(inp[low: high])\n yield device, False, tuple(dev_inputs)\n\n @contextmanager\n def maybe_name_scope(self, device):\n \"\"\"\n Generate a name scope if `device` is not `main_device`.\n\n Args:\n device (str): The name of the device.\n\n Yields\n The generated name scope, or None.\n \"\"\"\n if device == self.main_device:\n yield\n elif device not in self._gpu_devices:\n with tf.name_scope('tower_cpu') as ns:\n yield ns\n else:\n gpu_id = self._gpu_devices.index(device)\n with tf.name_scope('tower_gpu_{}'.format(gpu_id)) as ns:\n yield ns\n\n def average_grads(self, grads):\n \"\"\"\n Take the averaged gradients on the main device.\n\n Args:\n grads: List of lists of (gradients, variables) pairs.\n\n Returns:\n List of pairs of (gradient, variable) where the gradient has been\n averaged across all devices.\n \"\"\"\n # quick path: only one device, just return the grads\n if len(grads) == 1:\n return grads[0]\n\n # slow path: multi-GPUs\n else:\n with tf.device(self.main_device), tf.name_scope('average_grads'):\n return average_gradients(grads)\n\n def apply_grads(self, grads, optimizer, global_step=None,\n control_inputs=None):\n \"\"\"\n Apply the gradients.\n\n Args:\n grads: List of (gradients, variables) pairs.\n optimizer: The TensorFlow optimizer.\n global_step: The optional global step counter.\n control_inputs: Dependency operations before applying the gradients.\n\n Returns:\n The operation of applying gradients.\n \"\"\"\n def mk_op():\n return optimizer.apply_gradients(grads, global_step=global_step)\n\n with tf.device(self.main_device), tf.name_scope('apply_grads'):\n if control_inputs:\n with tf.control_dependencies(control_inputs):\n return mk_op()\n else:\n return mk_op()\n\n def average(self, tensors, batch_size=None):\n \"\"\"\n Take the average of given tensors from different devices.\n\n If `batch_size` is specified, the tensors will be averaged with respect\n to the size of data fed to each device.\n\n Args:\n tensors (list[list[tf.Tensor]]): List of tensors from each device.\n batch_size (None or int or tf.Tensor): The optional batch size.\n\n Returns:\n list[tf.Tensor]: The averaged tensors.\n \"\"\"\n # check the arguments and try the fast path: only one tensor\n tensors = list(tensors)\n if not tensors:\n return []\n length = len(tensors[0])\n if length == 0:\n raise ValueError('`tensors` must be list of non-empty Tensor '\n 'lists.')\n for t in tensors[1:]:\n if len(t) != length:\n raise ValueError('`tensors` must be list of Tensor lists of '\n 'the same length.')\n if length == 1:\n return [t[0] 
for t in tensors]\n\n # do the slow path: average all tensors\n with tf.device(self.main_device), tf.name_scope('average_tensors'):\n if batch_size is None:\n return [tf.reduce_mean(tf.stack(t), axis=0) for t in tensors]\n\n k = len(self.work_devices)\n slice_len = (batch_size + k - 1) // k\n last_slice_size = batch_size - (k - 1) * slice_len\n\n if is_dynamic_tensor(batch_size):\n to_float = tf.to_float\n else:\n to_float = float\n\n float_batch_size = to_float(batch_size)\n weights = tf.stack(\n [to_float(slice_len) / float_batch_size] * (k - 1) +\n [to_float(last_slice_size) / float_batch_size]\n )\n\n return [tf.reduce_sum(tf.stack(t) * weights, axis=0)\n for t in tensors]\n\n def concat(self, tensors):\n \"\"\"\n Concat given tensors from different devices.\n\n Args:\n tensors (list[list[tf.Tensor]]): List of tensors from each device.\n\n Returns:\n list[tf.Tensor]: The concatenated tensors.\n \"\"\"\n # check the arguments and try the fast path: only one tensor\n tensors = list(tensors)\n if not tensors:\n return []\n length = len(tensors[0])\n if length == 0:\n raise ValueError('`tensors` must be list of non-empty Tensor '\n 'lists.')\n for t in tensors[1:]:\n if len(t) != length:\n raise ValueError('`tensors` must be list of Tensor lists of '\n 'the same length.')\n if length == 1:\n return [t[0] for t in tensors]\n\n # do the slow path: concat all tensors\n with tf.device(self.main_device), tf.name_scope('concat_tensors'):\n return [tf.concat(t, axis=0) for t in tensors]\n" ]
[ [ "tensorflow.device", "tensorflow.concat", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.reduce_mean", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.expand_dims", "tensorflow.ConfigProto", "tensorflow.name_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
TakuyaShintate/tsts
[ "483db1edc2f765a5449137446a77acaf17684ce9" ]
[ "tsts/solvers/solver.py" ]
[ "import json\nimport os\nimport sys\nimport warnings\nfrom typing import List, Optional\n\nimport torch\nfrom torch.optim import Optimizer\nfrom torch.utils.data import ConcatDataset\nfrom tsts.cfg import get_cfg_defaults\nfrom tsts.collators import Collator, build_collator\nfrom tsts.core import ContextManager\nfrom tsts.dataloaders import DataLoader, build_dataloader\nfrom tsts.datasets import Dataset, build_dataset\nfrom tsts.loggers import Logger, build_logger\nfrom tsts.losses import Loss\nfrom tsts.losses.builder import build_losses\nfrom tsts.metrics import Metric, build_metrics\nfrom tsts.models import Module, build_model\nfrom tsts.models.localscalers import build_local_scaler\nfrom tsts.optimizers import build_optimizer\nfrom tsts.scalers import Scaler\nfrom tsts.schedulers import Scheduler, build_scheduler\nfrom tsts.trainers import Trainer, build_trainer\nfrom tsts.types import MaybeRawDataset, RawDataset\nfrom tsts.utils import set_random_seed\n\n__all__ = [\"Solver\"]\n\n\nclass Solver(object):\n \"\"\"Base solver class.\n\n It has methods to build modules used to start training and inference, and has some utility\n methods.\n\n Parameters\n ----------\n cfg_path : str, optional\n Path to custom config file, by default None\n\n verbose : bool, optional\n If True, it prints meta info on console, by default True\n \"\"\"\n\n def __init__(self, cfg_path: Optional[str] = None, verbose: bool = True) -> None:\n super(Solver, self).__init__()\n self.cfg_path = cfg_path\n self.verbose = verbose\n self._init_internal_state()\n\n def _init_internal_state(self) -> None:\n self._init_cfg()\n seed = self.cfg.SEED\n set_random_seed(seed)\n self._init_context_manager()\n if self.log_dir_exist() is True:\n self._load_meta_info()\n\n def _load_meta_info(self) -> None:\n if self.verbose is True:\n sys.stdout.write(\"Log directory found \\n\")\n sys.stdout.write(\"Restoring state ...\")\n log_dir = self.cfg.LOGGER.LOG_DIR\n meta_info_path = os.path.join(log_dir, \"meta.json\")\n with open(meta_info_path, \"r\") as f:\n meta_info = json.load(f)\n for (k, v) in meta_info.items():\n self.context_manager[k] = v\n if self.verbose is True:\n sys.stdout.write(\"\\t [done] \\n\")\n\n def _init_cfg(self) -> None:\n self.cfg = get_cfg_defaults()\n if self.cfg_path is not None:\n self.cfg.merge_from_file(self.cfg_path)\n\n def _init_context_manager(self) -> None:\n self.context_manager = ContextManager()\n\n def infer_num_in_feats(self, X: RawDataset) -> int:\n num_in_feats = X[0].size(-1)\n return num_in_feats\n\n def infer_num_out_feats(self, y: RawDataset) -> int:\n num_out_feats = self.infer_num_in_feats(y)\n return num_out_feats\n\n def log_dir_exist(self) -> bool:\n log_dir = self.cfg.LOGGER.LOG_DIR\n return os.path.exists(log_dir)\n\n def build_model(\n self,\n num_in_feats: int,\n num_out_feats: int,\n ) -> Module:\n model = build_model(\n num_in_feats,\n num_out_feats,\n self.cfg,\n )\n device = self.cfg.DEVICE\n model.to(device)\n log_dir = self.cfg.LOGGER.LOG_DIR\n if self.log_dir_exist() is True:\n try:\n model_path = os.path.join(log_dir, \"model.pth\")\n state_dict = torch.load(model_path)\n model.load_state_dict(state_dict)\n except FileNotFoundError:\n warnings.warn(\"Failed to load pretrained model\")\n return model\n\n def build_local_scaler(\n self,\n num_in_feats: int,\n num_out_feats: int,\n ) -> Module:\n local_scaler = build_local_scaler(\n num_in_feats,\n num_out_feats,\n self.cfg,\n )\n device = self.cfg.DEVICE\n local_scaler.to(device)\n log_dir = self.cfg.LOGGER.LOG_DIR\n if 
self.log_dir_exist() is True:\n try:\n local_scaler_path = os.path.join(log_dir, \"local_scaler.pth\")\n state_dict = torch.load(local_scaler_path)\n local_scaler.load_state_dict(state_dict)\n except FileNotFoundError:\n warnings.warn(\"Failed to load pretrained local scaler\")\n return local_scaler\n\n def build_losses(self) -> List[Loss]:\n losses = build_losses(self.cfg)\n device = self.cfg.DEVICE\n for loss in losses:\n loss.to(device)\n return losses\n\n def build_metrics(self) -> List[Metric]:\n metrics = build_metrics(self.cfg)\n device = self.cfg.DEVICE\n for metric in metrics:\n metric.to(device)\n return metrics\n\n def build_optimizer(self, model: Module, local_scaler: Module) -> Optimizer:\n params = list(model.parameters()) + list(local_scaler.parameters())\n optimizer = build_optimizer(params, self.cfg)\n return optimizer\n\n def build_scheduler(\n self,\n optimizer: Optimizer,\n iters_per_epoch: int,\n ) -> Scheduler:\n scheduler = build_scheduler(\n optimizer, # type: ignore\n iters_per_epoch,\n self.cfg,\n )\n return scheduler\n\n def build_train_dataset(\n self,\n X: RawDataset,\n y: RawDataset,\n time_stamps: MaybeRawDataset,\n X_scaler: Scaler,\n y_scaler: Scaler,\n ) -> Dataset:\n train_datasets = []\n num_datasets = len(X)\n for i in range(num_datasets):\n td = build_dataset(\n X[i],\n y[i],\n time_stamps[i] if time_stamps is not None else None,\n \"train\",\n X_scaler,\n y_scaler,\n self.cfg,\n )\n train_datasets.append(td)\n train_dataset = ConcatDataset(train_datasets) # type: ignore\n return train_dataset # type: ignore\n\n def build_valid_dataset(\n self,\n X: RawDataset,\n y: RawDataset,\n time_stamps: MaybeRawDataset,\n X_scaler: Scaler,\n y_scaler: Scaler,\n ) -> Dataset:\n valid_datasets = []\n num_datasets = len(X)\n for i in range(num_datasets):\n vd = build_dataset(\n X[i],\n y[i],\n time_stamps[i] if time_stamps is not None else None,\n \"valid\",\n X_scaler,\n y_scaler,\n self.cfg,\n )\n valid_datasets.append(vd)\n valid_dataset = ConcatDataset(valid_datasets) # type: ignore\n return valid_dataset # type: ignore\n\n def build_collator(self) -> Collator:\n collator = build_collator(self.cfg)\n return collator\n\n def build_train_dataloader(\n self,\n train_dataset: Dataset,\n collator: Collator,\n ) -> DataLoader:\n train_dataloader = build_dataloader(\n train_dataset,\n \"train\",\n collator,\n self.cfg,\n )\n return train_dataloader\n\n def build_valid_dataloader(\n self,\n valid_dataset: Dataset,\n collator: Collator,\n ) -> DataLoader:\n valid_dataloader = build_dataloader(\n valid_dataset,\n \"valid\",\n collator,\n self.cfg,\n )\n return valid_dataloader\n\n def build_trainer(\n self,\n model: Module,\n local_scaler: Module,\n losses: List[Loss],\n metrics: List[Metric],\n optimizer: Optimizer,\n scheduler: Scheduler,\n train_dataloader: DataLoader,\n valid_dataloader: DataLoader,\n ) -> Trainer:\n trainer = build_trainer(\n model,\n local_scaler,\n losses,\n metrics,\n optimizer, # type: ignore\n scheduler,\n train_dataloader,\n valid_dataloader,\n self.cfg,\n )\n return trainer\n\n def build_logger(\n self,\n model: Module,\n local_scaler: Module,\n losses: List[Loss],\n metrics: List[Metric],\n context_manager: ContextManager,\n ) -> Logger:\n logger = build_logger(\n model,\n local_scaler,\n losses,\n metrics,\n context_manager,\n self.cfg,\n )\n return logger\n" ]
[ [ "torch.utils.data.ConcatDataset", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shivanshuman021/RouteIC
[ "d5e2faddf60fc6756c414f8ca5d19e4409b5c212" ]
[ "model/train_fcn_pytorch.py" ]
[ "import argparse\nimport os\nimport time\nimport sys\n\nimport h5py\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (10.0, 10.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, TensorDataset\n\nsys.path.append('../')\nfrom datagen.decoder import decodeData\n\nparser = argparse.ArgumentParser(description='Deep-Route: Training a deep FCN network to route circuit layouts.')\nparser.add_argument('--data', metavar='PATH', default=os.getcwd()+'/data/', help='path to dataset (default: ./data/)')\nparser.add_argument('--batch_size', metavar='N', default=100, type=int, help='mini-batch size (default: 100)')\nparser.add_argument('--num_workers', metavar='N', default=4, type=int, help='number of data loading workers (default: 4)')\nparser.add_argument('--num_epochs', metavar='N', default=200, type=int, help='number of total epochs to run (default: 200)')\nparser.add_argument('--use_gpu', action='store_true', help='use GPU if available')\nparser.add_argument('--pretrained', action='store_true', help='use pre-trained model')\nparser.add_argument('--lr', metavar='LR', default=5e-4, type=float, help='initial learning rate (default: 5e-4)')\nparser.add_argument('--adapt_lr', action='store_true', help='use learning rate schedule')\nparser.add_argument('--reg', metavar='REG', default=1e-5, type=float, help='regularization strength (default: 1e-5)')\nparser.add_argument('--print-freq', metavar='N', default=10, type=int, help='print frequency (default: 10)')\n\n\ndef main(args):\n # Unutilized GPU notification\n if torch.cuda.is_available() and not args.use_gpu:\n print(\"GPU is available. 
Provide command line flag --use_gpu to use it!\")\n \n # To run on GPU, specify command-line flag --use_gpu\n if args.use_gpu and torch.cuda.is_available():\n dtype = torch.cuda.FloatTensor\n else:\n dtype = torch.FloatTensor\n \n # Dataset filenames\n train_fname = 'train_50k_32pix.hdf5'\n val_fname = 'val_10k_32pix.hdf5'\n \n # Save dir\n train_id = 'train50k_val10k_pix32' + '_lr' + str(args.lr) + '_reg' + str(args.reg) + '_batchsize' + str(args.batch_size) + '_epochs' + str(args.num_epochs) + '_gpu' + str(args.use_gpu)\n save_dir = os.getcwd() + '/training/' + train_id + '/'\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n # Weighted loss to overcome unbalanced dataset (>98% pixels are off ('0'))\n weight = torch.Tensor([1, 3]).type(dtype)\n \n # Read dataset at path provided by --data command-line flag\n train_data = h5py.File(args.data + train_fname, 'r')\n X_train = np.asarray(train_data['X']) # Data: X_train.shape = (N, 1, H, W); X_train.dtype = uint8\n Y_train = np.asarray(train_data['Y']) # Labels: Y_train.shape = (N, 8, H, W); Y_train.dtype = uint8\n print(\"X_train: %s \\nY_train: %s\\n\" %(X_train.shape, Y_train.shape))\n\n val_data = h5py.File(args.data + val_fname, 'r')\n X_val = np.asarray(val_data['X'])\n Y_val = np.asarray(val_data['Y'])\n print(\"X_val: %s \\nY_val: %s\\n\" %(X_val.shape, Y_val.shape))\n\n # Dimensions\n N_train = X_train.shape[0]\n N_val = X_val.shape[0]\n C = Y_train.shape[1]\n H = X_train.shape[2]\n W = X_train.shape[3]\n dims_X = [-1, 1, H, W]\n dims_Y = [-1, C, H, W]\n \n # Setup DataLoader\n # https://stackoverflow.com/questions/41924453/pytorch-how-to-use-dataloaders-for-custom-datasets \n # PyTorch tensors are of type torch.ByteTensor (8 bit unsigned int)\n # Stored as 2D --> train: (N, 1*H*W), val: (N, 8*H*W)\n train_dset = TensorDataset(torch.from_numpy(X_train).view(N_train, -1),\n torch.from_numpy(Y_train).view(N_train, -1))\n \n train_loader = DataLoader(train_dset, batch_size=args.batch_size,\n # Disable shuffling in debug mode\n #num_workers=args.num_workers, shuffle=False)\n num_workers=args.num_workers, shuffle=True)\n \n val_dset = TensorDataset(torch.from_numpy(X_val).view(N_val, -1),\n torch.from_numpy(Y_val).view(N_val, -1))\n \n val_loader = DataLoader(val_dset, batch_size=args.batch_size,\n num_workers=args.num_workers, shuffle=False)\n \n # Define NN architecture\n model = nn.Sequential( # Input (N, 1, 32, 32) \n nn.Conv2d(1, 16, kernel_size=33, stride=1, padding=16, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n \n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n \n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n \n # Layer 5\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 
16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n # Layer 10\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n nn.BatchNorm2d(16),\n nn.LeakyReLU(inplace=True),\n\n # Layer 15\n nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=True), # Output (N, 16, 32, 32)\n )\n\n # Load pretrained model parameters\n if args.pretrained:\n model.load_state_dict(torch.load(save_dir + '../saved_model_params'))\n \n # Cast model to the correct datatype\n model.type(dtype) \n \n loss_fn = nn.CrossEntropyLoss(weight=weight).type(dtype)\n \n # Use Adam optimizer with default betas\n optimizer = optim.Adam(model.parameters(), lr=args.lr,\n betas=(0.9, 0.999), weight_decay=args.reg)\n loss_history = []\n train_precision = [0]\n train_recall = [0]\n train_f1score = [0]\n val_precision = [0]\n val_recall = [0]\n val_f1score = [0]\n \n best_val_f1score = 0\n \n epoch_time = AverageMeter()\n end = time.time()\n \n # Run the model for given epochs\n for epoch in range(args.num_epochs):\n # Adaptive learning rate schedule\n if args.adapt_lr:\n adjust_learning_rate(optimizer, epoch)\n \n # Run an epoch over the training data\n loss = train(model, train_loader, loss_fn, optimizer, dtype, dims_X, dims_Y, epoch)\n loss_history.extend(loss)\n \n # Check precision/recall/accuracy/F1_score on the train and val sets\n prec, rec, f1 = check_accuracy(model, train_loader, dtype, dims_X, dims_Y, epoch, save_dir, 'train')\n train_precision.append(prec)\n train_recall.append(rec)\n train_f1score.append(f1) \n prec, rec, f1 = check_accuracy(model, val_loader, dtype, dims_X, dims_Y, epoch, save_dir, 'val') \n val_precision.append(prec)\n val_recall.append(rec)\n val_f1score.append(f1)\n\n plt.subplot(2, 2, 1)\n plt.title('Training loss')\n plt.plot(loss_history, 'o')\n plt.yscale('log')\n plt.xlabel('Iteration')\n\n plt.subplot(2, 2, 2)\n plt.title('Accuracy (F1 Score)')\n plt.plot(train_f1score, '-o', label='train')\n plt.plot(val_f1score, '-o', label='val')\n plt.xlabel('Epoch')\n plt.legend(loc='lower right')\n \n plt.subplot(2, 2, 3)\n plt.title('Precision')\n plt.plot(train_precision, '-o', label='train')\n plt.plot(val_precision, '-o', label='val')\n plt.xlabel('Epoch')\n plt.legend(loc='lower right')\n\n plt.subplot(2, 2, 4)\n plt.title('Recall')\n plt.plot(train_recall, '-o', label='train')\n plt.plot(val_recall, '-o', label='val')\n plt.xlabel('Epoch')\n plt.legend(loc='lower right')\n\n plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0) \n plt.savefig(save_dir + 'training_history.jpg')\n #plt.savefig(save_dir + 'training_history.eps', format='eps')\n plt.close()\n \n # Save best model parameters\n if f1 > best_val_f1score:\n best_val_f1score = f1\n print('Saving best model parameters with Val F1 score = %.4f' %(best_val_f1score))\n 
torch.save(model.state_dict(), save_dir + 'saved_model_params')\n \n\n # Measure elapsed time\n epoch_time.update(time.time() - end)\n end = time.time()\n \n print('Timer Epoch [{0}/{1}]\\t'\n 't_epoch {epoch_time.val:.3f} ({epoch_time.avg:.3f})'.format(\n epoch+1, args.num_epochs, epoch_time=epoch_time))\n\n\ndef train(model, loader, loss_fn, optimizer, dtype, dims_X, dims_Y, epoch):\n \"\"\"\n Train the model for one epoch\n \"\"\"\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n \n # Set the model to training mode\n model.train()\n\n loss_hist = []\n \n end = time.time()\n for i, (x, y) in enumerate(loader):\n # The DataLoader produces 2D Torch Tensors, so we need to reshape them to 4D,\n # cast them to the correct datatype and wrap them in Variables.\n #\n # Note that the labels should be a torch.LongTensor on CPU and a\n # torch.cuda.LongTensor on GPU; to accomplish this we first cast to dtype\n # (either torch.FloatTensor or torch.cuda.FloatTensor) and then cast to\n # long; this ensures that y has the correct type in both cases.\n \n # Measure data loading time\n data_time.update(time.time() - end)\n \n x = x.view(dims_X) # (N_batch, 1, H, W)\n y = y.view(dims_Y) # (N_batch, 8, H, W)\n x_var = Variable(x.type(dtype), requires_grad=False)\n y_var = Variable(y.type(dtype).long(), requires_grad=False)\n\n # Run the model forward to compute scores and loss\n scores = model(x_var) # (N_batch, 16, H, W)\n \n # To convert scores from (N_batch, 16, H, W) to (N_batch*H*W*8, 2) where 2 = number of classes (on/off),\n # for PyTorch's cross entropy loss format (http://pytorch.org/docs/nn.html#crossentropyloss)\n _, twoC, _, _ = scores.size()\n scores = scores.permute(0, 2, 3, 1).contiguous().view(-1, twoC) # (N_batch*H*W, twoC)\n scores = torch.cat((scores[:, 0:twoC:2].contiguous().view(-1, 1), \n scores[:, 1:twoC:2].contiguous().view(-1, 1)), 1) # (N_batch*H*W*8, 2)\n \n # To convert y_var from (N_batch, 8, H, W) to (N_batch*H*W*8)\n # for PyTorch's cross entropy loss format (http://pytorch.org/docs/nn.html#crossentropyloss)\n y_var = y_var.permute(0, 2, 3, 1).contiguous().view(-1) # (N_batch*H*W*8)\n \n # Use cross entropy loss - 16 filter case\n loss = loss_fn(scores, y_var)\n \n losses.update(loss.data[0], y_var.size(0)) \n loss_hist.append(loss.data[0])\n \n # Run the model backward and take a step using the optimizer\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (i % args.print_freq == 0) or (i+1 == len(loader)):\n print('Train Epoch [{0}/{1}]\\t'\n 'Batch [{2}/{3}]\\t'\n 't_total {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 't_data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(\n epoch+1, args.num_epochs, i+1, len(loader), batch_time=batch_time,\n data_time=data_time, loss=losses))\n \n return loss_hist\n\n\ndef check_accuracy(model, loader, dtype, dims_X, dims_Y, epoch, save_dir, train_val):\n \"\"\"\n Check the accuracy of the model\n \"\"\"\n # Filenames\n y_pred_fname = 'Y_' + train_val + '_pred'\n y_act_fname = 'Y_' + train_val + '_act'\n \n # Set the model to eval mode\n model.eval()\n\n tp, tn, fp, fn = 0, 0, 0, 0\n \n for i, (x, y) in enumerate(loader):\n # Reshape 2D torch tensors from DataLoader to 4D, cast to the \n # correct type and wrap it in a Variable.\n #\n # At test-time when we do not need to compute gradients, marking \n # the Variable as volatile can reduce memory usage 
and slightly improve speed.\n x = x.view(dims_X) # (N_batch, 1, H, W)\n y = y.view(dims_Y) # (N_batch, 8, H, W)\n x_var = Variable(x.type(dtype), volatile=True)\n y_var = Variable(y.type(dtype), volatile=True)\n\n # Run the model forward, and compute the y_pred to compare with the ground-truth\n scores = model(x_var) # (N, 16, H, W)\n \n _, twoC, _, _ = scores.size() \n scores_off = scores[:, 0:twoC:2, :, :]\n scores_on = scores[:, 1:twoC:2, :, :]\n \n y_pred = (scores_on > scores_off) # (N_batch, 8, H, W)\n \n # Precision / Recall / F-1 Score\n #https://en.wikipedia.org/wiki/Precision_and_recall\n # tp = true_pos, tn = true_neg, fp = false_pos, fn = false_neg\n tp += ((y_pred.data == 1) * (y_var.data == 1)).sum()\n tn += ((y_pred.data == 0) * (y_var.data == 0)).sum()\n fp += ((y_pred.data == 1) * (y_var.data == 0)).sum()\n fn += ((y_pred.data == 0) * (y_var.data == 1)).sum()\n \n # Preview images from first mini-batch after every 5% of epochs\n # E.g., if num_epochs = 20, preview every 1 epoch\n # if num_epochs = 200, preview every 10 epochs\n if i == 0 and ((epoch % (args.num_epochs*5//100) == 0) or (epoch+1 == args.num_epochs)):\n Y_act_dec = decodeData(y_var.data.cpu().numpy()) # (N_batch, 3, H, W)\n Y_act_dec = np.swapaxes(Y_act_dec, 1, 2) # (N_batch, H, 3, W)\n Y_act_dec = np.swapaxes(Y_act_dec, 2, 3) # (N_batch, H, W, 3)\n\n Y_pred_dec = decodeData(y_pred.data.cpu().numpy()) # (N_batch, 3, H, W)\n Y_pred_dec = np.swapaxes(Y_pred_dec, 1, 2) # (N_batch, H, 3, W)\n Y_pred_dec = np.swapaxes(Y_pred_dec, 2, 3) # (N_batch, H, W, 3)\n\n num_images = 9\n for n in range(num_images):\n plt.subplot(3, 3, n+1)\n plt.imshow(Y_act_dec[n].astype('uint8'))\n #plt.axis('off')\n plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)\n plt.title('Y_%s_actual (epoch %d)' % (train_val, epoch+1))\n plt.savefig(save_dir + 'epoch_' + str(epoch+1) + '_' + y_act_fname + '.jpg')\n #plt.savefig(save_dir + 'epoch_' + str(epoch+1) + '_' + y_act_fname + '.eps', format='eps')\n plt.close()\n\n for n in range(num_images):\n plt.subplot(3, 3, n+1)\n plt.imshow(Y_pred_dec[n].astype('uint8'))\n #plt.axis('off')\n plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)\n plt.title('Y_%s_predicted (epoch %d)' % (train_val, epoch+1))\n plt.savefig(save_dir + 'epoch_' + str(epoch+1) + '_' + y_pred_fname + '.jpg')\n #plt.savefig(save_dir + 'epoch_' + str(epoch+1) + '_' + y_pred_fname + '.eps', format='eps')\n plt.close()\n\n # 1e-8 to avoid division by zero \n precision = tp / (tp + fp + 1e-8)\n recall = tp / (tp + fn)\n accuracy = (tp + tn) / (tp + tn + fp + fn)\n f1_score = 2 * (precision*recall) / (precision + recall + 1e-8)\n\n print('{0}\\t'\n 'Check Epoch [{1}/{2}]\\t'\n 'Precision {p:.4f}\\t'\n 'Recall {r:.4f}\\t'\n 'Accuracy {a:.4f}\\t'\n 'F1 score {f1:.4f}'.format(\n train_val, epoch+1, args.num_epochs, p=precision, r=recall, a=accuracy, f1=f1_score))\n \n return precision, recall, f1_score\n\n\ndef bce_loss(input, target):\n \"\"\"\n Numerically stable version of the binary cross-entropy loss function.\n\n As per https://github.com/pytorch/pytorch/issues/751\n See the TensorFlow docs for a derivation of this formula:\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Inputs:\n - input: PyTorch Variable of shape (N, 8, H, W) giving scores.\n - target: PyTorch Variable of shape (N, 8, H, W) containing 0 and 1 giving targets.\n\n Returns:\n - A PyTorch Variable containing the mean BCE loss over the minibatch of input data.\n \"\"\"\n # bce_loss(input, target) = target * -log(sigmoid(input)) + 
(1 - target) * -log(1 - sigmoid(input))\n \n neg_abs = - input.abs()\n bce_loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() # (N, 8, H, W)\n return bce_loss.mean()\n\n\ndef wt_bce_loss(input, target, weight):\n \"\"\"\n Numerically stable version of the weighted binary cross-entropy loss function.\n\n See the TensorFlow docs for a derivation of this formula:\n https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits\n\n Inputs:\n - input: PyTorch Variable of shape (N, 8, H, W) giving scores.\n - target: PyTorch Variable of shape (N, 8, H, W) containing 0 and 1 giving targets.\n\n Returns:\n - A PyTorch Variable containing the mean weighted BCE loss over the minibatch of input data.\n \"\"\"\n # wt_bce_loss(input, target, weight) = weight * target * -log(sigmoid(input)) + (1 - target) * -log(1 - sigmoid(input))\n # Stable form per the TF docs: (1 - z) * x + l * (log(1 + exp(-|x|)) + max(-x, 0)), with l = 1 + (weight - 1) * z\n \n neg_abs = - input.abs()\n wt_bce_loss = (1 - target) * input + (1 + (weight - 1) * target) * ((1 + neg_abs.exp()).log() + (-input).clamp(min=0)) # (N, 8, H, W)\n return wt_bce_loss.mean()\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\" \n lr = args.lr * (0.1 ** (epoch // 30))\n print(\"Adaptive learning rate: %e\" %(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n \nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n" ]
[ [ "matplotlib.pyplot.legend", "torch.load", "numpy.asarray", "torch.utils.data.DataLoader", "matplotlib.pyplot.plot", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "matplotlib.pyplot.tight_layout", "numpy.swapaxes", "torch.from_numpy", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.title", "torch.nn.Conv2d", "matplotlib.pyplot.savefig", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.Tensor", "matplotlib.use", "matplotlib.pyplot.yscale", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajijohn/planet-snowcover
[ "c1dde5a1984ef12bf293680968e93252a2ad240f" ]
[ "pipeline/Prepare_NDVI.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\n\n#get_ipython().system('export CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt')\n\n\n# In[2]:\n\n\n#get_ipython().run_line_magic('matplotlib', 'inline')\nfrom osgeo import gdal\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport rasterio as rio\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport rasterio.plot\nimport os\nfrom datetime import datetime as dt\nfrom rasterio import Affine, MemoryFile\nfrom rasterio.enums import Resampling\nimport numpy\nscale=1\n\n\n# In[42]:\n\n\n# Validate the see if the clips can be used.\n#get_ipython().run_line_magic('matplotlib', 'inline')\nfrom os import environ\nimport sys\nimport io\nfrom os.path import expanduser\nimport pprint\nimport s3fs\nimport boto3\nimport io\nfrom re import match\n#get_ipython().run_line_magic('matplotlib', 'inline')\nimport rasterio as rio\n\nimport earthpy as et\nimport earthpy.spatial as es\nimport earthpy.plot as ep\nimport numpy as np\nimport numpy\n#get_ipython().run_line_magic('matplotlib', 'inline')\nimport rasterio as rio\nfrom matplotlib import pyplot as plt\nimport rasterio.plot\nimport os\nfrom datetime import datetime as dt\nfrom rasterio.io import MemoryFile\nimport tempfile\n\nsys.path.append(\"../model/robosat_pink/\")\nfrom robosat_pink.config import load_config\nconfig_location= '/home/ubuntu/planet-snowcover/experiments/co-train.toml'\nconfig = load_config(config_location)\n\n\np = pprint.PrettyPrinter()\n\nfs = s3fs.S3FileSystem(session = boto3.Session(profile_name = config['dataset']['aws_profile']))\n\nimagery_searchpath = config['dataset']['image_bucket'] + '/' + config['dataset']['imagery_directory_regex']\nprint(\"Searching for imagery...({})\".format(imagery_searchpath))\nimagery_candidates = fs.ls(config['dataset']['image_bucket'])\n#print(\"candidates:\")\n#p.pprint(imagery_candidates)\nimagery_locs = [c for c in imagery_candidates if match(imagery_searchpath, c)]\nprint(\"result:\")\np.pprint(imagery_locs)\n\n\n# In[4]:\n\n\n#get_ipython().system('export CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt')\n\n\n# In[ ]:\n\n\n#Revised\n\n#Now we got main root clips\n#Now check that for each image, you can get the DEM clipped\nimport pandas as pd\nimport pyproj\nfrom geopandas import GeoDataFrame\nfrom shapely.geometry import shape,box\nfrom rasterio.mask import mask\nfrom rasterio.transform import from_origin\nscale=1\n#create the files with 5 babds\nfor link in imagery_locs:\n print(link)\n #sub_candidates = fs.ls(link)\n #print(sub_candidates)\n #dev_s3_client.list_objects(link) \n s3 = boto3.resource('s3')\n s3_client = boto3.client('s3')\n\n bucket=link.partition('/')[0] \n my_bucket = s3.Bucket(bucket)\n for my_bucket_object in my_bucket.objects.filter(Prefix=link.partition('/')[2]):\n #print(my_bucket_object)\n print('{0}:{1}'.format(my_bucket.name, my_bucket_object.key))\n with rio.open('s3://{0}/{1}'.format(my_bucket.name, my_bucket_object.key)) as src:\n print(\"key\",my_bucket_object.key)\n print(src.meta)\n # # Load red and NIR bands - note all PlanetScope 4-band images have band order BGRN\n planet_ndvi = es.normalized_diff(src.read(3), src.read(4))\n aug_pla_meta = src.profile\n # Change the count or number of bands from 4 to 5\n aug_pla_meta['count'] = 5\n # Change the data type to float rather than integer\n aug_pla_meta['dtype'] = \"float64\"\n aug_pla_meta\n \n #convert to float64\n #ndvi_64 = np.array(planet_ndvi, dtype=numpy.float64)\n t = src.transform\n # rescale the metadata\n transform = 
Affine(t.a * scale, t.b, t.c, t.d, t.e * scale, t.f)\n height = int(src.height / scale)\n width = int(src.width / scale)\n \n #clip the dem\n with rio.open('out.tiff') as origin:\n\n epsg4326_dem = origin.read(1)\n print('dem meta origin',origin.meta)\n\n print('planet origin',src.meta)\n #pf = src.read(1, masked=True)\n print(box(*src.bounds))\n \n try:\n clipped_raster,clipped_transform = mask(origin,[box(*src.bounds)],crop=True,nodata= 0)\n except ValueError as err:\n print('Handling run-time error:', err)\n \n print('clipped transform',clipped_transform)\n clipped_meta = origin.meta.copy()\n clipped_meta.update({\"driver\": \"GTiff\",\n \"height\": clipped_raster.shape[1],\n \"width\": clipped_raster.shape[2],\n \"nodata\": 0,\n \"transform\": clipped_transform})\n print(src.meta,\"ds\")\n print(clipped_raster[0].shape)\n print(src.crs)\n print(src.meta)\n print(src.shape[1])\n print(src.shape[0])\n print(\"clipped\")\n print(clipped_raster.shape[1])\n print(clipped_raster.shape[2])\n\n # type\n print(type(clipped_raster)) \n\n #old \n with rio.open(\"dem.masked1\" + \".tif\", \"w\", **clipped_meta) as dest:\n dest.write(clipped_raster) \n \n \n localname='dem.masked1.tif'\n #Second write is to do with resampling\n with rasterio.open(localname) as nf:\n print(nf.profile)\n print(nf.crs)\n print(nf.meta)\n dem_r = nf.read(1) # read the entire array\n clipped_meta = nf.meta.copy()\n # Resample it here to match the same as Planet\n #profile = origin.profile\n clipped_meta.update(transform=transform, driver='GTiff', height=height, width=width)\n clipped_raster = nf.read(\n out_shape=(origin.count, height, width),\n resampling=Resampling.bilinear,\n )\n with rio.open(\"dem.masked2\" + \".tif\", \"w\", **clipped_meta) as dest:\n dest.write(clipped_raster) \n # below works , but a better way\n\n \n localname='dem.masked2.tif' \n with rasterio.open(localname) as nf:\n print(nf.profile)\n print(nf.crs)\n print(nf.meta)\n dem_r = nf.read(1) # read the entire array\n \n #print(src.meta)\n # # Load red and NIR bands - note all PlanetScope 4-band images have band order BGRN\n\n aug_pla_meta = src.profile\n # Change the count or number of bands from 4 to 6\n aug_pla_meta['count'] = 5\n # Change the data type to float rather than integer\n aug_pla_meta['dtype'] = \"float64\"\n aug_pla_meta\n \n #convert to float64\n dem_64 = np.array(dem_r, dtype=numpy.float64)\n ndvi_64 = np.array(planet_ndvi , dtype=numpy.float64)\n new_bucket = s3.Bucket('planet-snowcover-imagery-ndvi')\n temp_file = tempfile.TemporaryFile()\n #with tempfile.NamedTemporaryFile() as tmpfile:\n #tmpfile.write(data)\n #with rasterio.open(tmpfile.name) as dataset:\n #data_array = dataset.read()\n \n with tempfile.NamedTemporaryFile() as tmpfile:\n with rasterio.open(tmpfile.name,\n 'w', **aug_pla_meta) as dstr:\n dstr.write_band(1, src.read(1))\n dstr.write_band(2, src.read(2))\n dstr.write_band(3, src.read(3))\n dstr.write_band(4, src.read(4))\n #dstr.write_band(5, dem_64)\n dstr.write_band(5, ndvi_64)\n dstr.close() \n \n s3_client.upload_fileobj(tmpfile, new_bucket.name, my_bucket_object.key) \n # Write band calculations to a new raster file\n \n# break\n\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
r0mainK/ur-lstm
[ "ffda3fb817f85b488b44e33b8ef69fbd60e71910" ]
[ "experiments/models/copy_model.py" ]
[ "from torch import Tensor\nimport torch.nn as nn\n\nfrom .utils import create_lstm_variant\nfrom .utils import ModelType\n\n\nclass CopyModel(nn.Module):\n def __init__(self, model_type: ModelType, hidden_size: int, forget_bias: float):\n super(CopyModel, self).__init__()\n self.lstm = create_lstm_variant(model_type, 10, hidden_size, forget_bias)\n self.out_proj = nn.Linear(hidden_size, 10)\n\n def forward(self, x: Tensor) -> Tensor:\n x, _ = self.lstm(x)\n return self.out_proj(x)\n" ]
[ [ "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HLJ1997/human-action-recognition
[ "550fcc3d1a2d8b338535ab12cb2124cf2e9c0be0" ]
[ "src/lib/classifier/dnn/feature_procs.py" ]
[ "''' This script defines functions/class to process features:\n\n* def extract_multi_frame_features\n Convert raw skeleton data into features extracted from multiple frames\n by calling `class FeatureGenerator`.\n\n* class FeatureGenerator:\n Compute features from a video sequence of raw skeleton data.\n\n'''\n\nimport numpy as np\nimport math\nfrom collections import deque\n\n# -- Settings\nNOISE_INTENSITY = 0.05\n\n# -- Constant\nPI = np.pi\nInf = float(\"inf\")\nNaN = 0\n\n# train keyoints info\nTOTAL_JOINTS = 13\nNECK = 0\nL_ARMS = [1, 2, 3]\nR_ARMS = [4, 5, 6]\nL_KNEE = 8\nL_ANKLE = 9\nR_KNEE = 11\nR_ANKLE = 12\nL_LEGS = [8, 9]\nR_LEGS = [11, 12]\nARMS_LEGS = L_ARMS + R_ARMS + L_LEGS + R_LEGS\nL_THIGH = 7\nR_THIGH = 10\n\n\n# -- Functions for processing features\ndef get_joint(x, idx):\n px = x[2*idx]\n py = x[2*idx+1]\n return px, py\n\ndef set_joint(x, idx, px, py):\n x[2*idx] = px\n x[2*idx+1] = py\n return\n\ndef check_joint(x, idx):\n return x[2*idx] != NaN\n\ndef get_an_example_of_standing_skeleton():\n data = [7, 67, 7041, \"stand\", \"stand_03-08-20-24-55-587/00055.jpg\", 0.5670731707317073, 0.11005434782608697, 0.5670731707317073, 0.18342391304347827, 0.5182926829268293, 0.1875, 0.5030487804878049, 0.27309782608695654, 0.5030487804878049, 0.34239130434782605, 0.6189024390243902, 0.18342391304347827, 0.6310975609756098, 0.2649456521739131, 0.6310975609756098, 0.3342391304347826, 0.5365853658536586,\n 0.34646739130434784, 0.5335365853658537, 0.46467391304347827, 0.5335365853658537, 0.5747282608695652, 0.600609756097561, 0.34646739130434784, 0.600609756097561, 0.4565217391304348, 0.5945121951219512, 0.5665760869565217, 0.5579268292682927, 0.10190217391304347, 0.5762195121951219, 0.09782608695652173, 0.5426829268292683, 0.11005434782608697, 0.5884146341463414, 0.11005434782608697]\n skeleton = np.array(data[5:])\n return skeleton\n\ndef get_a_normalized_standing_skeleton():\n x = get_an_example_of_standing_skeleton()\n\n neck = 1\n l_thigh = 8\n r_thigh = 11\n\n # Remove offset by setting neck as origin\n x0, y0 = get_joint(x, neck)\n x[::2] -= x0\n x[1::2] -= y0\n\n # Scale the skeleton by taking neck-thigh distance as height\n x0, y0 = get_joint(x, neck)\n _, y11 = get_joint(x, l_thigh)\n _, y12 = get_joint(x, r_thigh)\n y1 = (y11 + y12) / 2\n height = abs(y0 - y1)\n x /= height\n return x\n\ndef retrain_only_body_joints(skeleton):\n ''' All skeleton operations in this script are done after this function.\n The joints in the head are all removed, and the neck becomes the 0th joint.\n\n If you comment out this function in `def add_cur_skeleton`,\n then you need to change all the joint indices list below,\n i.e. 
change NECK=0 to 1, change L_KNEE=8 to 9, etc.\n Also, you will need to write some extra code to\n deal with the case when head joints are missing.\n\n '''\n return skeleton.copy()[2:2+13*2]\n\n\nSTAND_SKEL_NORMED = retrain_only_body_joints(\n get_a_normalized_standing_skeleton())\n\n# -- Functions\n\n\ndef extract_multi_frame_features(\n X, Y, video_indices, window_size,\n is_adding_noise=False, is_print=False):\n ''' From image index and raw skeleton positions,\n Extract features of body velocity, joint velocity, and normalized joint positions.\n '''\n X_new = []\n Y_new = []\n N = len(video_indices)\n\n # Loop through all data\n for i, _ in enumerate(video_indices):\n\n # If a new video clip starts, reset the feature generator\n if i == 0 or video_indices[i] != video_indices[i-1]:\n fg = FeatureGenerator(window_size, is_adding_noise)\n\n # Get features\n success, features = fg.add_cur_skeleton(X[i, :])\n if success: # True if (data length > 5) and (skeleton has enough joints)\n X_new.append(features)\n Y_new.append(Y[i])\n\n # Print\n if is_print and i % 1000 == 0:\n print(f\"{i}/{N}\", end=\", \")\n\n if is_print:\n print(\"\")\n X_new = np.array(X_new)\n Y_new = np.array(Y_new)\n return X_new, Y_new\n\n\nclass Math():\n ''' Some math operations '''\n @staticmethod\n def calc_dist(p1, p0):\n return math.sqrt((p1[0]-p0[0])**2+(p1[1]-p0[1])**2)\n\n @staticmethod\n def pi2pi(x):\n if x > PI:\n x -= 2*PI\n if x <= -PI:\n x += 2*PI\n return x\n\n @staticmethod\n def calc_relative_angle(x1, y1, x0, y0, base_angle):\n # compute rotation from {base_angle} to {(x0,y0)->(x1,y1)}\n if (y1 == y0) and (x1 == x0):\n return 0\n a1 = np.arctan2(y1-y0, x1-x0)\n return Math.pi2pi(a1 - base_angle)\n\n @staticmethod\n def calc_relative_angle_v2(p1, p0, base_angle):\n # compute rotation from {base_angle} to {p0->p1}\n return Math.calc_relative_angle(p1[0], p1[1], p0[0], p0[1], base_angle)\n\nclass ProcFtr(object):\n\n @staticmethod\n def drop_arms_and_legs_randomly(x, thresh=0.3):\n ''' Randomly drop one arm or one leg with a probability of thresh '''\n x = x.copy()\n N = len(ARMS_LEGS)\n rand_num = np.random.random()\n if rand_num < thresh:\n joint_idx = int((rand_num / thresh)*N)\n set_joint(x, joint_idx, NaN, NaN)\n return x\n\n @staticmethod\n def has_neck_and_thigh(x):\n ''' Check if a skeleton has a neck and at least one thigh '''\n return check_joint(x, NECK) and (check_joint(x, L_THIGH) or check_joint(x, R_THIGH))\n\n @staticmethod\n def get_body_height(x):\n ''' Compute height of the body, which is defined as:\n the distance between `neck` and `thigh`.\n '''\n x0, y0 = get_joint(x, NECK)\n\n # Get average thigh height\n x11, y11 = get_joint(x, L_THIGH)\n x12, y12 = get_joint(x, R_THIGH)\n if y11 == NaN and y12 == NaN: # Invalid data\n return 1.0\n if y11 == NaN:\n x1, y1 = x12, y12\n elif y12 == NaN:\n x1, y1 = x11, y11\n else:\n x1, y1 = (x11 + x12) / 2, (y11 + y12) / 2\n\n # Get body height\n height = ((x0-x1)**2 + (y0-y1)**2)**(0.5)\n\n # print('height ', height)\n return height\n\n @staticmethod\n def remove_body_offset(x):\n ''' The origin is the neck.\n TODO: Deal with empty data.\n '''\n x = x.copy()\n px0, py0 = get_joint(x, NECK)\n x[0::2] = x[0::2] - px0\n x[1::2] = x[1::2] - py0\n return x\n\n @staticmethod\n def joint_pos_2_angle_and_length(x):\n ''' Change the representation of skeletons\n From xy positions to angle and length.\n '''\n\n # ---------------------- Get joint positions ----------------------\n class JointPosExtractor(object):\n def __init__(self, x):\n self.x = x\n self.i 
= 0\n\n def get_next_point(self):\n p = [self.x[self.i], self.x[self.i+1]]\n self.i += 2\n return p\n tmp = JointPosExtractor(x)\n\n pneck = tmp.get_next_point()\n\n prshoulder = tmp.get_next_point()\n prelbow = tmp.get_next_point()\n prwrist = tmp.get_next_point()\n\n plshoulder = tmp.get_next_point()\n plelbow = tmp.get_next_point()\n plwrist = tmp.get_next_point()\n\n prhip = tmp.get_next_point()\n prknee = tmp.get_next_point()\n prankle = tmp.get_next_point()\n\n plhip = tmp.get_next_point()\n plknee = tmp.get_next_point()\n plankle = tmp.get_next_point()\n\n # ---------------------- Get joint angels ----------------------\n\n class Get12Angles(object):\n def __init__(self):\n self.j = 0\n self.f_angles = np.zeros((12,))\n self.x_lengths = np.zeros((12,))\n\n def set_next_angle_len(self, next_joint, base_joint, base_angle):\n angle = Math.calc_relative_angle_v2(\n next_joint, base_joint, base_angle)\n dist = Math.calc_dist(next_joint, base_joint)\n self.f_angles[self.j] = angle\n self.x_lengths[self.j] = dist\n self.j += 1\n\n tmp2 = Get12Angles()\n\n tmp2.set_next_angle_len(prshoulder, pneck, PI) # r-shoulder\n tmp2.set_next_angle_len(prelbow, prshoulder, PI/2) # r-elbow\n tmp2.set_next_angle_len(prwrist, prelbow, PI/2) # r-wrist\n\n tmp2.set_next_angle_len(plshoulder, pneck, 0) # l-shoulder\n tmp2.set_next_angle_len(plelbow, plshoulder, PI/2) # l-elbow\n tmp2.set_next_angle_len(plwrist, plelbow, PI/2) # l-wrist\n\n tmp2.set_next_angle_len(prhip, pneck, PI/2+PI/18)\n tmp2.set_next_angle_len(prknee, prhip, PI/2)\n tmp2.set_next_angle_len(prankle, prknee, PI/2)\n\n tmp2.set_next_angle_len(plhip, pneck, PI/2-PI/18)\n tmp2.set_next_angle_len(plknee, plhip, PI/2)\n tmp2.set_next_angle_len(plankle, plknee, PI/2)\n\n # Output\n features_angles = tmp2.f_angles\n features_lens = tmp2.x_lengths\n return features_angles, features_lens\n\n# -- The main class for extracting features\n\n\nclass FeatureGenerator(object):\n def __init__(self,\n window_size,\n is_adding_noise=False):\n '''\n Arguments:\n window_size {int}: Number of adjacent frames for extracting features.\n is_adding_noise {bool}: Is adding noise to the joint positions and scale.\n noise_intensity {float}: The noise relative to the body height.\n '''\n self._window_size = window_size\n self._is_adding_noise = is_adding_noise\n self._noise_intensity = NOISE_INTENSITY\n self.reset()\n\n def reset(self):\n ''' Reset the FeatureGenerator '''\n self._x_deque = deque()\n self._angles_deque = deque()\n self._lens_deque = deque()\n self._pre_x = None\n\n def add_cur_skeleton(self, skeleton):\n ''' Input a new skeleton, return the extracted feature.\n Returns:\n is_success {bool}: Return the feature only when\n the historical input skeletons are more than self._window_size.\n features {np.array}\n '''\n\n x = retrain_only_body_joints(skeleton)\n #print(\"skeleton\", x.shape, x)\n\n if not ProcFtr.has_neck_and_thigh(x):\n self.reset()\n return False, None\n\n else:\n ''' The input skeleton has a neck and at least one thigh '''\n # -- Preprocess x\n # Fill zeros, compute angles/lens\n x = self._fill_invalid_data(x)\n if self._is_adding_noise:\n # Add noise druing training stage to augment data\n x = self._add_noises(x, self._noise_intensity)\n x = np.array(x)\n # angles, lens = ProcFtr.joint_pos_2_angle_and_length(x) # deprecate\n\n # Push to deque\n self._x_deque.append(x)\n # self._angles_deque.append(angles) # deprecate\n # self._lens_deque.append(lens) # deprecate\n\n self._maintain_deque_size()\n self._pre_x = x.copy()\n\n # -- Extract 
features\n if len(self._x_deque) < self._window_size:\n return False, None\n else:\n # -- Normalize all 1~t features\n h_list = [ProcFtr.get_body_height(xi) for xi in self._x_deque]\n mean_height = np.mean(h_list)\n xnorm_list = [ProcFtr.remove_body_offset(xi)/mean_height\n for xi in self._x_deque]\n\n # -- Get features of pose/angles/lens\n f_poses = self._deque_features_to_1darray(xnorm_list)\n # f_angles = self._deque_features_to_1darray(self._angles_deque) # deprecate\n # f_lens = self._deque_features_to_1darray(\n # self._lens_deque) / mean_height # deprecate\n\n # -- Get features of motion\n\n f_v_center = self._compute_v_center(\n self._x_deque, step=1) / mean_height # len = (t=4)*2 = 8\n f_v_center = np.repeat(f_v_center, 10) # repeat to add weight\n\n f_v_joints = self._compute_v_all_joints(\n xnorm_list, step=1) # len = (t=(5-1)/step)*12*2 = 96\n\n # -- Output\n features = np.concatenate((f_poses, f_v_joints, f_v_center))\n return True, features.copy()\n\n def _maintain_deque_size(self):\n if len(self._x_deque) > self._window_size:\n self._x_deque.popleft()\n if len(self._angles_deque) > self._window_size:\n self._angles_deque.popleft()\n if len(self._lens_deque) > self._window_size:\n self._lens_deque.popleft()\n\n def _compute_v_center(self, x_deque, step):\n vel = []\n for i in range(0, len(x_deque) - step, step):\n dxdy = x_deque[i+step][0:2] - x_deque[i][0:2]\n vel += dxdy.tolist()\n return np.array(vel)\n\n def _compute_v_all_joints(self, xnorm_list, step):\n vel = []\n for i in range(0, len(xnorm_list) - step, step):\n dxdy = xnorm_list[i+step][:] - xnorm_list[i][:]\n vel += dxdy.tolist()\n return np.array(vel)\n\n def _fill_invalid_data(self, x):\n ''' Fill the NaN elements in x with\n their relative-to-neck position in the preious x.\n Argument:\n x {np.array}: a skeleton that has a neck and at least a thigh.\n '''\n res = x.copy()\n\n def get_px_py_px0_py0(x):\n px = x[0::2] # list of x\n py = x[1::2] # list of y\n px0, py0 = get_joint(x, NECK) # neck\n return px, py, px0, py0\n cur_px, cur_py, cur_px0, cur_py0 = get_px_py_px0_py0(x)\n cur_height = ProcFtr.get_body_height(x)\n\n is_lack_knee = check_joint(x, L_KNEE) or check_joint(x, R_KNEE)\n is_lack_ankle = check_joint(x, L_ANKLE) or check_joint(x, R_ANKLE)\n if (self._pre_x is None) or is_lack_knee or is_lack_ankle:\n # If preious data is invalid or there is no knee or ankle,\n # then fill the data based on the STAND_SKEL_NORMED.\n for i in range(TOTAL_JOINTS*2):\n if res[i] == NaN:\n res[i] = (cur_px0 if i % 2 == 0 else cur_py0) + \\\n cur_height * STAND_SKEL_NORMED[i]\n return res\n\n pre_px, pre_py, pre_px0, pre_py0 = get_px_py_px0_py0(self._pre_x)\n pre_height = ProcFtr.get_body_height(self._pre_x)\n\n scale = cur_height / pre_height\n\n bad_idxs = np.nonzero(cur_px == NaN)[0]\n if not len(bad_idxs): # No invalid data\n return res\n\n cur_px[bad_idxs] = cur_px0 + (pre_px[bad_idxs] - pre_px0) * scale\n cur_py[bad_idxs] = cur_py0 + (pre_py[bad_idxs] - pre_py0) * scale\n res[::2] = cur_px\n res[1::2] = cur_py\n return res\n\n def _add_noises(self, x, intensity):\n ''' Add noise to x with a ratio relative to the body height '''\n height = ProcFtr.get_body_height(x)\n randoms = (np.random.random(x.shape, ) - 0.5) * 2 * intensity * height\n x = [(xi + randoms[i] if xi != 0 else xi)\n for i, xi in enumerate(x)]\n return x\n\n def _deque_features_to_1darray(self, deque_data):\n features = []\n for i in range(len(deque_data)):\n next_feature = deque_data[i].tolist()\n features += next_feature\n features = 
np.array(features)\n return features\n\n def _deque_features_to_2darray(self, deque_data):\n features = []\n for i in range(len(deque_data)):\n next_feature = deque_data[i].tolist()\n features.append(next_feature)\n features = np.array(features)\n return features\n" ]
[ [ "numpy.random.random", "numpy.nonzero", "numpy.arctan2", "numpy.concatenate", "numpy.mean", "numpy.repeat", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
david-hoffman/dphtools
[ "f9f19ef5b0a00169562947c78a41c1f02e222a6a" ]
[ "tests/test_fitfuncs.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# test_fitfuncs.py\n\"\"\"\nTesting for fitfuncs.\n\nCopyright (c) 2021, David Hoffman\n\"\"\"\n\n\nimport unittest\n\nimport numpy as np\nfrom dphtools.utils.fitfuncs import exponent, exponent_fit\nfrom numpy.testing import assert_allclose\n\n\nclass TestExponentFit(unittest.TestCase):\n \"\"\"Test exponent fit.\n \n This is not even close to testing edge cases.\n \"\"\"\n\n def setUp(self):\n \"\"\"Set up.\"\"\"\n self.x = np.linspace(0, 10)\n self.params = (10, 3, 5)\n self.data = exponent(self.x, *self.params)\n self.data_noisy = np.random.randn(self.x.size)\n\n def test_positive(self):\n \"\"\"Test a decaying signal.\"\"\"\n popt, pcov = exponent_fit(self.data, self.x)\n assert_allclose(popt, self.params, rtol=1e-3)\n\n def test_negative(self):\n \"\"\"Test a rising signal.\"\"\"\n popt, pcov = exponent_fit(-self.data, self.x)\n amp, k, offset = self.params\n new_params = -amp, k, -offset\n assert_allclose(popt, new_params, rtol=1e-3)\n" ]
[ [ "numpy.random.randn", "numpy.linspace", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isgeles/SMARTS
[ "423275123ae4aab8b7d409140d82b50555a5267c" ]
[ "smarts/core/smarts.py" ]
[ "# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nimport importlib.resources as pkg_resources\nimport logging\nimport math\nimport os\nimport warnings\nfrom collections import defaultdict\nfrom time import time\nfrom typing import List, Sequence\n\nimport numpy\n\nfrom envision import types as envision_types\nfrom envision.client import Client as EnvisionClient\n\nwith warnings.catch_warnings():\n # XXX: Benign warning, seems no other way to \"properly\" fix\n warnings.filterwarnings(\"ignore\", \"numpy.ufunc size changed\")\n from sklearn.metrics.pairwise import euclidean_distances\n\nfrom smarts.core.chassis import AckermannChassis, BoxChassis\n\nfrom . import models\nfrom .agent_manager import AgentManager\nfrom .bubble_manager import BubbleManager\nfrom .colors import SceneColors\nfrom .controllers import ActionSpaceType, Controllers\nfrom .motion_planner_provider import MotionPlannerProvider\nfrom .provider import Provider, ProviderState\nfrom .renderer import Renderer\nfrom .scenario import Scenario\nfrom .sensors import Collision\nfrom .sumo_road_network import SumoRoadNetwork\nfrom .sumo_traffic_simulation import SumoTrafficSimulation\nfrom .traffic_history_provider import TrafficHistoryProvider\nfrom .trap_manager import TrapManager\nfrom .utils import pybullet\nfrom .utils.id import Id\nfrom .utils.pybullet import bullet_client as bc\nfrom .utils.visdom_client import VisdomClient\nfrom .vehicle import VehicleState\nfrom .vehicle_index import VehicleIndex\nfrom .waypoints import Waypoints\n\n\nclass SMARTSNotSetupError(Exception):\n pass\n\n\nclass SMARTS:\n def __init__(\n self,\n agent_interfaces,\n traffic_sim: SumoTrafficSimulation,\n envision: EnvisionClient = None,\n visdom: VisdomClient = None,\n timestep_sec=0.1,\n reset_agents_only=False,\n zoo_addrs=None,\n ):\n self._log = logging.getLogger(self.__class__.__name__)\n self._sim_id = Id.new(\"smarts\")\n self._is_setup = False\n self._scenario: Scenario = None\n self._renderer = None\n self._envision: EnvisionClient = envision\n self._visdom: VisdomClient = visdom\n self._timestep_sec = timestep_sec\n self._traffic_sim = traffic_sim\n self._motion_planner_provider = MotionPlannerProvider()\n self._traffic_history_provider = TrafficHistoryProvider()\n self._providers = [\n self._motion_planner_provider,\n self._traffic_history_provider,\n ]\n if self._traffic_sim:\n self._providers.insert(0, self._traffic_sim)\n\n # We buffer provider 
state between steps to compensate for TRACI's timestep delay\n self._last_provider_state = None\n self._reset_agents_only = reset_agents_only # a.k.a \"teleportation\"\n self._imitation_learning_mode = False\n\n self._elapsed_sim_time = 0\n self._total_sim_time = 0\n\n # For macOS GUI. See our `BulletClient` docstring for details.\n # from .utils.bullet import BulletClient\n # self._bullet_client = BulletClient(pybullet.GUI)\n self._bullet_client = bc.BulletClient(pybullet.DIRECT)\n self._dynamic_action_spaces = {\n ActionSpaceType.Continuous,\n ActionSpaceType.Lane,\n ActionSpaceType.ActuatorDynamic,\n ActionSpaceType.LaneWithContinuousSpeed,\n ActionSpaceType.Trajectory,\n ActionSpaceType.MPC,\n }\n\n # Set up indices\n self._agent_manager = AgentManager(agent_interfaces, zoo_addrs)\n self._vehicle_index = VehicleIndex()\n\n # TODO: Should not be stored in SMARTS\n self._vehicle_collisions = defaultdict(list) # list of `Collision` instances\n self._vehicle_states = []\n\n self._bubble_manager = None\n self._trap_manager: TrapManager = None\n\n self._ground_bullet_id = None\n\n def step(self, agent_actions):\n if not self._is_setup:\n raise SMARTSNotSetupError(\"Must call reset() or setup() before stepping.\")\n\n try:\n return self._step(agent_actions)\n except (KeyboardInterrupt, SystemExit):\n # ensure we clean-up if the user exits the simulation\n self._log.info(\"Simulation was interrupted by the user.\")\n self.destroy()\n raise # re-raise the KeyboardInterrupt\n except Exception as e:\n self._log.error(\n \"Simulation crashed with exception. Attempting to cleanly shutdown.\"\n )\n self._log.exception(e)\n self.destroy()\n raise # re-raise\n\n def _check_if_acting_on_active_agents(self, agent_actions):\n for agent_id in agent_actions.keys():\n if agent_id not in self._agent_manager.ego_agent_ids:\n self._log.warning(\n f\"Attempted to perform actions on non-existing agent, {agent_id} \"\n )\n\n def _step(self, agent_actions):\n \"\"\"Steps through the simulation while applying the given agent actions.\n Returns the observations, rewards, and done signals.\n \"\"\"\n\n # Due to a limitation of our traffic simulator (SUMO) interface (TRACI), we can\n # only observe traffic state of the previous simulation step.\n #\n # To compensate for this, we:\n #\n # 1. Fetch social agent actions\n # 2. Step all providers and harmonize state\n # 3. Step bubble manager\n # 4. Calculate observation and reward\n # 5. Send observations to social agents\n # 6. Clear done agents\n # 7. Perform visualization\n # 8. Advance the simulation clock\n #\n # In this way, observations and reward are computed with data that is\n # consistently one step of latency behind, and the agent will observe consistent\n # data.\n\n # The following is simulated to happen in dt seconds.\n # This isn't a realtime simulation though.\n dt = self._timestep_sec\n\n # 1. Fetch agent actions\n all_agent_actions = self._agent_manager.fetch_agent_actions(self, agent_actions)\n\n # 2. Step all providers and harmonize state\n provider_state = self._step_providers(all_agent_actions, dt)\n self._check_if_acting_on_active_agents(agent_actions)\n\n # 3. Step bubble manager and trap manager\n self._vehicle_index.sync()\n self._bubble_manager.step(self)\n self._trap_manager.step(self)\n\n # 4. Calculate observation and reward\n # We pre-compute vehicle_states here because we *think* the users will\n # want these during their observation/reward computations.\n # This is a hack to give us some short term perf wins. 
Longer term we\n # need to expose better support for batched computations\n self._vehicle_states = [v.state for v in self._vehicle_index.vehicles]\n\n # Agents\n self._agent_manager.step_sensors(self)\n\n if self._renderer:\n # runs through the render pipeline (for camera-based sensors)\n # MUST perform this after step_sensors() above, and before observe() below,\n # so that all updates are ready before rendering happens per frame\n self._renderer.render()\n\n observations, rewards, scores, dones = self._agent_manager.observe(self)\n\n response_for_ego = self._agent_manager.filter_response_for_ego(\n (observations, rewards, scores, dones)\n )\n\n # 5. Send observations to social agents\n self._agent_manager.send_observations_to_social_agents(observations)\n\n # 6. Clear done agents\n self._teardown_done_agents_and_vehicles(dones)\n\n # 7. Perform visualization\n self._try_emit_envision_state(provider_state, observations, scores)\n self._try_emit_visdom_obs(observations)\n\n observations, rewards, scores, dones = response_for_ego\n extras = dict(scores=scores)\n\n # 8. Advance the simulation clock.\n # round due to FP precision issues, but need to allow arbitrarily-small dt's\n dec_digits = len(\"{}\".format(self._timestep_sec)) - 2\n self._elapsed_sim_time = round(self._elapsed_sim_time + dt, dec_digits)\n\n return observations, rewards, dones, extras\n\n def _teardown_done_agents_and_vehicles(self, dones):\n def done_vehicle_ids(dones):\n vehicle_ids = set()\n for agent_id, done in dones.items():\n if self._agent_manager.is_boid_agent(agent_id):\n vehicle_ids.update(id_ for id_ in done if done[id_])\n elif done:\n ids = self._vehicle_index.vehicle_ids_by_actor_id(agent_id)\n # 0 if shadowing, 1 if active\n assert len(ids) <= 1, f\"{len(ids)} <= 1\"\n vehicle_ids.update(ids)\n\n return vehicle_ids\n\n def done_agent_ids(dones):\n agent_ids = set()\n for agent_id, done in dones.items():\n if self._agent_manager.is_boid_agent(agent_id):\n if not self.agent_manager.is_boid_keep_alive_agent(\n agent_id\n ) and all(dones[agent_id].values()):\n agent_ids.add(agent_id)\n elif done:\n agent_ids.add(agent_id)\n\n return agent_ids\n\n # XXX: These can not be put inline because we do queries that must proceed\n # the actual teardown.\n vehicles_to_teardown = done_vehicle_ids(dones)\n agents_to_teardown = done_agent_ids(dones)\n\n self._agent_manager.teardown_ego_agents(agents_to_teardown)\n self._agent_manager.teardown_social_agents(agents_to_teardown)\n self._teardown_vehicles(vehicles_to_teardown)\n\n def reset(self, scenario: Scenario):\n if scenario == self._scenario and self._reset_agents_only:\n vehicle_ids_to_teardown = []\n agent_ids = self._agent_manager.teardown_ego_agents()\n for agent_id in agent_ids:\n ids = self._vehicle_index.vehicle_ids_by_actor_id(agent_id)\n vehicle_ids_to_teardown.extend(ids)\n self._teardown_vehicles(set(vehicle_ids_to_teardown))\n self._trap_manager.init_traps(\n scenario.road_network, scenario.waypoints, scenario.missions\n )\n self._agent_manager.init_ego_agents(self)\n if self._renderer:\n self._sync_vehicles_to_renderer()\n else:\n self.teardown()\n self.setup(scenario)\n\n # Tell history provide to ignore vehicles if we have assigned mission to them\n self._traffic_history_provider.set_replaced_ids(scenario.missions.keys())\n\n self._total_sim_time += self._elapsed_sim_time\n self._elapsed_sim_time = 0\n\n self._vehicle_states = [v.state for v in self._vehicle_index.vehicles]\n observations, _, _, _ = self._agent_manager.observe(self)\n observations_for_ego 
= self._agent_manager.reset_agents(observations)\n\n # Visualization\n self._try_emit_visdom_obs(observations)\n if len(self._agent_manager.ego_agent_ids):\n while len(observations_for_ego) < 1:\n observations_for_ego, _, _, _ = self.step({})\n\n self._reset_providers()\n\n return observations_for_ego\n\n def setup(self, scenario: Scenario):\n self._scenario = scenario\n\n self._bubble_manager = BubbleManager(scenario.bubbles, scenario.road_network)\n self._trap_manager = TrapManager(scenario)\n\n if self._renderer:\n self._renderer.setup(scenario)\n self._setup_bullet_client(self._bullet_client)\n provider_state = self._setup_providers(self._scenario)\n self._agent_manager.setup_agents(self)\n\n self._harmonize_providers(provider_state)\n self._last_provider_state = provider_state\n\n self._is_setup = True\n\n def add_provider(self, provider):\n assert isinstance(provider, Provider)\n self._providers.append(provider)\n\n def switch_ego_agent(self, agent_interface):\n self._agent_manager.switch_initial_agent(agent_interface)\n self._is_setup = False\n\n def _setup_bullet_client(self, client: bc.BulletClient):\n client.resetSimulation()\n client.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, 0)\n\n # PyBullet defaults the timestep to 240Hz. Several parameters are tuned with\n # this value in mind. For example the number of solver iterations and the error\n # reduction parameters (erp) for contact, friction and non-contact joints.\n # Attempting to get around this we set the number of substeps so that\n # timestep * substeps = 240Hz. Bullet (C++) does something to this effect as\n # well (https://git.io/Jvf0M), but PyBullet does not expose it.\n client.setPhysicsEngineParameter(\n fixedTimeStep=self._timestep_sec,\n numSubSteps=int(self._timestep_sec * 240),\n numSolverIterations=10,\n solverResidualThreshold=0.001,\n # warmStartingFactor=0.99\n )\n\n client.setGravity(0, 0, -9.8)\n\n plane_path = self._scenario.plane_filepath\n\n # 1e6 is the default value for plane length and width.\n plane_scale = (\n max(self._scenario.map_bounding_box[0], self._scenario.map_bounding_box[1])\n / 1e6\n )\n if not os.path.exists(plane_path):\n with pkg_resources.path(models, \"plane.urdf\") as path:\n plane_path = str(path.absolute())\n\n self._ground_bullet_id = client.loadURDF(\n plane_path,\n useFixedBase=True,\n basePosition=self._scenario.map_bounding_box[2],\n globalScaling=1.1 * plane_scale,\n )\n\n def teardown(self):\n if self._agent_manager is not None:\n self._agent_manager.teardown()\n if self._vehicle_index is not None:\n self._vehicle_index.teardown()\n\n if self._bullet_client is not None:\n self._bullet_client.resetSimulation()\n if self._renderer is not None:\n self._renderer.teardown()\n if self._traffic_sim is not None:\n self._traffic_sim.teardown()\n self._teardown_providers()\n\n if self._bubble_manager is not None:\n self._bubble_manager.teardown()\n self._bubble_manager = None\n if self._trap_manager is not None:\n self._trap_manager.teardown()\n self._trap_manager = None\n\n self._ground_bullet_id = None\n self._is_setup = False\n\n def destroy(self):\n self.teardown()\n\n if self._envision:\n self._envision.teardown()\n\n if self._visdom:\n self._visdom.teardown()\n\n if self._agent_manager is not None:\n self._agent_manager.destroy()\n self._agent_manager = None\n if self._traffic_sim is not None:\n self._traffic_sim.destroy()\n self._traffic_sim = None\n if self._renderer is not None:\n self._renderer.destroy()\n self._renderer = None\n if self._bullet_client is not None:\n 
self._bullet_client.disconnect()\n self._bullet_client = None\n\n def __del__(self):\n self.destroy()\n\n def _teardown_vehicles(self, vehicle_ids):\n self._vehicle_index.teardown_vehicles_by_vehicle_ids(vehicle_ids)\n self._clear_collisions(vehicle_ids)\n\n def attach_sensors_to_vehicles(self, agent_spec, vehicle_ids):\n self._agent_manager.attach_sensors_to_vehicles(\n self, agent_spec.interface, vehicle_ids\n )\n\n def observe_from(self, vehicle_ids):\n return self._agent_manager.observe_from(self, vehicle_ids)\n\n @property\n def renderer(self):\n if not self._renderer:\n self._renderer = Renderer(self._sim_id)\n if self._scenario:\n self._renderer.setup(self._scenario)\n self._vehicle_index.begin_rendering_vehicles(self._renderer)\n return self._renderer\n\n @property\n def is_rendering(self):\n return self._renderer is not None\n\n @property\n def road_stiffness(self):\n return self._bullet_client.getDynamicsInfo(self._ground_bullet_id, -1)[9]\n\n @property\n def dynamic_action_spaces(self):\n return self._dynamic_action_spaces\n\n @property\n def traffic_sim(self) -> SumoTrafficSimulation:\n return self._traffic_sim\n\n @property\n def waypoints(self) -> Waypoints:\n return self.scenario.waypoints\n\n @property\n def road_network(self) -> SumoRoadNetwork:\n return self.scenario.road_network\n\n @property\n def bc(self):\n return self._bullet_client\n\n @property\n def envision(self):\n return self._envision\n\n @property\n def elapsed_sim_time(self):\n return self._elapsed_sim_time\n\n def teardown_agents_without_vehicles(self, agent_ids: Sequence):\n \"\"\"\n Teardown agents in the given list that have no vehicles registered as\n controlled-by or shadowed-by\n\n Params:\n agent_ids: Sequence of agent ids\n \"\"\"\n agents_to_teardown = {\n agent_id\n for agent_id in agent_ids\n # Only clean-up when there are no controlled agents left (e.g. 
boids)\n if len(\n self._vehicle_index.vehicles_by_actor_id(\n agent_id, include_shadowers=True\n )\n )\n == 0\n }\n\n agents_to_teardown = {\n id_\n for id_ in agents_to_teardown\n if not self.agent_manager.is_boid_keep_alive_agent(id_)\n }\n self.agent_manager.teardown_social_agents(filter_ids=agents_to_teardown)\n\n def _teardown_vehicles_and_agents(self, vehicle_ids):\n shadow_and_controlling_agents = set()\n for vehicle_id in vehicle_ids:\n agent_id = self._vehicle_index.actor_id_from_vehicle_id(vehicle_id)\n if agent_id:\n shadow_and_controlling_agents.add(agent_id)\n\n shadow_agent_id = self._vehicle_index.shadow_actor_id_from_vehicle_id(\n vehicle_id\n )\n if shadow_agent_id:\n shadow_and_controlling_agents.add(shadow_agent_id)\n\n self._vehicle_index.teardown_vehicles_by_vehicle_ids(vehicle_ids)\n self.teardown_agents_without_vehicles(shadow_and_controlling_agents)\n\n def _pybullet_provider_sync(self, provider_state: ProviderState):\n current_vehicle_ids = {v.vehicle_id for v in provider_state.vehicles}\n previous_sv_ids = self._vehicle_index.social_vehicle_ids()\n exited_vehicles = previous_sv_ids - current_vehicle_ids\n self._teardown_vehicles_and_agents(exited_vehicles)\n\n # Update our pybullet world given this provider state\n for vehicle in provider_state.vehicles:\n vehicle_id = vehicle.vehicle_id\n # either this is a pybullet agent vehicle, or it is a social vehicle\n if vehicle_id in self._vehicle_index.agent_vehicle_ids():\n # this is an agent vehicle\n agent_id = self._vehicle_index.actor_id_from_vehicle_id(vehicle_id)\n agent_interface = self._agent_manager.agent_interface_for_agent_id(\n agent_id\n )\n agent_action_space = agent_interface.action_space\n if agent_action_space not in self._dynamic_action_spaces:\n # This is not a pybullet agent, but it has an avatar in this world\n # to make it's observations. 
Update the avatar to match the new\n # state of this vehicle\n pybullet_vehicle = self._vehicle_index.vehicle_by_id(vehicle_id)\n assert isinstance(pybullet_vehicle.chassis, BoxChassis)\n pybullet_vehicle.control(pose=vehicle.pose, speed=vehicle.speed)\n else:\n # This vehicle is a social vehicle\n if vehicle_id in self._vehicle_index.social_vehicle_ids():\n social_vehicle = self._vehicle_index.vehicle_by_id(vehicle_id)\n else:\n # It is a new social vehicle we have not seen yet.\n # Create it's avatar.\n social_vehicle = self._vehicle_index.build_social_vehicle(\n sim=self,\n vehicle_state=vehicle,\n actor_id=vehicle_id,\n vehicle_id=vehicle_id,\n vehicle_type=vehicle.vehicle_type,\n )\n # Update the social vehicle avatar to match the vehicle state\n social_vehicle.control(pose=vehicle.pose, speed=vehicle.speed)\n\n def _pybullet_provider_step(self, agent_actions) -> ProviderState:\n self._perform_agent_actions(agent_actions)\n\n self._bullet_client.stepSimulation()\n\n self._process_collisions()\n\n provider_state = ProviderState()\n pybullet_agent_ids = {\n agent_id\n for agent_id, interface in self._agent_manager.agent_interfaces.items()\n if interface.action_space in self._dynamic_action_spaces\n }\n\n for vehicle_id in self._vehicle_index.agent_vehicle_ids():\n agent_id = self._vehicle_index.actor_id_from_vehicle_id(vehicle_id)\n if agent_id not in pybullet_agent_ids:\n continue\n\n vehicle = self._vehicle_index.vehicle_by_id(vehicle_id)\n vehicle.step(self._elapsed_sim_time)\n provider_state.vehicles.append(\n VehicleState(\n vehicle_id=vehicle.id,\n vehicle_type=\"passenger\",\n pose=vehicle.pose,\n dimensions=vehicle.chassis.dimensions,\n speed=vehicle.speed,\n source=\"PYBULLET\",\n )\n )\n\n return provider_state\n\n @property\n def vehicle_index(self):\n return self._vehicle_index\n\n @property\n def agent_manager(self):\n return self._agent_manager\n\n @property\n def providers(self):\n # TODO: Add check to ensure that action spaces are disjoint between providers\n # TODO: It's inconsistent that pybullet is not here\n return self._providers\n\n def get_provider_by_type(self, requested_type):\n for provider in self._providers:\n if isinstance(provider, requested_type):\n return provider\n\n def _setup_providers(self, scenario) -> ProviderState:\n provider_state = ProviderState()\n for provider in self.providers:\n provider_state.merge(provider.setup(scenario))\n return provider_state\n\n def _teardown_providers(self):\n for provider in self.providers:\n provider.teardown()\n self._last_provider_state = None\n\n def _harmonize_providers(self, provider_state: ProviderState):\n for provider in self.providers:\n provider.sync(provider_state)\n self._pybullet_provider_sync(provider_state)\n if self._renderer:\n self._sync_vehicles_to_renderer()\n\n def _reset_providers(self):\n for provider in self.providers:\n provider.reset()\n\n def _step_providers(self, actions, dt) -> List[VehicleState]:\n accumulated_provider_state = ProviderState()\n\n def agent_controls_vehicles(agent_id):\n vehicles = self._vehicle_index.vehicles_by_actor_id(agent_id)\n return len(vehicles) > 0\n\n def matches_provider_action_spaces(agent_id, action_spaces):\n interface = self._agent_manager.agent_interface_for_agent_id(agent_id)\n return interface.action_space in action_spaces\n\n # PyBullet\n pybullet_actions = {\n agent_id: action\n for agent_id, action in actions.items()\n if agent_controls_vehicles(agent_id)\n and matches_provider_action_spaces(agent_id, self._dynamic_action_spaces)\n }\n 
accumulated_provider_state.merge(self._pybullet_provider_step(pybullet_actions))\n\n for provider in self.providers:\n provider_state = self._step_provider(provider, actions, dt)\n if provider == self._traffic_sim:\n # Remove agent vehicles from provider vehicles\n provider_state.filter(self._vehicle_index.agent_vehicle_ids())\n\n accumulated_provider_state.merge(provider_state)\n\n self._harmonize_providers(accumulated_provider_state)\n return accumulated_provider_state\n\n def _step_provider(self, provider, actions, dt):\n def agent_controls_vehicles(agent_id):\n vehicles = self._vehicle_index.vehicles_by_actor_id(agent_id)\n return len(vehicles) > 0\n\n provider_actions = {}\n for agent_id, action in actions.items():\n agent_interface = self._agent_manager.agent_interface_for_agent_id(agent_id)\n if (\n agent_interface\n and agent_controls_vehicles(agent_id)\n and agent_interface.action_space in provider.action_spaces\n ):\n vehicle_ids = [\n v.id\n for v in self._vehicle_index.vehicles_by_actor_id(\n agent_id, include_shadowers=True\n )\n ]\n\n if self._agent_manager.is_boid_agent(agent_id):\n for vehicle_id, vehicle_action in action.items():\n assert vehicle_id in vehicle_ids\n provider_actions[vehicle_id] = vehicle_action\n else:\n assert len(vehicle_ids) == 1\n provider_actions[vehicle_ids[0]] = action\n\n provider_state = provider.step(provider_actions, dt, self._elapsed_sim_time)\n return provider_state\n\n @property\n def scenario(self):\n return self._scenario\n\n @property\n def traffic_sim(self):\n return self._traffic_sim\n\n @property\n def timestep_sec(self):\n return self._timestep_sec\n\n @property\n def road_stiffness(self):\n return self._bullet_client.getDynamicsInfo(self._ground_bullet_id, -1)[9]\n\n def neighborhood_vehicles_around_vehicle(self, vehicle, radius=None):\n other_states = [v for v in self._vehicle_states if v.vehicle_id != vehicle.id]\n if radius is None:\n return other_states\n\n other_positions = [state.pose.position for state in other_states]\n if not other_positions:\n return []\n\n distances = euclidean_distances(other_positions, [vehicle.position]).reshape(\n -1,\n )\n indices = numpy.argwhere(distances <= radius).flatten()\n return [other_states[i] for i in indices]\n\n def vehicle_did_collide(self, vehicle_id):\n for c in self._vehicle_collisions[vehicle_id]:\n if c.collidee_id != self._ground_bullet_id:\n return True\n return False\n\n def vehicle_collisions(self, vehicle_id):\n return [\n c\n for c in self._vehicle_collisions[vehicle_id]\n if c.collidee_id != self._ground_bullet_id\n ]\n\n def _clear_collisions(self, vehicle_ids):\n for vehicle_id in vehicle_ids:\n self._vehicle_collisions.pop(vehicle_id, None)\n\n def _perform_agent_actions(self, agent_actions):\n for agent_id, action in agent_actions.items():\n agent_vehicles = self._vehicle_index.vehicles_by_actor_id(agent_id)\n if len(agent_vehicles) == 0:\n self._log.warning(\n f\"{agent_id} doesn't have a vehicle, is the agent done? 
(dropping action)\"\n )\n else:\n agent_interface = self._agent_manager.agent_interface_for_agent_id(\n agent_id\n )\n is_boid_agent = self._agent_manager.is_boid_agent(agent_id)\n\n for vehicle in agent_vehicles:\n vehicle_action = action[vehicle.id] if is_boid_agent else action\n\n controller_state = (\n self._vehicle_index.controller_state_for_vehicle_id(vehicle.id)\n )\n sensor_state = self._vehicle_index.sensor_state_for_vehicle_id(\n vehicle.id\n )\n # TODO: Support performing batched actions\n Controllers.perform_action(\n self,\n agent_id,\n vehicle,\n vehicle_action,\n controller_state,\n sensor_state,\n agent_interface.action_space,\n agent_interface.vehicle_type,\n )\n\n def _sync_vehicles_to_renderer(self):\n assert self._renderer\n for vehicle in self._vehicle_index.vehicles:\n vehicle.sync_to_renderer()\n\n def _process_collisions(self):\n self._vehicle_collisions = defaultdict(list) # list of `Collision` instances\n\n for vehicle_id in self._vehicle_index.agent_vehicle_ids():\n vehicle = self._vehicle_index.vehicle_by_id(vehicle_id)\n # We are only concerned with vehicle-vehicle collisions\n collidee_bullet_ids = set(\n [p.bullet_id for p in vehicle.chassis.contact_points]\n )\n collidee_bullet_ids.discard(self._ground_bullet_id)\n\n if not collidee_bullet_ids:\n continue\n\n for bullet_id in collidee_bullet_ids:\n collidee = self._bullet_id_to_vehicle(bullet_id)\n actor_id = self._vehicle_index.actor_id_from_vehicle_id(collidee.id)\n # TODO: Should we specify the collidee as the vehicle ID instead of\n # the agent/social ID?\n collision = Collision(collidee_id=actor_id)\n self._vehicle_collisions[vehicle_id].append(collision)\n\n def _bullet_id_to_vehicle(self, bullet_id):\n for vehicle in self._vehicle_index.vehicles:\n if bullet_id == vehicle.chassis.bullet_id:\n return vehicle\n assert False, \"Only collisions with agent or social vehicles is supported\"\n\n def _try_emit_envision_state(self, provider_state, obs, scores):\n if not self._envision:\n return\n\n traffic = {}\n position = {}\n speed = {}\n heading = {}\n lane_ids = {}\n for v in provider_state.vehicles:\n if v.vehicle_id in self._vehicle_index.agent_vehicle_ids():\n # this is an agent controlled vehicle\n agent_id = self._vehicle_index.actor_id_from_vehicle_id(v.vehicle_id)\n agent_obs = obs[agent_id]\n is_boid_agent = self._agent_manager.is_boid_agent(agent_id)\n vehicle_obs = agent_obs[v.vehicle_id] if is_boid_agent else agent_obs\n\n if self._agent_manager.is_ego(agent_id):\n actor_type = envision_types.TrafficActorType.Agent\n mission_route_geometry = (\n self._vehicle_index.sensor_state_for_vehicle_id(\n v.vehicle_id\n ).mission_planner.route.geometry\n )\n else:\n actor_type = envision_types.TrafficActorType.SocialAgent\n mission_route_geometry = None\n\n point_cloud = vehicle_obs.lidar_point_cloud or ([], [], [])\n point_cloud = point_cloud[0] # (points, hits, rays), just want points\n\n # TODO: driven path should be read from vehicle_obs\n driven_path = self._vehicle_index.vehicle_by_id(\n v.vehicle_id\n ).driven_path_sensor()\n\n road_waypoints = []\n if vehicle_obs.road_waypoints:\n road_waypoints = [\n path\n for paths in vehicle_obs.road_waypoints.lanes.values()\n for path in paths\n ]\n traffic[v.vehicle_id] = envision_types.TrafficActorState(\n name=self._agent_manager.agent_name(agent_id),\n actor_type=actor_type,\n vehicle_type=envision_types.VehicleType.Car,\n position=v.pose.position,\n heading=v.pose.heading,\n speed=v.speed,\n actor_id=envision_types.format_actor_id(\n agent_id,\n 
v.vehicle_id,\n is_multi=is_boid_agent,\n ),\n events=vehicle_obs.events,\n waypoint_paths=(vehicle_obs.waypoint_paths or []) + road_waypoints,\n point_cloud=point_cloud,\n driven_path=driven_path,\n mission_route_geometry=mission_route_geometry,\n )\n speed[agent_id] = v.speed\n position[agent_id] = v.pose.position[:2]\n heading[agent_id] = v.pose.heading\n if (\n len(vehicle_obs.waypoint_paths) > 0\n and len(vehicle_obs.waypoint_paths[0]) > 0\n ):\n lane_ids[agent_id] = vehicle_obs.waypoint_paths[0][0].lane_id\n elif v.vehicle_id in self._vehicle_index.social_vehicle_ids():\n # this is a social vehicle\n traffic[v.vehicle_id] = envision_types.TrafficActorState(\n actor_type=envision_types.TrafficActorType.SocialVehicle,\n vehicle_type=v.vehicle_type,\n position=list(v.pose.position),\n heading=v.pose.heading,\n speed=v.speed,\n )\n\n bubble_geometry = [\n list(bubble.geometry.exterior.coords)\n for bubble in self._bubble_manager.bubbles\n ]\n\n dec_digits = len(\"{}\".format(self._timestep_sec)) - 2\n state = envision_types.State(\n traffic=traffic,\n scenario_id=self.scenario.scenario_hash,\n bubbles=bubble_geometry,\n scene_colors=SceneColors.EnvisionColors.value,\n scores=scores,\n ego_agent_ids=list(self._agent_manager.ego_agent_ids),\n position=position,\n speed=speed,\n heading=heading,\n lane_ids=lane_ids,\n frame_time=round(self._elapsed_sim_time + self._total_sim_time, dec_digits),\n )\n self._envision.send(state)\n\n def _try_emit_visdom_obs(self, obs):\n if not self._visdom:\n return\n self._visdom.send(obs)\n" ]
[ [ "sklearn.metrics.pairwise.euclidean_distances", "numpy.argwhere" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mishraka/taskonomy
[ "c8fae8117d91bb8d12c518fd15c18f99d53ce69c" ]
[ "taskbank/tools/run_img_task.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport importlib\nimport itertools\nimport math\nimport os\nimport pdb\nimport pickle\nimport random\nimport subprocess\nimport sys\nimport threading\nimport time\nimport json\nfrom multiprocessing import Pool\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\nimport scipy.misc\nimport skimage\nimport skimage.io\nfrom PIL import Image, ImageDraw, ImageFont\nfrom skimage import color\n\nimport init_paths\nimport lib.data.load_ops as load_ops\nimport models.architectures as architectures\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport transforms3d\nimport utils\nfrom data.load_ops import rescale_image, resize_rescale_image\nfrom lib.data.synset import *\nfrom models.sample_models import *\nfrom task_viz import *\n\nmatplotlib.use('Agg')\n\n\nparser = argparse.ArgumentParser(description='Viz Single Task')\n\nparser.add_argument('--task', dest='task')\nparser.set_defaults(task='NONE')\n\nparser.add_argument('--img', dest='im_name')\nparser.set_defaults(im_name='NONE')\n\nparser.add_argument('--store', dest='store_name')\nparser.set_defaults(store_name='NONE')\n\nparser.add_argument('--store-rep', dest='store_rep', action='store_true')\nparser.set_defaults(store_rep=False)\n\nparser.add_argument('--store-pred', dest='store_pred', action='store_true')\nparser.set_defaults(store_pred=False)\n\nparser.add_argument('--on-screen', dest='on_screen', action='store_true')\nparser.set_defaults(on_screen=False)\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nlist_of_tasks = 'autoencoder curvature denoise edge2d edge3d \\\nkeypoint2d keypoint3d colorization jigsaw \\\nreshade rgb2depth rgb2mist rgb2sfnorm \\\nroom_layout segment25d segment2d vanishing_point \\\nsegmentsemantic class_1000 class_places inpainting_whole'\nlist_of_tasks = list_of_tasks.split()\n\n\ndef generate_cfg(task):\n repo_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n CONFIG_DIR = os.path.join(repo_dir, 'experiments/final', task)\n ############## Load Configs ##############\n import utils\n import data.load_ops as load_ops\n from general_utils import RuntimeDeterminedEnviromentVars\n cfg = utils.load_config(CONFIG_DIR, nopause=True)\n RuntimeDeterminedEnviromentVars.register_dict(cfg)\n cfg['batch_size'] = 1\n if 'batch_size' in cfg['encoder_kwargs']:\n cfg['encoder_kwargs']['batch_size'] = 1\n cfg['model_path'] = os.path.join(\n repo_dir, 'temp', task, 'model.permanent-ckpt')\n cfg['root_dir'] = repo_dir\n return cfg\n\n\ndef run_to_task():\n import general_utils\n from general_utils import RuntimeDeterminedEnviromentVars\n\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n args = parser.parse_args()\n\n img = load_raw_image_center_crop(args.im_name)\n img = skimage.img_as_float(img)\n # save the cropped image in temp folder to prevent overwriting\n img_name = os.path.basename(args.im_name)\n name, ext = os.path.splitext(img_name)\n args.im_name = os.path.join('/tmp/', name + '_cropped' + ext)\n scipy.misc.toimage(np.squeeze(img), cmin=0.0, cmax=1.0).save(args.im_name)\n\n task = args.task\n if task not in list_of_tasks:\n raise ValueError('Task not supported')\n\n cfg = generate_cfg(task)\n\n # Since we observe that areas with pixel values closes to either 0 or 1 sometimes overflows, we clip pixels value\n low_sat_tasks = 'autoencoder curvature denoise edge2d edge3d \\\n keypoint2d keypoint3d \\\n reshade rgb2depth rgb2mist rgb2sfnorm \\\n segment25d segment2d 
room_layout'.split()\n if task in low_sat_tasks:\n cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat\n\n if task == 'jigsaw':\n img = cfg['input_preprocessing_fn'](img, target=cfg['target_dict'][random.randint(0, 99)],\n **cfg['input_preprocessing_fn_kwargs'])\n else:\n img = cfg['input_preprocessing_fn'](\n img, **cfg['input_preprocessing_fn_kwargs'])\n\n img = img[np.newaxis, :]\n\n if task == 'class_places' or task == 'class_1000':\n synset = get_synset(task)\n\n print(\"Doing {task}\".format(task=task))\n general_utils = importlib.reload(general_utils)\n tf.reset_default_graph()\n training_runners = {\n 'sess': tf.InteractiveSession(), 'coord': tf.train.Coordinator()}\n\n ############## Set Up Inputs ##############\n # tf.logging.set_verbosity( tf.logging.INFO )\n setup_input_fn = utils.setup_input\n inputs = setup_input_fn(cfg, is_training=False, use_filename_queue=False)\n RuntimeDeterminedEnviromentVars.load_dynamic_variables(inputs, cfg)\n RuntimeDeterminedEnviromentVars.populate_registered_variables()\n start_time = time.time()\n\n ############## Set Up Model ##############\n model = utils.setup_model(inputs, cfg, is_training=False)\n m = model['model']\n model['saver_op'].restore(training_runners['sess'], cfg['model_path'])\n\n predicted, representation = training_runners['sess'].run(\n [m.decoder_output, m.encoder_output], feed_dict={m.input_images: img})\n\n if args.store_rep:\n s_name, file_extension = os.path.splitext(args.store_name)\n with open('{}.npy'.format(s_name), 'wb') as fp:\n np.save(fp, np.squeeze(representation))\n\n if args.store_pred:\n s_name, file_extension = os.path.splitext(args.store_name)\n with open('{}_pred.npy'.format(s_name), 'wb') as fp:\n np.save(fp, np.squeeze(predicted))\n\n if task == 'segment2d' or task == 'segment25d':\n segmentation_pca(predicted, args.store_name)\n return\n if task == 'colorization':\n single_img_colorize(predicted, img, args.store_name)\n return\n\n if task == 'curvature':\n curvature_single_image(predicted, args.store_name)\n return\n\n just_rescale = ['autoencoder', 'denoise', 'edge2d',\n 'edge3d', 'keypoint2d', 'keypoint3d',\n 'reshade', 'rgb2sfnorm']\n\n if task in just_rescale:\n simple_rescale_img(predicted, args.store_name)\n return\n\n just_clip = ['rgb2depth', 'rgb2mist']\n if task in just_clip:\n depth_single_image(predicted, args.store_name)\n return\n\n if task == 'inpainting_whole':\n inpainting_bbox(predicted, args.store_name)\n return\n\n if task == 'segmentsemantic':\n semseg_single_image(predicted, img, args.store_name)\n return\n\n if task in ['class_1000', 'class_places']:\n top_5_classes = classification(predicted, synset, args.store_name)\n # save predicted classes into a text file\n s_name, file_extension = os.path.splitext(args.store_name)\n with open('{}_top_5.txt'.format(s_name), 'w') as fp:\n json.dump(top_5_classes, fp)\n return\n\n if task == 'vanishing_point':\n _ = plot_vanishing_point_smoothed(np.squeeze(\n predicted), (np.squeeze(img) + 1.)/2., args.store_name, [])\n return\n\n if task == 'room_layout':\n mean = np.array([0.006072743318127848, 0.010272365569691076, -3.135909774145468,\n 1.5603802322235532, 5.6228218371102496e-05, -1.5669352793761442,\n 5.622875878174759, 4.082800262277375, 2.7713941642895956])\n std = np.array([0.8669452525283652, 0.687915294956501, 2.080513632043758,\n 0.19627420479282623, 0.014680602791251812, 0.4183827359302299,\n 3.991778013006544, 2.703495278378409, 1.2269185938626304])\n predicted = predicted * std + mean\n 
plot_room_layout(np.squeeze(predicted), (np.squeeze(\n        img) + 1.)/2., args.store_name, [], cube_only=True)\n        return\n\n    if task == 'jigsaw':\n        predicted = np.argmax(predicted, axis=1)\n        perm = cfg['target_dict'][predicted[0]]\n        show_jigsaw((np.squeeze(img) + 1.)/2., perm, args.store_name)\n        return\n\n    ############## Clean Up ##############\n    training_runners['coord'].request_stop()\n    training_runners['coord'].join()\n    print(\"Done: {}\".format(task))\n\n    ############## Reset graph and paths ##############\n    tf.reset_default_graph()\n    training_runners['sess'].close()\n    return\n\n\nif __name__ == '__main__':\n    cwd = os.getcwd()\n    dl_folder = os.path.abspath(os.path.dirname(__file__))\n    parent_dir = '/'.join(dl_folder.split('/')[:-1])\n    os.chdir(parent_dir)\n    print(\"Changing directory to \", parent_dir)\n    run_to_task()\n    os.chdir(cwd)\n    print(\"Changing directory back to \", cwd)\n" ]
[ [ "tensorflow.InteractiveSession", "matplotlib.use", "numpy.squeeze", "tensorflow.train.Coordinator", "tensorflow.reset_default_graph", "numpy.argmax", "tensorflow.logging.set_verbosity", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
adabbott/cclib
[ "6a3d32fb4af96947a5ef577043441e24faa96ff6" ]
[ "src/cclib/method/orbitals.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# This file is part of cclib (http://cclib.github.io), a library for parsing\n# and interpreting the results of computational chemistry packages.\n#\n# Copyright (C) 2017, the cclib development team\n#\n# The library is free software, distributed under the terms of\n# the GNU Lesser General Public version 2.1 or later. You should have\n# received a copy of the license along with cclib. You can also access\n# the full license online at http://www.gnu.org/copyleft/lgpl.html.\n\n\"\"\"Analyses related to orbitals.\"\"\"\n\nimport logging\n\nimport numpy\n\nfrom cclib.method.calculationmethod import Method\n\n\nclass Orbitals(Method):\n \"\"\"A class for orbital related methods.\"\"\"\n\n def __init__(self, data, progress=None, \\\n loglevel=logging.INFO, logname=\"Log\"):\n\n # Call the __init__ method of the superclass.\n super(Orbitals, self).__init__(data, progress, loglevel, logname)\n self.fragresults = None\n\n def __str__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n return \"Orbitals\"\n\n def __repr__(self):\n \"\"\"Return a representation of the object.\"\"\"\n return \"Orbitals\"\n\n def closed_shell(self):\n \"\"\"Return Boolean indicating if system is closed shell.\"\"\"\n\n # If there are beta orbitals, we can assume the system is closed\n # shell if the orbital energies are identical within numerical accuracy.\n if len(self.data.mocoeffs) == 2:\n precision = 10e-6\n return numpy.allclose(*self.data.moenergies, atol=precision)\n\n # Restricted open shell will have one set of MOs but two HOMO indices,\n # and the indices should be different (otherwise it's still closed shell).\n if len(self.data.homos) == 2 and self.data.homos[0] != self.data.homos[1]:\n return False\n\n return True\n\n\nif __name__ == \"__main__\":\n import doctest\n from cclib.method import orbitals\n doctest.testmod(orbitals, verbose=False)\n" ]
[ [ "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
freidrichen/advent-of-code-2019
[ "08aca50e86700504d35c934a308a640a95de586e" ]
[ "day8/python/main.py" ]
[ "import numpy as np\n\n\ninput_file = \"../input.txt\"\nwith open(input_file, 'r') as f:\n input_text = f.read()\nimage_data = np.array([int(i) for i in input_text.strip()]).reshape((-1, 6, 25))\n\n# Test:\n# image_data = np.array([int(i) for i in \"123456789012\"]).reshape((-1, 2, 3))\n\nfewest_zeros = np.argmin([np.count_nonzero(layer == 0) for layer in image_data])\nnumber_of_ones = np.count_nonzero(image_data[fewest_zeros] == 1)\nnumber_of_twos = np.count_nonzero(image_data[fewest_zeros] == 2)\n\nprint(\"Part 1:\", number_of_ones*number_of_twos)\n\n\n# Test:\n# image_data = np.array([int(i) for i in \"0222112222120000\"]).reshape((-1, 2, 2))\n\nindices = np.expand_dims(np.argmax(image_data < 2, axis=0), axis=0)\nstacked = np.take_along_axis(image_data, indices, axis=0)\n\nprint(\"Part 2:\")\nprint(np.masked_zeros(stacked))\n" ]
[ [ "numpy.take_along_axis", "numpy.argmax", "numpy.count_nonzero", "numpy.masked_zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mccullerlp/OpenLoop
[ "fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d" ]
[ "phasor/signals/plot/ZPK_plot.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\nimport numpy as np\nimport math\n\nfrom matplotlib.text import OffsetFrom\nimport declarative\n\ndef eng_string(x, format='%.3f', si=False):\n '''\n Returns float/int value <x> formatted in a simplified engineering format -\n using an exponent that is a multiple of 3.\n\n format: printf-style string used to format the value before the exponent.\n\n si: if true, use SI suffix for exponent, e.g. k instead of e3, n instead of\n e-9 etc.\n\n E.g. with format='%.2f':\n 1.23e-08 => 12.30e-9\n 123 => 123.00\n 1230.0 => 1.23e3\n -1230000.0 => -1.23e6\n\n and with si=True:\n 1230.0 => 1.23k\n -1230000.0 => -1.23M\n\n '''\n sign = ''\n if x < 0:\n x = -x\n sign = '-'\n exp = int(math.floor(math.log10(x)))\n expe_3 = exp - (exp % 3)\n x3 = x / (10 ** expe_3)\n\n if si and expe_3 >= -24 and expe_3 <= 24 and expe_3 != 0:\n expe_3_text = 'yzafpnum kMGTPEZY'[(expe_3 - (-24)) / 3]\n elif expe_3 == 0:\n expe_3_text = ''\n else:\n expe_3_text = 'e%s' % expe_3\n\n return ('%s'+format+'%s') % (sign, x3, expe_3_text)\n\n\nclass ZPKAnnotator(declarative.OverridableObject):\n bbox_args = dict(boxstyle=\"round\", fc=\"0.8\")\n bbox_args = dict()\n arrow_args = dict(\n arrowstyle=\"->\",\n connectionstyle=\"angle,angleB=90,angleA=180,rad=3\",\n linewidth = .5,\n )\n\n style = dict(\n ls = '--',\n lw = .5,\n )\n style_vline = dict()\n style_annotate = dict()\n style_poles = dict()\n style_zeros = dict()\n style_real = dict()\n style_cplx = dict()\n style_poles_r = dict()\n style_zeros_r = dict()\n style_poles_c = dict()\n style_zeros_c = dict()\n\n def annolist(\n self,\n poles_r = (),\n poles_c = (),\n zeros_r = (),\n zeros_c = (),\n style = None,\n style_vline = None,\n style_annotate = None,\n style_poles = None,\n style_zeros = None,\n style_real = None,\n style_cplx = None,\n style_poles_r = None,\n style_zeros_r = None,\n style_poles_c = None,\n style_zeros_c = None,\n ):\n desc_by_x = []\n style = declarative.first_non_none(style , self.style )\n style_vline = declarative.first_non_none(style_vline , self.style_vline )\n style_annotate = declarative.first_non_none(style_annotate, self.style_annotate)\n style_poles = declarative.first_non_none(style_poles , self.style_poles )\n style_zeros = declarative.first_non_none(style_zeros , self.style_zeros )\n style_real = declarative.first_non_none(style_real , self.style_real )\n style_cplx = declarative.first_non_none(style_cplx , self.style_cplx )\n style_poles_r = declarative.first_non_none(style_poles_r , self.style_poles_r )\n style_zeros_r = declarative.first_non_none(style_zeros_r , self.style_zeros_r )\n style_poles_c = declarative.first_non_none(style_poles_c , self.style_poles_c )\n style_zeros_c = declarative.first_non_none(style_zeros_c , self.style_zeros_c )\n\n for root in poles_r:\n desc = \"Pole:{0}Hz\".format(eng_string(root))\n lkw = dict(style)\n loc = abs(root)\n lkw.update(style_vline)\n lkw.update(style_poles)\n lkw.update(style_real)\n lkw.update(style_poles_r)\n akw = dict(style)\n akw.update(style_annotate)\n akw.update(style_poles)\n akw.update(style_real)\n akw.update(style_poles_r)\n desc_by_x.append((loc, desc, lkw, akw))\n\n for root in zeros_r:\n desc = \"Zero:{0}Hz\".format(eng_string(root))\n lkw = dict(style)\n loc = abs(root)\n lkw.update(style_vline)\n lkw.update(style_zeros)\n lkw.update(style_real)\n lkw.update(style_zeros_r)\n akw = dict(style)\n akw.update(style_annotate)\n akw.update(style_zeros)\n 
akw.update(style_real)\n akw.update(style_zeros_r)\n desc_by_x.append((loc, desc, lkw, akw))\n\n for root in poles_c:\n desc = \"CPole:{0}+{1}i [Hz]\".format(eng_string(root.real), eng_string(root.imag))\n loc = abs(root.real) if abs(root.real) > abs(root.imag) else abs(root.imag)\n lkw = dict(style)\n lkw.update(style_vline)\n lkw.update(style_poles)\n lkw.update(style_cplx)\n lkw.update(style_poles_c)\n akw = dict(style)\n akw.update(style_annotate)\n akw.update(style_poles)\n akw.update(style_cplx)\n akw.update(style_poles_c)\n desc_by_x.append((loc, desc, lkw, akw))\n\n for root in zeros_c:\n desc = \"CZero:{0}+{1}i [Hz]\".format(eng_string(root.real), eng_string(root.imag))\n loc = abs(root.real) if abs(root.real) > abs(root.imag) else abs(root.imag)\n lkw = dict(style)\n lkw.update(style_vline)\n lkw.update(style_zeros)\n lkw.update(style_cplx)\n lkw.update(style_zeros_c)\n akw = dict(style)\n akw.update(style_annotate)\n akw.update(style_zeros)\n akw.update(style_cplx)\n akw.update(style_zeros_c)\n desc_by_x.append((loc, desc, lkw, akw))\n return desc_by_x\n\n def annotate(\n self,\n fB,\n desc_by_x = None,\n **kwargs\n ):\n if desc_by_x is None:\n desc_by_x = self.annolist(**kwargs)\n desc_by_x.sort()\n\n zs = np.array([tup[0] for tup in desc_by_x])\n xlow, xhigh = fB.ax_top.get_xlim()\n if fB.ax_bottom.get_xscale().find('log') != -1:\n xmid = np.exp((np.log(xlow) + np.log(xhigh))/2)\n else:\n xmid = (xlow + xhigh)/2\n idx_mid = np.searchsorted(zs, xmid)\n\n left_list = desc_by_x[:idx_mid]\n right_list = desc_by_x[idx_mid:]\n fsize_sep = 15\n\n for idx, (z, desc, lkw, akw) in enumerate(reversed(left_list)):\n #top elements\n if z < xlow:\n z = xlow\n arrowkw = dict(self.arrow_args)\n arrowkw.update(akw)\n an = fB.ax_top.annotate(\n desc,\n xy=(z, 1), xycoords=fB.ax_top.get_xaxis_transform(),\n xytext=(0, 15 + fsize_sep * idx), textcoords=OffsetFrom(fB.ax_top.bbox, (1, 1), \"points\"),\n ha = \"right\", va = \"bottom\",\n bbox = self.bbox_args,\n arrowprops = arrowkw,\n )\n for ax in fB.ax_list:\n ax.axvline(float(z), **lkw)\n\n for idx, (z, desc, lkw, akw) in enumerate(right_list):\n #bottom elements\n if z > xhigh:\n z = xhigh\n arrowkw = dict(self.arrow_args)\n arrowkw.update(akw)\n an = fB.ax_bottom.annotate(\n desc,\n xy=(z, -.12), xycoords=fB.ax_bottom.get_xaxis_transform(),\n xytext=(0, -34 - fsize_sep * idx), textcoords=OffsetFrom(fB.ax_bottom.bbox, (0, 0), \"points\"),\n ha=\"left\", va=\"bottom\",\n bbox=self.bbox_args,\n arrowprops = arrowkw,\n )\n for ax in fB.ax_list:\n ax.axvline(float(z), **lkw)\n return\n\n\n" ]
[ [ "numpy.log", "numpy.array", "numpy.searchsorted", "matplotlib.text.OffsetFrom" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
claire-s11/automl
[ "57621e8f3eaddd2c0b421c65c0bbd323ebcf8f2d" ]
[ "efficientdet/keras/efficientdet_arch_keras.py" ]
[ "# Lint as: python3\n# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras implementation of efficientdet.\"\"\"\nimport functools\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\n\nimport efficientdet_arch as legacy_arch\nimport hparams_config\nimport utils\nfrom keras import utils_keras\n\n\nclass BiFPNLayer(tf.keras.layers.Layer):\n \"\"\"A Keras Layer implementing Bidirectional Feature Pyramids.\"\"\"\n\n def __init__(self, min_level: int, max_level: int, image_size: int,\n fpn_weight_method: str, apply_bn_for_resampling: bool,\n is_training_bn: bool, conv_after_downsample: bool,\n use_native_resize_op: bool, data_format: str, pooling_type: str,\n fpn_num_filters: int, conv_bn_act_pattern: bool, act_type: str,\n separable_conv: bool, strategy: bool, fpn_name: str, **kwargs):\n self.min_level = min_level\n self.max_level = max_level\n self.image_size = image_size\n self.feat_sizes = utils.get_feat_sizes(image_size, max_level)\n\n self.fpn_weight_method = fpn_weight_method\n self.apply_bn_for_resampling = apply_bn_for_resampling\n self.is_training_bn = is_training_bn\n self.conv_after_downsample = conv_after_downsample\n self.use_native_resize_op = use_native_resize_op\n self.data_format = data_format\n self.fpn_num_filters = fpn_num_filters\n self.pooling_type = pooling_type\n self.conv_bn_act_pattern = conv_bn_act_pattern\n self.act_type = act_type\n self.strategy = strategy\n self.separable_conv = separable_conv\n\n self.fpn_config = None\n self.fpn_name = fpn_name\n\n super(BiFPNLayer, self).__init__(**kwargs)\n\n def call(self, feats):\n # @TODO: Implement this with keras logic\n return legacy_arch.build_bifpn_layer(feats, self.feat_sizes, self)\n\n def get_config(self):\n base_config = super(BiFPNLayer, self).get_config()\n\n return {\n **base_config,\n 'min_level': self.min_level,\n 'max_level': self.max_level,\n 'image_size': self.image_size,\n 'fpn_name': self.fpn_name,\n 'fpn_weight_method': self.fpn_weight_method,\n 'apply_bn_for_resampling': self.apply_bn_for_resampling,\n 'is_training_bn': self.is_training_bn,\n 'conv_after_downsample': self.conv_after_downsample,\n 'use_native_resize_op': self.use_native_resize_op,\n 'data_format': self.data_format,\n 'pooling_type': self.pooling_type,\n 'fpn_num_filters': self.fpn_num_filters,\n 'conv_bn_act_pattern': self.conv_bn_act_pattern,\n 'act_type': self.act_type,\n 'separable_conv': self.separable_conv,\n 'strategy': self.strategy,\n }\n\n\nclass ResampleFeatureMap(tf.keras.layers.Layer):\n \"\"\"Resample feature map for downsampling or upsampling.\"\"\"\n\n def __init__(self,\n target_height,\n target_width,\n target_num_channels,\n apply_bn=False,\n is_training=None,\n conv_after_downsample=False,\n use_native_resize_op=False,\n pooling_type=None,\n strategy=None,\n data_format=None,\n name='resample_feature_map'):\n super(ResampleFeatureMap, 
self).__init__(name='resample_{}'.format(name))\n self.apply_bn = apply_bn\n self.is_training = is_training\n self.data_format = data_format\n self.target_num_channels = target_num_channels\n self.target_height = target_height\n self.target_width = target_width\n self.strategy = strategy\n self.conv_after_downsample = conv_after_downsample\n self.use_native_resize_op = use_native_resize_op\n self.pooling_type = pooling_type\n self.conv2d = tf.keras.layers.Conv2D(self.target_num_channels, (1, 1),\n padding='same',\n data_format=self.data_format)\n self.bn = utils_keras.build_batch_norm(is_training_bn=self.is_training,\n data_format=self.data_format,\n strategy=self.strategy,\n name='bn')\n\n def build(self, input_shape):\n \"\"\"Resample input feature map to have target number of channels and size.\"\"\"\n if self.data_format == 'channels_first':\n _, num_channels, height, width = input_shape.as_list()\n else:\n _, height, width, num_channels = input_shape.as_list()\n\n if height is None or width is None or num_channels is None:\n raise ValueError(\n 'shape[1] or shape[2] or shape[3] of feat is None (shape:{}).'.format(\n input_shape.as_list()))\n if self.apply_bn and self.is_training is None:\n raise ValueError('If BN is applied, need to provide is_training')\n self.num_channels = num_channels\n self.height = height\n self.width = width\n height_stride_size = int((self.height - 1) // self.target_height + 1)\n width_stride_size = int((self.width - 1) // self.target_width + 1)\n\n if self.pooling_type == 'max' or self.pooling_type is None:\n # Use max pooling in default.\n self.pool2d = tf.keras.layers.MaxPooling2D(\n pool_size=[height_stride_size + 1, width_stride_size + 1],\n strides=[height_stride_size, width_stride_size],\n padding='SAME',\n data_format=self.data_format)\n elif self.pooling_type == 'avg':\n self.pool2d = tf.keras.layers.AveragePooling2D(\n pool_size=[height_stride_size + 1, width_stride_size + 1],\n strides=[height_stride_size, width_stride_size],\n padding='SAME',\n data_format=self.data_format)\n else:\n raise ValueError('Unknown pooling type: {}'.format(self.pooling_type))\n\n height_scale = self.target_height // self.height\n width_scale = self.target_width // self.width\n if (self.use_native_resize_op or self.target_height % self.height != 0 or\n self.target_width % self.width != 0):\n self.upsample2d = tf.keras.layers.UpSampling2D(\n (height_scale, width_scale), data_format=self.data_format)\n else:\n self.upsample2d = functools.partial(legacy_arch.nearest_upsampling,\n height_scale=height_scale,\n width_scale=width_scale,\n data_format=self.data_format)\n super(ResampleFeatureMap, self).build(input_shape)\n\n def _maybe_apply_1x1(self, feat):\n \"\"\"Apply 1x1 conv to change layer width if necessary.\"\"\"\n if self.num_channels != self.target_num_channels:\n feat = self.conv2d(feat)\n if self.apply_bn:\n feat = self.bn(feat, training=self.is_training)\n return feat\n\n def call(self, feat):\n # If conv_after_downsample is True, when downsampling, apply 1x1 after\n # downsampling for efficiency.\n if self.height > self.target_height and self.width > self.target_width:\n if not self.conv_after_downsample:\n feat = self._maybe_apply_1x1(feat)\n feat = self.pool2d(feat)\n if self.conv_after_downsample:\n feat = self._maybe_apply_1x1(feat)\n elif self.height <= self.target_height and self.width <= self.target_width:\n feat = self._maybe_apply_1x1(feat)\n if self.height < self.target_height or self.width < self.target_width:\n feat = self.upsample2d(feat)\n else:\n 
raise ValueError(\n 'Incompatible target feature map size: target_height: {},'\n 'target_width: {}'.format(self.target_height, self.target_width))\n\n return feat\n\n def get_config(self):\n config = {\n 'apply_bn': self.apply_bn,\n 'is_training': self.is_training,\n 'data_format': self.data_format,\n 'target_num_channels': self.target_num_channels,\n 'target_height': self.target_height,\n 'target_width': self.target_width,\n 'strategy': self.strategy,\n 'conv_after_downsample': self.conv_after_downsample,\n 'use_native_resize_op': self.use_native_resize_op,\n 'pooling_type': self.pooling_type,\n }\n base_config = super(ResampleFeatureMap, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ClassNet(tf.keras.layers.Layer):\n \"\"\"Object class prediction network.\"\"\"\n\n def __init__(self,\n num_classes=90,\n num_anchors=9,\n num_filters=32,\n min_level=3,\n max_level=7,\n is_training=False,\n act_type='swish',\n repeats=4,\n separable_conv=True,\n survival_prob=None,\n strategy=None,\n data_format='channels_last',\n name='class_net',\n **kwargs):\n \"\"\"Initialize the ClassNet.\n\n Args:\n num_classes: number of classes.\n num_anchors: number of anchors.\n num_filters: number of filters for \"intermediate\" layers.\n min_level: minimum level for features.\n max_level: maximum level for features.\n is_training: True if we train the BatchNorm.\n act_type: String of the activation used.\n repeats: number of intermediate layers.\n separable_conv: True to use separable_conv instead of conv2D.\n survival_prob: if a value is set then drop connect will be used.\n strategy: string to specify training strategy for TPU/GPU/CPU.\n data_format: string of 'channel_first' or 'channels_last'.\n name: the name of this layerl.\n **kwargs: other parameters.\n \"\"\"\n\n super(ClassNet, self).__init__(name=name, **kwargs)\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n self.num_filters = num_filters\n self.min_level = min_level\n self.max_level = max_level\n self.repeats = repeats\n self.separable_conv = separable_conv\n self.is_training = is_training\n self.survival_prob = survival_prob\n self.act_type = act_type\n self.strategy = strategy\n self.data_format = data_format\n self.use_dc = survival_prob and is_training\n\n self.conv_ops = []\n self.bns = []\n\n for i in range(self.repeats):\n # If using SeparableConv2D\n if self.separable_conv:\n self.conv_ops.append(\n tf.keras.layers.SeparableConv2D(\n filters=self.num_filters,\n depth_multiplier=1,\n pointwise_initializer=tf.initializers.VarianceScaling(),\n depthwise_initializer=tf.initializers.VarianceScaling(),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='class-%d' % i))\n # If using Conv2d\n else:\n self.conv_ops.append(\n tf.keras.layers.Conv2D(\n filters=self.num_filters,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='class-%d' % i))\n\n bn_per_level = {}\n for level in range(self.min_level, self.max_level + 1):\n bn_per_level[level] = utils_keras.build_batch_norm(\n is_training_bn=self.is_training,\n init_zero=False,\n strategy=self.strategy,\n data_format=self.data_format,\n name='class-%d-bn-%d' % (i, level),\n )\n self.bns.append(bn_per_level)\n\n if self.separable_conv:\n self.classes = tf.keras.layers.SeparableConv2D(\n 
filters=self.num_classes * self.num_anchors,\n depth_multiplier=1,\n pointwise_initializer=tf.initializers.VarianceScaling(),\n depthwise_initializer=tf.initializers.VarianceScaling(),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.constant_initializer(-np.math.log((1 - 0.01) /\n 0.01)),\n padding='same',\n name='class-predict')\n\n else:\n self.classes = tf.keras.layers.Conv2D(\n filters=self.num_classes * self.num_anchors,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.constant_initializer(-np.math.log((1 - 0.01) /\n 0.01)),\n padding='same',\n name='class-predict')\n\n def call(self, inputs, **kwargs):\n \"\"\"Call ClassNet.\"\"\"\n\n class_outputs = {}\n for level in range(self.min_level, self.max_level + 1):\n image = inputs[level]\n for i in range(self.repeats):\n original_image = image\n image = self.conv_ops[i](image)\n image = self.bns[i][level](image, training=self.is_training)\n if self.act_type:\n image = utils.activation_fn(image, self.act_type)\n if i > 0 and self.use_dc:\n image = utils.drop_connect(image, self.is_training,\n self.survival_prob)\n image = image + original_image\n\n class_outputs[level] = self.classes(image)\n\n return class_outputs\n\n def get_config(self):\n base_config = super(ClassNet, self).get_config()\n\n return {\n **base_config,\n 'num_classes': self.num_classes,\n 'num_anchors': self.num_anchors,\n 'num_filters': self.num_filters,\n 'min_level': self.min_level,\n 'max_level': self.max_level,\n 'is_training': self.is_training,\n 'act_type': self.act_type,\n 'repeats': self.repeats,\n 'separable_conv': self.separable_conv,\n 'survival_prob': self.survival_prob,\n 'strategy': self.strategy,\n 'data_format': self.data_format,\n }\n\n\nclass BoxNet(tf.keras.layers.Layer):\n \"\"\"Box regression network.\"\"\"\n\n def __init__(self,\n num_anchors=9,\n num_filters=32,\n min_level=3,\n max_level=7,\n is_training=False,\n act_type='swish',\n repeats=4,\n separable_conv=True,\n survival_prob=None,\n strategy=None,\n data_format='channels_last',\n name='box_net',\n **kwargs):\n \"\"\"Initialize BoxNet.\n\n Args:\n num_anchors: number of anchors used.\n num_filters: number of filters for \"intermediate\" layers.\n min_level: minimum level for features.\n max_level: maximum level for features.\n is_training: True if we train the BatchNorm.\n act_type: String of the activation used.\n repeats: number of \"intermediate\" layers.\n separable_conv: True to use separable_conv instead of conv2D.\n survival_prob: if a value is set then drop connect will be used.\n strategy: string to specify training strategy for TPU/GPU/CPU.\n data_format: string of 'channel_first' or 'channels_last'.\n name: Name of the layer.\n **kwargs: other parameters.\n \"\"\"\n\n super(BoxNet, self).__init__(name=name, **kwargs)\n\n self.num_anchors = num_anchors\n self.num_filters = num_filters\n self.min_level = min_level\n self.max_level = max_level\n self.repeats = repeats\n self.separable_conv = separable_conv\n self.is_training = is_training\n self.survival_prob = survival_prob\n self.act_type = act_type\n self.strategy = strategy\n self.data_format = data_format\n self.use_dc = survival_prob and is_training\n\n self.conv_ops = []\n self.bns = []\n\n for i in range(self.repeats):\n # If using SeparableConv2D\n if self.separable_conv:\n self.conv_ops.append(\n tf.keras.layers.SeparableConv2D(\n filters=self.num_filters,\n 
depth_multiplier=1,\n pointwise_initializer=tf.initializers.VarianceScaling(),\n depthwise_initializer=tf.initializers.VarianceScaling(),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-%d' % i))\n # If using Conv2d\n else:\n self.conv_ops.append(\n tf.keras.layers.Conv2D(\n filters=self.num_filters,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-%d' % i))\n\n bn_per_level = {}\n for level in range(self.min_level, self.max_level + 1):\n bn_per_level[level] = utils_keras.build_batch_norm(\n is_training_bn=self.is_training,\n init_zero=False,\n strategy=self.strategy,\n data_format=self.data_format,\n name='box-%d-bn-%d' % (i, level))\n self.bns.append(bn_per_level)\n\n if self.separable_conv:\n self.boxes = tf.keras.layers.SeparableConv2D(\n filters=4 * self.num_anchors,\n depth_multiplier=1,\n pointwise_initializer=tf.initializers.VarianceScaling(),\n depthwise_initializer=tf.initializers.VarianceScaling(),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-predict')\n\n else:\n self.boxes = tf.keras.layers.Conv2D(\n filters=4 * self.num_anchors,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n data_format=self.data_format,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-predict')\n\n def call(self, inputs, **kwargs):\n \"\"\"Call boxnet.\"\"\"\n box_outputs = {}\n for level in range(self.min_level, self.max_level + 1):\n image = inputs[level]\n for i in range(self.repeats):\n original_image = image\n image = self.conv_ops[i](image)\n image = self.bns[i][level](image, training=self.is_training)\n if self.act_type:\n image = utils.activation_fn(image, self.act_type)\n if i > 0 and self.use_dc:\n image = utils.drop_connect(image, self.is_training,\n self.survival_prob)\n image = image + original_image\n\n box_outputs[level] = self.boxes(image)\n\n return box_outputs\n\n def get_config(self):\n base_config = super(BoxNet, self).get_config()\n\n return {\n **base_config,\n 'num_anchors': self.num_anchors,\n 'num_filters': self.num_filters,\n 'min_level': self.min_level,\n 'max_level': self.max_level,\n 'is_training': self.is_training,\n 'act_type': self.act_type,\n 'repeats': self.repeats,\n 'separable_conv': self.separable_conv,\n 'survival_prob': self.survival_prob,\n 'strategy': self.strategy,\n 'data_format': self.data_format,\n }\n\n\ndef build_class_and_box_outputs(feats, config):\n \"\"\"Builds box net and class net.\n\n Args:\n feats: input tensor.\n config: a dict-like config, including all parameters.\n\n Returns:\n A tuple (class_outputs, box_outputs) for class/box predictions.\n \"\"\"\n num_anchors = len(config.aspect_ratios) * config.num_scales\n num_filters = config.fpn_num_filters\n class_outputs = ClassNet(num_classes=config.num_classes,\n num_anchors=num_anchors,\n num_filters=num_filters,\n min_level=config.min_level,\n max_level=config.max_level,\n is_training=config.is_training_bn,\n act_type=config.act_type,\n repeats=config.box_class_repeats,\n separable_conv=config.separable_conv,\n survival_prob=config.survival_prob,\n strategy=config.strategy,\n data_format=config.data_format)(feats)\n\n box_outputs = BoxNet(num_anchors=num_anchors,\n 
num_filters=num_filters,\n min_level=config.min_level,\n max_level=config.max_level,\n is_training=config.is_training_bn,\n act_type=config.act_type,\n repeats=config.box_class_repeats,\n separable_conv=config.separable_conv,\n survival_prob=config.survival_prob,\n strategy=config.strategy,\n data_format=config.data_format)(feats)\n\n return class_outputs, box_outputs\n\n\ndef efficientdet(features, model_name=None, config=None, **kwargs):\n \"\"\"Build EfficientDet model.\n\n Args:\n features: input tensor.\n model_name: String of the model (eg. efficientdet-d0)\n config: Dict of parameters for the network\n **kwargs: other parameters.\n\n Returns:\n A tuple (class_outputs, box_outputs) for predictions.\n \"\"\"\n if not config and not model_name:\n raise ValueError('please specify either model name or config')\n\n if not config:\n config = hparams_config.get_efficientdet_config(model_name)\n elif isinstance(config, dict):\n config = hparams_config.Config(config) # wrap dict in Config object\n\n if kwargs:\n config.override(kwargs)\n\n logging.info(config)\n\n # build backbone features.\n features = legacy_arch.build_backbone(features, config)\n logging.info('backbone params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n # build feature network.\n fpn_feats = legacy_arch.build_feature_network(features, config)\n logging.info('backbone+fpn params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n # build class and box predictions.\n class_outputs, box_outputs = build_class_and_box_outputs(fpn_feats, config)\n logging.info('backbone+fpn+box params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n return class_outputs, box_outputs\n" ]
[ [ "tensorflow.keras.layers.AveragePooling2D", "tensorflow.initializers.VarianceScaling", "tensorflow.zeros_initializer", "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.layers.Conv2D", "tensorflow.random_normal_initializer", "tensorflow.keras.layers.MaxPooling2D", "numpy.math.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
EileenWang90/mmpose
[ "3fa1328a3b6351bf9b35df60d4d959973a6f8a71" ]
[ "mmpose/models/backbones/litehrnet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule,\n build_conv_layer, build_norm_layer, constant_init,\n normal_init)\nfrom torch.nn.modules.batchnorm import _BatchNorm\nimport torch.utils.checkpoint as cp\n\nimport mmcv\nfrom mmpose.utils import get_root_logger\nfrom mmpose.models.registry import BACKBONES\nfrom mmpose.models.backbones.resnet import BasicBlock, Bottleneck\nfrom mmpose.models.backbones.utils import load_checkpoint, channel_shuffle\n\n\nclass SpatialWeighting(nn.Module):\n\n def __init__(self,\n channels,\n ratio=16,\n conv_cfg=None,\n act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))):\n super().__init__()\n if isinstance(act_cfg, dict):\n act_cfg = (act_cfg, act_cfg)\n assert len(act_cfg) == 2\n assert mmcv.is_tuple_of(act_cfg, dict)\n self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n self.conv1 = ConvModule(\n in_channels=channels,\n out_channels=int(channels / ratio),\n kernel_size=1,\n stride=1,\n conv_cfg=conv_cfg,\n act_cfg=act_cfg[0])\n self.conv2 = ConvModule(\n in_channels=int(channels / ratio),\n out_channels=channels,\n kernel_size=1,\n stride=1,\n conv_cfg=conv_cfg,\n act_cfg=act_cfg[1])\n\n def forward(self, x):\n out = self.global_avgpool(x)\n out = self.conv1(out)\n out = self.conv2(out)\n return x * out\n\n\nclass CrossResolutionWeighting(nn.Module):\n\n def __init__(self,\n channels,\n ratio=16,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))):\n super().__init__()\n if isinstance(act_cfg, dict):\n act_cfg = (act_cfg, act_cfg)\n assert len(act_cfg) == 2\n assert mmcv.is_tuple_of(act_cfg, dict)\n self.channels = channels\n total_channel = sum(channels)\n self.conv1 = ConvModule(\n in_channels=total_channel,\n out_channels=int(total_channel / ratio),\n kernel_size=1,\n stride=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg[0])\n self.conv2 = ConvModule(\n in_channels=int(total_channel / ratio),\n out_channels=total_channel,\n kernel_size=1,\n stride=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg[1])\n\n def forward(self, x):\n mini_size = x[-1].size()[-2:]\n out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]]\n out = torch.cat(out, dim=1)\n out = self.conv1(out)\n out = self.conv2(out)\n out = torch.split(out, self.channels, dim=1)\n out = [\n s * F.interpolate(a, size=s.size()[-2:], mode='nearest')\n for s, a in zip(x, out)\n ]\n return out\n\n\nclass ConditionalChannelWeighting(nn.Module):\n\n def __init__(self,\n in_channels,\n stride,\n reduce_ratio,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n with_cp=False):\n super().__init__()\n self.with_cp = with_cp\n self.stride = stride\n assert stride in [1, 2]\n\n branch_channels = [channel // 2 for channel in in_channels]\n\n self.cross_resolution_weighting = CrossResolutionWeighting(\n branch_channels,\n ratio=reduce_ratio,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg)\n\n self.depthwise_convs = nn.ModuleList([\n ConvModule(\n channel,\n channel,\n kernel_size=3,\n stride=self.stride,\n padding=1,\n groups=channel,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None) for channel in branch_channels\n ])\n\n self.spatial_weighting = nn.ModuleList([\n SpatialWeighting(channels=channel, ratio=4)\n for channel in branch_channels\n ])\n\n def forward(self, x):\n\n def _inner_forward(x):\n x = [s.chunk(2, dim=1) for s in x]\n x1 = [s[0] for s in x]\n x2 = [s[1] for s in x]\n\n x2 = self.cross_resolution_weighting(x2)\n x2 = [dw(s) for s, dw in 
zip(x2, self.depthwise_convs)]\n x2 = [sw(s) for s, sw in zip(x2, self.spatial_weighting)]\n\n out = [torch.cat([s1, s2], dim=1) for s1, s2 in zip(x1, x2)]\n out = [channel_shuffle(s, 2) for s in out]\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\nclass Stem(nn.Module):\n\n def __init__(self,\n in_channels,\n stem_channels,\n out_channels,\n expand_ratio,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n with_cp=False):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.with_cp = with_cp\n\n self.conv1 = ConvModule(\n in_channels=in_channels,\n out_channels=stem_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=dict(type='ReLU'))\n\n mid_channels = int(round(stem_channels * expand_ratio))\n branch_channels = stem_channels // 2\n if stem_channels == self.out_channels:\n inc_channels = self.out_channels - branch_channels\n else:\n inc_channels = self.out_channels - stem_channels\n\n self.branch1 = nn.Sequential(\n ConvModule(\n branch_channels,\n branch_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n groups=branch_channels,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None),\n ConvModule(\n branch_channels,\n inc_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU')),\n )\n\n self.expand_conv = ConvModule(\n branch_channels,\n mid_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU'))\n self.depthwise_conv = ConvModule(\n mid_channels,\n mid_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n groups=mid_channels,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n self.linear_conv = ConvModule(\n mid_channels,\n branch_channels\n if stem_channels == self.out_channels else stem_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU'))\n\n def forward(self, x):\n\n def _inner_forward(x):\n x = self.conv1(x)\n x1, x2 = x.chunk(2, dim=1)\n\n x2 = self.expand_conv(x2)\n x2 = self.depthwise_conv(x2)\n x2 = self.linear_conv(x2)\n\n out = torch.cat((self.branch1(x1), x2), dim=1)\n\n out = channel_shuffle(out, 2)\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\nclass IterativeHead(nn.Module):\n\n def __init__(self, in_channels, conv_cfg=None, norm_cfg=dict(type='BN')):\n super().__init__()\n projects = []\n num_branchs = len(in_channels)\n self.in_channels = in_channels[::-1]\n\n for i in range(num_branchs):\n if i != num_branchs - 1:\n projects.append(\n DepthwiseSeparableConvModule(\n in_channels=self.in_channels[i],\n out_channels=self.in_channels[i + 1],\n kernel_size=3,\n stride=1,\n padding=1,\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU'),\n dw_act_cfg=None,\n pw_act_cfg=dict(type='ReLU')))\n else:\n projects.append(\n DepthwiseSeparableConvModule(\n in_channels=self.in_channels[i],\n out_channels=self.in_channels[i],\n kernel_size=3,\n stride=1,\n padding=1,\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU'),\n dw_act_cfg=None,\n pw_act_cfg=dict(type='ReLU')))\n self.projects = nn.ModuleList(projects)\n\n def forward(self, x):\n x = x[::-1]\n\n y = []\n last_x = None\n for i, s in enumerate(x):\n if last_x is not 
None:\n last_x = F.interpolate(\n last_x,\n size=s.size()[-2:],\n mode='bilinear',\n align_corners=True)\n s = s + last_x\n s = self.projects[i](s)\n y.append(s)\n last_x = s\n\n return y[::-1]\n\n\nclass ShuffleUnit(nn.Module):\n \"\"\"InvertedResidual block for ShuffleNetV2 backbone.\n\n Args:\n in_channels (int): The input channels of the block.\n out_channels (int): The output channels of the block.\n stride (int): Stride of the 3x3 convolution layer. Default: 1\n conv_cfg (dict): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU').\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n stride=1,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n with_cp=False):\n super().__init__()\n self.stride = stride\n self.with_cp = with_cp\n\n branch_features = out_channels // 2\n if self.stride == 1:\n assert in_channels == branch_features * 2, (\n f'in_channels ({in_channels}) should equal to '\n f'branch_features * 2 ({branch_features * 2}) '\n 'when stride is 1')\n\n if in_channels != branch_features * 2:\n assert self.stride != 1, (\n f'stride ({self.stride}) should not equal 1 when '\n f'in_channels != branch_features * 2')\n\n if self.stride > 1:\n self.branch1 = nn.Sequential(\n ConvModule(\n in_channels,\n in_channels,\n kernel_size=3,\n stride=self.stride,\n padding=1,\n groups=in_channels,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None),\n ConvModule(\n in_channels,\n branch_features,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg),\n )\n\n self.branch2 = nn.Sequential(\n ConvModule(\n in_channels if (self.stride > 1) else branch_features,\n branch_features,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg),\n ConvModule(\n branch_features,\n branch_features,\n kernel_size=3,\n stride=self.stride,\n padding=1,\n groups=branch_features,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None),\n ConvModule(\n branch_features,\n branch_features,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg))\n\n def forward(self, x):\n\n def _inner_forward(x):\n if self.stride > 1:\n out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)\n else:\n x1, x2 = x.chunk(2, dim=1)\n out = torch.cat((x1, self.branch2(x2)), dim=1)\n\n out = channel_shuffle(out, 2)\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\nclass LiteHRModule(nn.Module):\n\n def __init__(\n self,\n num_branches,\n num_blocks,\n in_channels,\n reduce_ratio,\n module_type,\n multiscale_output=False,\n with_fuse=True,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n with_cp=False,\n ):\n super().__init__()\n self._check_branches(num_branches, in_channels)\n\n self.in_channels = in_channels\n self.num_branches = num_branches\n\n self.module_type = module_type\n self.multiscale_output = multiscale_output\n self.with_fuse = with_fuse\n self.norm_cfg = norm_cfg\n self.conv_cfg = conv_cfg\n self.with_cp = with_cp\n\n if self.module_type == 'LITE':\n self.layers = self._make_weighting_blocks(num_blocks, reduce_ratio)\n 
elif self.module_type == 'NAIVE':\n self.layers = self._make_naive_branches(num_branches, num_blocks)\n if self.with_fuse:\n self.fuse_layers = self._make_fuse_layers()\n self.relu = nn.ReLU()\n\n def _check_branches(self, num_branches, in_channels):\n \"\"\"Check input to avoid ValueError.\"\"\"\n if num_branches != len(in_channels):\n error_msg = f'NUM_BRANCHES({num_branches}) ' \\\n f'!= NUM_INCHANNELS({len(in_channels)})'\n raise ValueError(error_msg)\n\n def _make_weighting_blocks(self, num_blocks, reduce_ratio, stride=1):\n layers = []\n for i in range(num_blocks):\n layers.append(\n ConditionalChannelWeighting(\n self.in_channels,\n stride=stride,\n reduce_ratio=reduce_ratio,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n with_cp=self.with_cp))\n\n return nn.Sequential(*layers)\n\n def _make_one_branch(self, branch_index, num_blocks, stride=1):\n \"\"\"Make one branch.\"\"\"\n layers = []\n layers.append(\n ShuffleUnit(\n self.in_channels[branch_index],\n self.in_channels[branch_index],\n stride=stride,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=dict(type='ReLU'),\n with_cp=self.with_cp))\n for i in range(1, num_blocks):\n layers.append(\n ShuffleUnit(\n self.in_channels[branch_index],\n self.in_channels[branch_index],\n stride=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=dict(type='ReLU'),\n with_cp=self.with_cp))\n\n return nn.Sequential(*layers)\n\n def _make_naive_branches(self, num_branches, num_blocks):\n \"\"\"Make branches.\"\"\"\n branches = []\n\n for i in range(num_branches):\n branches.append(self._make_one_branch(i, num_blocks))\n\n return nn.ModuleList(branches)\n\n def _make_fuse_layers(self):\n \"\"\"Make fuse layer.\"\"\"\n if self.num_branches == 1:\n return None\n\n num_branches = self.num_branches\n in_channels = self.in_channels\n fuse_layers = []\n num_out_branches = num_branches if self.multiscale_output else 1\n for i in range(num_out_branches):\n fuse_layer = []\n for j in range(num_branches):\n if j > i:\n fuse_layer.append(\n nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n in_channels[j],\n in_channels[i],\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False),\n build_norm_layer(self.norm_cfg, in_channels[i])[1],\n nn.Upsample(\n scale_factor=2**(j - i), mode='nearest')))\n elif j == i:\n fuse_layer.append(None)\n else:\n conv_downsamples = []\n for k in range(i - j):\n if k == i - j - 1:\n conv_downsamples.append(\n nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n in_channels[j],\n in_channels[j],\n kernel_size=3,\n stride=2,\n padding=1,\n groups=in_channels[j],\n bias=False),\n build_norm_layer(self.norm_cfg,\n in_channels[j])[1],\n build_conv_layer(\n self.conv_cfg,\n in_channels[j],\n in_channels[i],\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False),\n build_norm_layer(self.norm_cfg,\n in_channels[i])[1]))\n else:\n conv_downsamples.append(\n nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n in_channels[j],\n in_channels[j],\n kernel_size=3,\n stride=2,\n padding=1,\n groups=in_channels[j],\n bias=False),\n build_norm_layer(self.norm_cfg,\n in_channels[j])[1],\n build_conv_layer(\n self.conv_cfg,\n in_channels[j],\n in_channels[j],\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False),\n build_norm_layer(self.norm_cfg,\n in_channels[j])[1],\n nn.ReLU(inplace=True)))\n fuse_layer.append(nn.Sequential(*conv_downsamples))\n fuse_layers.append(nn.ModuleList(fuse_layer))\n\n return nn.ModuleList(fuse_layers)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n if 
self.num_branches == 1:\n return [self.layers[0](x[0])]\n\n if self.module_type == 'LITE':\n out = self.layers(x)\n elif self.module_type == 'NAIVE':\n for i in range(self.num_branches):\n x[i] = self.layers[i](x[i])\n out = x\n\n if self.with_fuse:\n out_fuse = []\n for i in range(len(self.fuse_layers)):\n y = out[0] if i == 0 else self.fuse_layers[i][0](out[0])\n for j in range(self.num_branches):\n if i == j:\n y += out[j]\n else:\n y += self.fuse_layers[i][j](out[j])\n out_fuse.append(self.relu(y))\n out = out_fuse\n elif not self.multiscale_output:\n out = [out[0]]\n return out\n\n\[email protected]_module()\nclass LiteHRNet(nn.Module):\n \"\"\"Lite-HRNet backbone.\n\n `High-Resolution Representations for Labeling Pixels and Regions\n <https://arxiv.org/abs/1904.04514>`_\n\n Args:\n extra (dict): detailed configuration for each stage of HRNet.\n in_channels (int): Number of input image channels. Default: 3.\n conv_cfg (dict): dictionary to construct and config conv layer.\n norm_cfg (dict): dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n\n Example:\n >>> from mmpose.models import HRNet\n >>> import torch\n >>> extra = dict(\n >>> stage1=dict(\n >>> num_modules=1,\n >>> num_branches=1,\n >>> block='BOTTLENECK',\n >>> num_blocks=(4, ),\n >>> num_channels=(64, )),\n >>> stage2=dict(\n >>> num_modules=1,\n >>> num_branches=2,\n >>> block='BASIC',\n >>> num_blocks=(4, 4),\n >>> num_channels=(32, 64)),\n >>> stage3=dict(\n >>> num_modules=4,\n >>> num_branches=3,\n >>> block='BASIC',\n >>> num_blocks=(4, 4, 4),\n >>> num_channels=(32, 64, 128)),\n >>> stage4=dict(\n >>> num_modules=3,\n >>> num_branches=4,\n >>> block='BASIC',\n >>> num_blocks=(4, 4, 4, 4),\n >>> num_channels=(32, 64, 128, 256)))\n >>> self = HRNet(extra, in_channels=1)\n >>> self.eval()\n >>> inputs = torch.rand(1, 1, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... 
print(tuple(level_out.shape))\n (1, 32, 8, 8)\n (1, 64, 4, 4)\n (1, 128, 2, 2)\n (1, 256, 1, 1)\n \"\"\"\n\n def __init__(self,\n extra,\n in_channels=3,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n norm_eval=False,\n with_cp=False,\n zero_init_residual=False):\n super().__init__()\n self.extra = extra\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n self.zero_init_residual = zero_init_residual\n\n self.stem = Stem(\n in_channels,\n stem_channels=self.extra['stem']['stem_channels'],\n out_channels=self.extra['stem']['out_channels'],\n expand_ratio=self.extra['stem']['expand_ratio'],\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg)\n\n self.num_stages = self.extra['num_stages']\n self.stages_spec = self.extra['stages_spec']\n\n num_channels_last = [\n self.stem.out_channels,\n ]\n for i in range(self.num_stages):\n num_channels = self.stages_spec['num_channels'][i]\n num_channels = [num_channels[i] for i in range(len(num_channels))]\n setattr(\n self, 'transition{}'.format(i),\n self._make_transition_layer(num_channels_last, num_channels))\n\n stage, num_channels_last = self._make_stage(\n self.stages_spec, i, num_channels, multiscale_output=True)\n setattr(self, 'stage{}'.format(i), stage)\n\n self.with_head = self.extra['with_head']\n if self.with_head:\n self.head_layer = IterativeHead(\n in_channels=num_channels_last,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n )\n\n def _make_transition_layer(self, num_channels_pre_layer,\n num_channels_cur_layer):\n \"\"\"Make transition layer.\"\"\"\n num_branches_cur = len(num_channels_cur_layer)\n num_branches_pre = len(num_channels_pre_layer)\n\n transition_layers = []\n for i in range(num_branches_cur):\n if i < num_branches_pre:\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n transition_layers.append(\n nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n num_channels_pre_layer[i],\n num_channels_pre_layer[i],\n kernel_size=3,\n stride=1,\n padding=1,\n groups=num_channels_pre_layer[i],\n bias=False),\n build_norm_layer(self.norm_cfg,\n num_channels_pre_layer[i])[1],\n build_conv_layer(\n self.conv_cfg,\n num_channels_pre_layer[i],\n num_channels_cur_layer[i],\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False),\n build_norm_layer(self.norm_cfg,\n num_channels_cur_layer[i])[1],\n nn.ReLU()))\n else:\n transition_layers.append(None)\n else:\n conv_downsamples = []\n for j in range(i + 1 - num_branches_pre):\n in_channels = num_channels_pre_layer[-1]\n out_channels = num_channels_cur_layer[i] \\\n if j == i - num_branches_pre else in_channels\n conv_downsamples.append(\n nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n in_channels,\n in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n groups=in_channels,\n bias=False),\n build_norm_layer(self.norm_cfg, in_channels)[1],\n build_conv_layer(\n self.conv_cfg,\n in_channels,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False),\n build_norm_layer(self.norm_cfg, out_channels)[1],\n nn.ReLU()))\n transition_layers.append(nn.Sequential(*conv_downsamples))\n\n return nn.ModuleList(transition_layers)\n\n def _make_stage(self,\n stages_spec,\n stage_index,\n in_channels,\n multiscale_output=True):\n num_modules = stages_spec['num_modules'][stage_index]\n num_branches = stages_spec['num_branches'][stage_index]\n num_blocks = stages_spec['num_blocks'][stage_index]\n reduce_ratio = stages_spec['reduce_ratios'][stage_index]\n with_fuse = stages_spec['with_fuse'][stage_index]\n module_type 
= stages_spec['module_type'][stage_index]\n\n modules = []\n for i in range(num_modules):\n # multi_scale_output is only used last module\n if not multiscale_output and i == num_modules - 1:\n reset_multiscale_output = False\n else:\n reset_multiscale_output = True\n\n modules.append(\n LiteHRModule(\n num_branches,\n num_blocks,\n in_channels,\n reduce_ratio,\n module_type,\n multiscale_output=reset_multiscale_output,\n with_fuse=with_fuse,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n with_cp=self.with_cp))\n in_channels = modules[-1].in_channels\n\n return nn.Sequential(*modules), in_channels\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n x = self.stem(x)\n\n y_list = [x]\n for i in range(self.num_stages):\n x_list = []\n transition = getattr(self, 'transition{}'.format(i))\n for j in range(self.stages_spec['num_branches'][i]):\n if transition[j]:\n if j >= len(y_list):\n x_list.append(transition[j](y_list[-1]))\n else:\n x_list.append(transition[j](y_list[j]))\n else:\n x_list.append(y_list[j])\n y_list = getattr(self, 'stage{}'.format(i))(x_list)\n\n x = y_list\n if self.with_head:\n x = self.head_layer(x)\n\n return [x[0]]\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode.\"\"\"\n super().train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.ModuleList", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.AdaptiveAvgPool2d", "torch.utils.checkpoint.checkpoint", "torch.nn.Upsample", "torch.split", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
owenagnel/RockPaperScissors
[ "d7ac01570d925ac0bc23dc816721cedc88ac9439" ]
[ "source/gestureanalyst.py" ]
[ "'''GestureAnalyst class analyses frames and returns drawn hand\nand predicted gesture if one is found'''\nimport handtracker as ht\nimport torch\nimport model as cm\n# pylint: disable=E1101\n\nclass GestureAnalyst():\n '''Analyses frames for gestures'''\n def __init__(self, weights_location = 'source/model/model_weights.pth'):\n self.model_weights_loc = weights_location\n self.handtracker = ht.HandDetector(max_num_hands=1,min_detection_confidence=0.5,\n min_tracking_confidence=0.5)\n self.classifier = cm.get_classifier()\n self.classifier.load_state_dict(torch.load('source/model/model_weights.pth'))\n self.classifier.eval()\n\n def analyse(self, frame):\n '''returns drawn frames and gesture (as an int from 0 to 2)'''\n gesture = None\n image, positions = self.handtracker.find_position(frame)\n if positions.any():\n tensor_rep = torch.from_numpy(positions)\n gesture = torch.argmax(self.classifier(tensor_rep)).item()\n return image, gesture\n" ]
[ [ "torch.from_numpy", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Aria-K-Alethia/Semi-supervised-VAE
[ "5ede4e755515109a34d8f1bcc279c3c915a6c5ce" ]
[ "main.py" ]
[ "'''\n Copyright (c) 2020 [email protected]\n\n Description:\n train and exp code\n Licence:\n MIT\n THE USER OF THIS CODE AGREES TO ASSUME ALL LIABILITY FOR THE USE OF THIS CODE.\n Any use of this code should display all the info above.\n'''\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nimport torch.distributions as dist\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nfrom dataset import get_mnist\nfrom model import VAE, CVAE, StackedVAE, GMVAE\nfrom utils import onehot_vector\nfrom itertools import cycle\n\nparser = argparse.ArgumentParser(description='VAE MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1024, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--train', action='store_true', default=False)\nparser.add_argument('--output', type=str, default='./model/model.pt')\nparser.add_argument('--label', action='store_true', default=False)\nparser.add_argument('--alpha', type=float, default=1)\nparser.add_argument('--architecture', type=str)\nparser.add_argument('--pretrained-vae', type=str, default='./model/vae.pt')\nparser.add_argument('--labels-per-class', type=int)\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\nlabelled, unlabelled, validation = get_mnist(location=\"./data\", batch_size=args.batch_size, labels_per_class=args.labels_per_class)\n\nprev_loss = float('inf')\n\nX = 784\nY = 10\nZ = 20\nH = 400\nC = [400, 128]\nif args.architecture == 'vae':\n model = VAE(X, Y, Z, H)\nelif args.architecture == 'cvae':\n model = CVAE(X, Y, Z, H, C)\nelif args.architecture == 'stackedvae':\n vae = VAE(X, Y, Z, H)\n vae.load_state_dict(torch.load(args.pretrained_vae))\n model = StackedVAE(X, Y, Z, H, C, vae)\nelif args.architecture == 'gmvae':\n model = GMVAE(X, Y, Z, H, C)\nelse:\n raise ValueError('Model architecture {} is not defined'.format(args.architecture))\nmodel = model.to(device)\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\n\ndef train(epoch):\n model.train()\n train_loss = 0\n print('Train start, labelled: {}, unlablled: {}'.format(len(labelled), len(unlabelled)))\n if epoch == 1:\n for x, y in labelled:\n continue\n for x, y in unlabelled:\n continue\n for batch_idx, ((x, y), (u, _)) in enumerate(zip(cycle(labelled), unlabelled)):\n #for (x, y), (u, _) in zip(cycle(labelled), unlabelled):\n x = x.to(device)\n y = y.to(device)\n u = u.to(device)\n optimizer.zero_grad()\n # labelled data\n l_recon_batch, L, classification_loss, l_loss_state, l_state = model(x, y)\n u_recon_batch, U, _, u_loss_state, u_state = model(u)\n if args.architecture == 'vae':\n loss = U\n else:\n loss = L + U + 
args.alpha * classification_loss\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, L_BCE: {:.6f}, L_KLD: {:.6f}, L_CLAS: {:.6f}, U_BCE: {:.6f}, U_KLD: {:.6f}'.format(\n epoch, batch_idx * len(x), len(unlabelled.dataset),\n 100. * batch_idx / len(unlabelled),\n loss.item() / len(x),\n l_loss_state['reconstruction'].item() / len(x),\n l_loss_state['kl'].item() / len(x),\n l_loss_state['classification'].item() / len(x),\n u_loss_state['reconstruction'].item() / len(x),\n u_loss_state['kl'].item() / len(x)))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(unlabelled.dataset)))\n\ndef lr_schedule():\n old_lr = optimizer.param_groups[0]['lr']\n factor = 0.4\n lr = old_lr * factor\n for pg in optimizer.param_groups:\n pg['lr'] = lr\n print('Learning rate change: {} -> {}'.format(old_lr, lr))\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n outdata = []\n with torch.no_grad():\n for i, (x, y) in enumerate(validation):\n x = x.to(device)\n y = y.to(device)\n recon_batch, loss, classification_loss, loss_state, state = model(x, y)\n test_loss += loss_state['reconstruction'].item()\n if i == 0:\n n = min(x.shape[0], 8)\n comparison = torch.cat([x.view(x.shape[0], 1, 28, 28)[:n],\n recon_batch.view(x.shape[0], 1, 28, 28)[:n]])\n save_image(comparison.cpu(),\n 'results/{}_reconstruction_'.format(args.architecture) + str(epoch) + '.png', nrow=n)\n\n global prev_loss\n test_loss /= len(validation.dataset)\n if test_loss > prev_loss:\n lr_schedule()\n prev_loss = test_loss\n print('====> Test set loss: {:.4f}'.format(test_loss))\n\ndef random_sample(epoch):\n with torch.no_grad():\n if args.architecture != 'gmvae':\n sample = torch.randn(64, 20).to(device)\n y = onehot_vector(torch.randint(0, 10, (64,)), 10).to(device).type_as(sample)\n else:\n y = onehot_vector(torch.randint(0, 10, (64,)), 10).to(device).float()\n loc = model.loc(y)\n scale = model.sp(model.scale(y))\n temp_dist = dist.Independent(dist.Normal(loc, scale), 1)\n sample = temp_dist.rsample()\n if args.label:\n sample = torch.cat([sample, y], dim=1)\n sample = model.decode(sample).cpu()\n save_image(sample.view(64, 1, 28, 28),\n 'results/{}_sample_'.format(args.architecture) + str(epoch) + '.png')\n\ndef main_train():\n for epoch in range(1, args.epochs + 1):\n train(epoch)\n test(epoch)\n random_sample(epoch)\n state_dict = model.state_dict()\n torch.save(state_dict, args.output)\n\ndef analysis():\n state_dict = torch.load(args.output)\n model.load_state_dict(state_dict)\n embedding, label = None, None\n # latent variable visualization and unsupervised accuracy\n plt.figure()\n tsne = TSNE(2, 50, init='pca')\n #tsne = PCA(n_components=2, whiten=True)\n correct_count = 0\n with torch.no_grad():\n for i, (x, y) in enumerate(validation):\n x = x.to(device)\n y = y.to(device)\n recon_batch, _, _, _, state= model(x, y)\n mu = state['mean']\n if embedding is None:\n embedding = state['mean']\n label = y\n else:\n embedding = torch.cat([embedding, mu], 0)\n label = torch.cat([label, y], 0)\n if args.architecture == 'stackedvae':\n feat = model.vae.sample(x)\n logits = model.classify(feat)\n else:\n logits = model.classify(x)\n temp = torch.argmax(logits, dim=-1)\n correct_count += (torch.argmax(logits, dim=-1).squeeze() == y).sum().item()\n accuracy = correct_count / len(validation.dataset)\n print('Unsupervised accuracy: {:.2f}%'.format(accuracy * 100))\n embedding2 = embedding.cpu().numpy()\n 
label = label.cpu().numpy()\n label = label[:10000]\n embedding2 = embedding2[:10000]\n #pca.fit(embedding)\n #out = pca.transform(embedding)\n #print(pca.explained_variance_ratio_)\n out = tsne.fit_transform(embedding2)\n out = (out - out.min(0)) / (out.max(0) - out.min(0))\n for i in range(10):\n d = out[label==i]\n plt.scatter(d[:, 0], d[:, 1], label=str(i))\n plt.legend(loc='upper right')\n f = plt.gcf()\n f.savefig('./output/{}_{}_latent_variable.png'.format(args.architecture, args.labels_per_class))\n plt.clf()\n '''\n with torch.no_grad():\n sample = torch.diag(torch.ones(20)).to(device)\n if args.label:\n y = onehot_vector(torch.arange(10).repeat(2), 10).to(device).type_as(sample)\n sample = torch.cat([sample, y], dim=1)\n sample = model.decode(sample).cpu()\n save_image(sample.view(20, 1, 28, 28), './output/sample.png')\n buf = []\n for i in range(10):\n mu = embedding[label==i]\n mu = torch.mean(mu, 0)\n buf.append(mu)\n sample = torch.stack(buf)\n with torch.no_grad():\n if args.label:\n y = onehot_vector(torch.arange(10), 10).to(device).type_as(sample)\n sample = torch.cat([sample, y], dim=1)\n sample = model.decode(sample).cpu()\n save_image(sample.view(10, 1, 28, 28), './output/mean.png')\n '''\n buf = [-3, -1.5, 0, 1.5, 3]\n if args.architecture != 'gmvae':\n base_sample = torch.zeros(Z).to(device)\n sample = []\n with torch.no_grad():\n for i in range(Z * len(buf)):\n temp = base_sample.clone()\n temp[i//len(buf)] = buf[i%len(buf)]\n sample.append(temp)\n sample = torch.stack(sample)\n if args.label:\n y = onehot_vector(torch.cat([torch.ones(Z * len(buf) // Y) * i for i in range(Y)]), Y).to(device).type_as(sample)\n sample = torch.cat([sample, y], dim=1)\n sample = model.decode(sample).cpu()\n else:\n y = onehot_vector(torch.cat([torch.ones(Z * len(buf) // Y) * i for i in range(Y)]), Y).to(device).float()\n mean = model.loc(y)\n scale = model.scale(y)\n for i in range(y.shape[0]):\n dim = i // len(buf)\n index = i % len(buf)\n mean[i, dim] = mean[i, dim] + buf[index] * scale[i, dim]\n sample = torch.cat([mean, y], dim=1)\n sample= model.decode(sample).cpu()\n save_image(sample.view(Z * len(buf), 1, 28, 28), './output/{}_traverse.png'.format(args.architecture), nrow=2 * len(buf))\n\nif __name__ == \"__main__\":\n if args.train:\n main_train()\n else:\n analysis()\n\n" ]
[ [ "matplotlib.pyplot.legend", "torch.randint", "torch.load", "torch.cat", "torch.manual_seed", "matplotlib.pyplot.figure", "torch.argmax", "torch.zeros", "torch.stack", "torch.randn", "matplotlib.pyplot.gcf", "sklearn.manifold.TSNE", "matplotlib.pyplot.clf", "torch.no_grad", "torch.cuda.is_available", "torch.distributions.Normal", "torch.device", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
robust-ml/example
[ "6b971397c308a80d92976a4b8e4c7e4f209a9905" ]
[ "attack.py" ]
[ "import robustml\nimport sys\nimport tensorflow as tf\nimport numpy as np\n\nclass InceptionV3PGDAttack(robustml.attack.Attack):\n def __init__(self, sess, model, epsilon, max_steps=100, learning_rate=0.001, debug=False):\n self._sess = sess\n self._model = model\n self._epsilon = epsilon\n self._max_steps = max_steps\n self._learning_rate = learning_rate\n self._debug = debug\n\n self._label = tf.placeholder(tf.int32, ())\n one_hot = tf.expand_dims(tf.one_hot(self._label, 1000), axis=0)\n self._loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=model.logits, labels=one_hot)\n self._grad, = tf.gradients(self._loss, model.input)\n\n def run(self, x, y, target):\n mult = -1\n if target is None:\n target = y\n mult = 1\n adv = np.copy(x)\n lower = np.clip(x - self._epsilon, 0, 1)\n upper = np.clip(x + self._epsilon, 0, 1)\n for i in range(self._max_steps):\n p, l, g = self._sess.run(\n [self._model.predictions, self._loss, self._grad],\n {self._model.input: adv, self._label: target}\n )\n if self._debug:\n print(\n 'attack: step %d/%d, loss = %g (true %d, predicted %d)' % (i+1, self._max_steps, l, y, p),\n file=sys.stderr\n )\n if p != y:\n # we're done\n if self._debug:\n print('returning early', file=sys.stderr)\n break\n adv += mult * self._learning_rate * np.sign(g)\n adv = np.clip(adv, lower, upper)\n return adv\n\nclass InceptionV3FGSMAttack(robustml.attack.Attack):\n def __init__(self, sess, model, epsilon):\n self._sess = sess\n self._model = model\n self._epsilon = epsilon\n\n self._label = tf.placeholder(tf.int32, ())\n one_hot = tf.expand_dims(tf.one_hot(self._label, 1000), axis=0)\n self._loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=model.logits, labels=one_hot)\n self._grad, = tf.gradients(self._loss, model.input)\n\n def run(self, x, y, target):\n mult = -1\n if target is None:\n target = y\n mult = 1\n g = self._sess.run(self._grad, {self._model.input: x, self._label: y})\n adv = np.clip(x + mult * self._epsilon * np.sign(g), 0, 1)\n return adv\n\nclass NullAttack(robustml.attack.Attack):\n def run(self, x, y, target):\n return x\n" ]
[ [ "numpy.clip", "tensorflow.gradients", "tensorflow.placeholder", "numpy.sign", "numpy.copy", "tensorflow.one_hot", "tensorflow.nn.softmax_cross_entropy_with_logits_v2" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
GIS-PuppetMaster/tinyflow
[ "9351a95957b68c37f480c169ff9dd012b68b4b1e" ]
[ "tests/LeNet5/memoryTest.py" ]
[ "import numpy as np\nfrom pycode.tinyflow import autodiff as ad\nfrom pycode.tinyflow import gpu_op\nfrom pycode.tinyflow import ndarray\nfrom pycode.tinyflow import train\n\n\ndef test_dense():\n\n inputs = ad.Placeholder(\"inputs\")\n filters = ad.Variable(\"filters\")\n b = ad.Variable(\"b\")\n y_ = ad.Variable(name=\"y_\")\n\n # ini\n ctx = ndarray.gpu(0)\n x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))\n filters_val = np.ones((32, 1, 5, 5)) * 0.001\n b_val = np.ones((32))\n y_val = np.zeros((5, 1))\n x_val = ndarray.array(x_val, ctx)\n filters_val = ndarray.array(filters_val, ctx)\n y_val = ndarray.array(y_val, ctx)\n\n # outputs = ad.convolution_2d_forward_op(inputs, filters, \"NCHW\", \"VALID\", 1, 1)\n outputs = ad.conv2withbias(inputs, filters, b, \"NCHW\", \"VALID\", 1, 1)\n\n aph = 0.001\n t = train.Adam_minimize(outputs, aph)\n # outputs_pool = ad.pooling_2d_forward_op(outputs, \"NCHW\", \"max\", 0, 0, 1, 1, 2, 2)\n #outputs_relu = ad.activation_forward_op(outputs, \"NCHW\", \"relu\")\n #executor = train.TrainExecutor([outputs], ctx=ctx)\n t.init_Variable({filters: filters_val, b: b_val})\n\n for i in range(100000):\n if i % 100 ==0:\n print(i)\n loss_val = t.run(feed_dict={inputs: x_val, b: b_val})\n\n print(loss_val[0].asnumpy())\n\n# test_dense()\n\ndef test_pool():\n\n inputs = ad.Placeholder(\"inputs\")\n filters = ad.Variable(\"filters\")\n b = ad.Variable(\"b\")\n y_ = ad.Variable(name=\"y_\")\n\n # ini\n ctx = ndarray.gpu(0)\n x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))\n filters_val = np.ones((32, 1, 5, 5)) * 0.001\n b_val = np.ones((32))\n y_val = np.zeros((5, 1))\n x_val = ndarray.array(x_val, ctx)\n filters_val = ndarray.array(filters_val, ctx)\n y_val = ndarray.array(y_val, ctx)\n\n # outputs = ad.convolution_2d_forward_op(inputs, filters, \"NCHW\", \"VALID\", 1, 1)\n outputs = ad.conv2withbias(inputs, filters, b, \"NCHW\", \"VALID\", 1, 1)\n outputs_pool = ad.pooling_2d_forward_op(outputs, \"NCHW\", \"max\", 0, 0, 1, 1, 2, 2)\n\n aph = 0.001\n t = train.Adam_minimize(outputs_pool, aph)\n\n #outputs_relu = ad.activation_forward_op(outputs, \"NCHW\", \"relu\")\n #executor = train.TrainExecutor([outputs], ctx=ctx)\n t.init_Variable({filters: filters_val, b: b_val})\n\n for i in range(100000):\n if i % 100 ==0:\n print(i)\n loss_val = t.run(feed_dict={inputs: x_val, b: b_val})\n\n print(loss_val[0].asnumpy())\n\n# test_pool()\n\ndef test_bn():\n\n inputs = ad.Placeholder(\"inputs\")\n filters = ad.Variable(\"filters\")\n b = ad.Variable(\"b\")\n y_ = ad.Variable(name=\"y_\")\n\n # ini\n ctx = ndarray.gpu(0)\n x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))\n filters_val = np.ones((32, 1, 5, 5)) * 0.001\n b_val = np.ones((32))\n y_val = np.zeros((5, 1))\n x_val = ndarray.array(x_val, ctx)\n filters_val = ndarray.array(filters_val, ctx)\n y_val = ndarray.array(y_val, ctx)\n\n # outputs = ad.convolution_2d_forward_op(inputs, filters, \"NCHW\", \"VALID\", 1, 1)\n outputs = ad.conv2withbias(inputs, filters, b, \"NCHW\", \"VALID\", 1, 1)\n outputs_pool = ad.pooling_2d_forward_op(outputs, \"NCHW\", \"max\", 0, 0, 1, 1, 2, 2)\n outputs_bn = ad.bn_forward_op(outputs_pool, \"NCHW\", \"pre_activation\")\n\n aph = 0.001\n t = train.Adam_minimize(outputs_bn, aph)\n\n #outputs_relu = ad.activation_forward_op(outputs, \"NCHW\", \"relu\")\n #executor = train.TrainExecutor([outputs], ctx=ctx)\n t.init_Variable({filters: filters_val, b: b_val})\n\n for i in range(100000):\n if i % 100 ==0:\n print(i)\n loss_val = 
t.run(feed_dict={inputs: x_val, b: b_val})\n\n print(loss_val[0].asnumpy())\n\n# test_bn()\n\ndef test_flat():\n\n inputs = ad.Placeholder(\"inputs\")\n filters = ad.Variable(\"filters\")\n b = ad.Variable(\"b\")\n y_ = ad.Variable(name=\"y_\")\n\n # ini\n ctx = ndarray.gpu(0)\n x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))\n filters_val = np.ones((32, 1, 5, 5)) * 0.001\n b_val = np.ones((32))\n y_val = np.zeros((5, 1))\n x_val = ndarray.array(x_val, ctx)\n filters_val = ndarray.array(filters_val, ctx)\n y_val = ndarray.array(y_val, ctx)\n\n # outputs = ad.convolution_2d_forward_op(inputs, filters, \"NCHW\", \"VALID\", 1, 1)\n outputs = ad.conv2withbias(inputs, filters, b, \"NCHW\", \"VALID\", 1, 1)\n outputs_pool = ad.pooling_2d_forward_op(outputs, \"NCHW\", \"max\", 0, 0, 1, 1, 2, 2)\n outputs_flat = ad.flatten_op(outputs_pool)\n\n aph = 0.001\n t = train.Adam_minimize(outputs_flat, aph)\n\n #outputs_relu = ad.activation_forward_op(outputs, \"NCHW\", \"relu\")\n #executor = train.TrainExecutor([outputs], ctx=ctx)\n t.init_Variable({filters: filters_val, b: b_val})\n\n for i in range(100000):\n if i % 100 ==0:\n print(i)\n loss_val = t.run(feed_dict={inputs: x_val, b: b_val})\n\n print(loss_val[0].asnumpy())\n\n# test_flat()\n\ndef test_bnfully():\n\n inputs = ad.Placeholder(\"inputs\")\n filters = ad.Variable(\"filters\")\n b = ad.Variable(\"b\")\n y_ = ad.Variable(name=\"y_\")\n\n # ini\n ctx = ndarray.gpu(0)\n x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))\n filters_val = np.ones((32, 1, 5, 5)) * 0.001\n b_val = np.ones((32))\n y_val = np.zeros((5, 1))\n x_val = ndarray.array(x_val, ctx)\n filters_val = ndarray.array(filters_val, ctx)\n y_val = ndarray.array(y_val, ctx)\n\n # outputs = ad.convolution_2d_forward_op(inputs, filters, \"NCHW\", \"VALID\", 1, 1)\n outputs = ad.conv2withbias(inputs, filters, b, \"NCHW\", \"VALID\", 1, 1)\n outputs_pool = ad.pooling_2d_forward_op(outputs, \"NCHW\", \"max\", 0, 0, 1, 1, 2, 2)\n outputs_flat = ad.flatten_op(outputs_pool)\n outputs_bn = ad.fullybn_forward_op(outputs_flat, \"NCHW\")\n\n aph = 0.001\n t = train.Adam_minimize(outputs_bn, aph)\n\n #outputs_relu = ad.activation_forward_op(outputs, \"NCHW\", \"relu\")\n #executor = train.TrainExecutor([outputs], ctx=ctx)\n t.init_Variable({filters: filters_val, b: b_val})\n\n for i in range(100000):\n if i % 100 ==0:\n print(i)\n loss_val = t.run(feed_dict={inputs: x_val, b: b_val})\n\n print(loss_val[0].asnumpy())\n\n# test_bnfully()\n\ndef test_matmul():\n\n inputs = ad.Placeholder(\"inputs\")\n w = ad.Variable(\"w\")\n b = ad.Variable(\"b\")\n y_ = ad.Variable(name=\"y_\")\n\n # ini\n ctx = ndarray.gpu(0)\n x_val = np.linspace(0, 1000, 9000000).reshape((3000, 3000))\n w_val = np.linspace(0, 1000, 9000000).reshape((3000, 3000))\n b_val = np.ones((32))\n y_val = np.zeros((5, 1))\n x_val = ndarray.array(x_val, ctx)\n w_val = ndarray.array(w_val, ctx)\n y_val = ndarray.array(y_val, ctx)\n\n xw = ad.matmul_op(inputs, w)\n\n aph = 0.001\n t = train.Adam_minimize(xw, aph)\n\n #outputs_relu = ad.activation_forward_op(outputs, \"NCHW\", \"relu\")\n #executor = train.TrainExecutor([outputs], ctx=ctx)\n t.init_Variable({w: w_val, b: b_val})\n\n for i in range(100000):\n if i % 100 ==0:\n print(i)\n loss_val = t.run(feed_dict={inputs: x_val, b: b_val})\n\n print(loss_val[0].asnumpy())\n\ntest_matmul()" ]
[ [ "numpy.zeros", "numpy.linspace", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yyc9268/sealion_count
[ "96237e54f04a5282d1e9757a823adbb0ccddae4e" ]
[ "cnn.py" ]
[ "# NOAA SEALION COUNTING CHALLENGE COMPETITION CODE 58th/385\r\n# Author : Young-chul Yoon\r\n\r\nimport tensorflow as tf\r\n\r\n############################### CNN materials ###########################################\r\ndef weight_variable(shape, w_name):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial, name=w_name)\r\ndef bias_variable(shape, b_name):\r\n initial = tf.constant(0.1, shape=shape)\r\n return tf.Variable(initial, name = b_name)\r\ndef conv2d(x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\r\ndef max_pool_2x2(x):\r\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n\r\n#def batch_normalization(x, w, b, is_training):\r\n #z = conv2d(x, w) + b\r\n #return tf.layers.batch_normalization(z, center=False, scale=False, training=is_training)\r\n\r\ndef fc_mul(flat_input, input_size, fc_size, w_name, b_name):\r\n W_fc = weight_variable([input_size, fc_size], w_name)\r\n b_fc = bias_variable([fc_size], b_name)\r\n fc_out = tf.matmul(flat_input, W_fc) + b_fc\r\n return fc_out\r\n\r\ndef fc_drop(flat_input, input_size, fc_size, keep_prob, w_name, b_name):\r\n fc_out = fc_mul(flat_input, input_size, fc_size, w_name, b_name)\r\n fc_relu = tf.nn.relu(fc_out)\r\n fc_drop = tf.nn.dropout(fc_relu, keep_prob)\r\n return fc_drop\r\n\r\ndef batch_normalization(x, n_out, phase_train):\r\n \"\"\"\r\n Batch normalization on convolutional maps.\r\n Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow\r\n Args:\r\n x: Tensor, 4D BHWD input maps\r\n n_out: integer, depth of input maps\r\n phase_train: boolean tf.Varialbe, true indicates training phase\r\n scope: string, variable scope\r\n Return:\r\n normed: batch-normalized maps\r\n \"\"\"\r\n with tf.variable_scope('bn'):\r\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\r\n name='beta', trainable=True)\r\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\r\n name='gamma', trainable=True)\r\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\r\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\r\n\r\n def mean_var_with_update():\r\n ema_apply_op = ema.apply([batch_mean, batch_var])\r\n with tf.control_dependencies([ema_apply_op]):\r\n return tf.identity(batch_mean), tf.identity(batch_var)\r\n\r\n mean, var = tf.cond(phase_train,\r\n mean_var_with_update,\r\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\r\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\r\n return normed\r\n##########################################################################################\r\n\r\ndef featureExtract(img, ft_sz, is_training, sz):\r\n W_conv1 = weight_variable([ft_sz, ft_sz, 3, 64], sz + \"_det_w1\") # patch size, in_channel, out_channel\r\n b_conv1 = bias_variable([64], sz + \"_det_b1\") # bias vector with a component for each out_channel\r\n out_conv1_1 = conv2d(img, W_conv1) + b_conv1\r\n batch1_1 = batch_normalization(out_conv1_1, 64, is_training)\r\n relu_out1_1 = tf.nn.relu(batch1_1)\r\n\r\n W_re1 = weight_variable([ft_sz, ft_sz, 64, 64], sz + \"_det_w_re1\") #\r\n b_re1 = bias_variable([64], sz + \"_det_b_re1\")\r\n out_conv1_2 = conv2d(relu_out1_1, W_re1) + b_re1\r\n batch1_2 = batch_normalization(out_conv1_2, 64, is_training)\r\n relu_out1_2 = tf.nn.relu(batch1_2) #\r\n h_pool1 = max_pool_2x2(relu_out1_2) #\r\n\r\n out_size = 8 * 8 * 64\r\n\r\n h_pool2_flat = tf.reshape(h_pool1, [-1, out_size])\r\n\r\n return h_pool2_flat, out_size\r\n\r\ndef 
deepDetect(img, ft_sz, is_training):\r\n\r\n W_conv1 = weight_variable([ft_sz, ft_sz, 3, 64], \"det_w1\") # patch size, in_channel, out_channel\r\n b_conv1 = bias_variable([64], \"det_b1\") # bias vector with a component for each out_channel\r\n out_conv1_1 = conv2d(img, W_conv1) + b_conv1\r\n batch1_1 = batch_normalization(out_conv1_1, 64, is_training)\r\n relu_out1_1 = tf.nn.relu(batch1_1)\r\n\r\n W_re1 = weight_variable([ft_sz, ft_sz, 64, 64], \"det_w_re1\") #\r\n b_re1 = bias_variable([64], \"det_b_re1\")\r\n out_conv1_2 = conv2d(relu_out1_1, W_re1) + b_re1\r\n batch1_2 = batch_normalization(out_conv1_2, 64, is_training)\r\n relu_out1_2 = tf.nn.relu(batch1_2) #\r\n h_pool1 = max_pool_2x2(relu_out1_2) #\r\n\r\n W_conv2 = weight_variable([ft_sz, ft_sz, 64, 128], \"det_w2\")\r\n b_conv2 = bias_variable([128], \"det_b2\")\r\n out_conv2_1 = conv2d(h_pool1, W_conv2) + b_conv2\r\n batch2_1 = batch_normalization(out_conv2_1, 128, is_training)\r\n relu_out2_1 = tf.nn.relu(batch2_1)\r\n\r\n W_re2 = weight_variable([ft_sz, ft_sz, 128, 128], \"det_w_re2\") #\r\n b_re2 = bias_variable([128], \"det_b_re2\")\r\n out_conv2_2 = conv2d(relu_out2_1, W_re2) + b_re2\r\n batch2_2 = batch_normalization(out_conv2_2, 128, is_training)\r\n relu_out2_2 = tf.nn.relu(batch2_2) #\r\n h_pool2 = max_pool_2x2(relu_out2_2) #\r\n\r\n W_conv3 = weight_variable([ft_sz, ft_sz, 128, 256], \"det_w3\")\r\n b_conv3 = bias_variable([256], \"det_b3\")\r\n out_conv3_1 = conv2d(h_pool2, W_conv3) + b_conv3\r\n batch3_1 = batch_normalization(out_conv3_1, 256, is_training)\r\n relu_out3_1 = tf.nn.relu(batch3_1)\r\n\r\n W_re3 = weight_variable([ft_sz, ft_sz, 256, 256], \"det_w_re3\") #\r\n b_re3 = bias_variable([256], \"det_b_re3\")\r\n out_conv3_2 = conv2d(relu_out3_1, W_re3) + b_re3\r\n batch3_2 = batch_normalization(out_conv3_2, 256, is_training)\r\n relu_out3_2 = tf.nn.relu(batch3_2) #\r\n W_rere3 = weight_variable([ft_sz, ft_sz, 256, 256], \"det_w_rere3\")\r\n b_rere3 = bias_variable([256], \"det_b_rere3\")\r\n out_conv3_3 = conv2d(relu_out3_2, W_rere3) + b_rere3\r\n batch3_3 = batch_normalization(out_conv3_3, 256, is_training)\r\n relu_out3_3 = tf.nn.relu(batch3_3) #\r\n h_pool3 = max_pool_2x2(relu_out3_3) #\r\n\r\n out_size = 4*4*256\r\n\r\n h_pool3_flat = tf.reshape(h_pool3, [-1, out_size])\r\n\r\n return h_pool3_flat, out_size\r\n\r\ndef deepClass(img, ft_sz, is_training):\r\n\r\n W_conv1 = weight_variable([ft_sz, ft_sz, 3, 64], \"cls_w1\") # patch size, in_channel, out_channel\r\n b_conv1 = bias_variable([64], \"cls_b1\") # bias vector with a component for each out_channel\r\n out_conv1_1 = conv2d(img, W_conv1) + b_conv1\r\n batch1_1 = batch_normalization(out_conv1_1, 64, is_training)\r\n relu_out1_1 = tf.nn.relu(batch1_1)\r\n\r\n W_re1 = weight_variable([ft_sz, ft_sz, 64, 64], \"cls_w_re1\") #\r\n b_re1 = bias_variable([64], \"cls_b_re1\")\r\n out_conv1_2 = conv2d(relu_out1_1, W_re1) + b_re1\r\n batch1_2 = batch_normalization(out_conv1_2, 64, is_training)\r\n relu_out1_2 = tf.nn.relu(batch1_2) #\r\n h_pool1 = max_pool_2x2(relu_out1_2) #\r\n\r\n W_conv2 = weight_variable([ft_sz, ft_sz, 64, 128], \"cls_w2\")\r\n b_conv2 = bias_variable([128], \"cls_b2\")\r\n out_conv2_1 = conv2d(h_pool1, W_conv2) + b_conv2\r\n batch2_1 = batch_normalization(out_conv2_1, 128, is_training)\r\n relu_out2_1 = tf.nn.relu(batch2_1)\r\n\r\n W_re2 = weight_variable([ft_sz, ft_sz, 128, 128], \"cls_w_re2\") #\r\n b_re2 = bias_variable([128], \"cls_b_re2\")\r\n out_conv2_2 = conv2d(relu_out2_1, W_re2) + b_re2\r\n batch2_2 = 
batch_normalization(out_conv2_2, 128, is_training)\r\n    relu_out2_2 = tf.nn.relu(batch2_2) #\r\n    h_pool2 = max_pool_2x2(relu_out2_2) #\r\n\r\n    W_conv3 = weight_variable([ft_sz, ft_sz, 128, 256], \"cls_w3\")\r\n    b_conv3 = bias_variable([256], \"cls_b3\")\r\n    out_conv3_1 = conv2d(h_pool2, W_conv3) + b_conv3\r\n    batch3_1 = batch_normalization(out_conv3_1, 256, is_training)\r\n    relu_out3_1 = tf.nn.relu(batch3_1)\r\n\r\n    W_re3 = weight_variable([ft_sz, ft_sz, 256, 256], \"cls_w_re3\") #\r\n    b_re3 = bias_variable([256], \"cls_b_re3\")\r\n    out_conv3_2 = conv2d(relu_out3_1, W_re3) + b_re3\r\n    batch3_2 = batch_normalization(out_conv3_2, 256, is_training)\r\n    relu_out3_2 = tf.nn.relu(batch3_2) #\r\n    W_rere3 = weight_variable([ft_sz, ft_sz, 256, 256], \"cls_w_rere3\")\r\n    b_rere3 = bias_variable([256], \"cls_b_rere3\")\r\n    out_conv3_3 = conv2d(relu_out3_2, W_rere3) + b_rere3\r\n    batch3_3 = batch_normalization(out_conv3_3, 256, is_training)\r\n    relu_out3_3 = tf.nn.relu(batch3_3) #\r\n    h_pool3 = max_pool_2x2(relu_out3_3) #\r\n\r\n    out_size = 4 * 4 * 256\r\n\r\n    h_pool3_flat = tf.reshape(h_pool3, [-1, out_size])\r\n\r\n    return h_pool3_flat, out_size\r\n\r\ndef deepDetectNet(batch, class_num, keep_prob, is_training):\r\n    conv_out, out_size = deepDetect(batch, 3, is_training)\r\n    fc_drop1 = fc_drop(conv_out, out_size, 2048, keep_prob, \"det_fc_w1\", \"det_fc_b1\")\r\n    fc_drop2 = fc_drop(fc_drop1, 2048, 1024, keep_prob, \"det_fc_w2\", \"det_fc_b2\")\r\n    y_conv = fc_mul(fc_drop2, 1024, class_num, \"det_fc_w3\", \"det_fc_b3\")\r\n    soft_out = tf.nn.softmax(y_conv)\r\n    return y_conv, soft_out\r\n\r\ndef deepClassNet(batch, class_num, keep_prob, is_training):\r\n    conv_out, out_size = deepClass(batch, 3, is_training)\r\n    # per-network \"cls_\" name prefixes keep classifier variables distinct from the detector's\r\n    fc_drop1 = fc_drop(conv_out, out_size, 2048, keep_prob, \"cls_fc_w1\", \"cls_fc_b1\")\r\n    fc_drop2 = fc_drop(fc_drop1, 2048, 1024, keep_prob, \"cls_fc_w2\", \"cls_fc_b2\")\r\n    y_conv = fc_mul(fc_drop2, 1024, class_num, \"cls_fc_w3\", \"cls_fc_b3\")\r\n    soft_out = tf.nn.softmax(y_conv)\r\n    return y_conv, soft_out\r\n\r\ndef deepConcatNet(s_batch, m_batch, b_batch, class_num, keep_prob, is_training):\r\n    small_out, small_size = featureExtract(s_batch, 3, is_training, \"small\")\r\n    med_out, med_size = featureExtract(m_batch, 3, is_training, \"med\")\r\n    big_out, big_size = featureExtract(b_batch, 3, is_training, \"big\")\r\n\r\n    tot_sz = small_size + med_size + big_size\r\n    concat_out = tf.concat([small_out, med_out, big_out], 1)\r\n\r\n    # each fc layer gets its own weight/bias names to avoid name reuse\r\n    fc_drop1 = fc_drop(concat_out, tot_sz, 1024, keep_prob, \"concat_fc_w1\", \"concat_fc_b1\")\r\n    y_conv = fc_mul(fc_drop1, 1024, class_num, \"concat_fc_w2\", \"concat_fc_b2\")\r\n    soft_out = tf.nn.softmax(y_conv)\r\n    return y_conv, soft_out\r\n\r\ndef trainStep(y_conv, gt):\r\n    cross_entropy = tf.reduce_mean(\r\n        tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y_conv))\r\n    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\r\n    return train_step\r\n\r\ndef accuracy(y_conv, gt):\r\n    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(gt, 1))\r\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n    return accuracy" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.nn.max_pool", "tensorflow.cast", "tensorflow.train.ExponentialMovingAverage", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "tensorflow.Variable", "tensorflow.nn.moments", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.nn.batch_normalization", "tensorflow.truncated_normal", "tensorflow.identity", "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.reshape", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
YangTaoCN/IntroNeuralNetworks
[ "45b0311f85c9cdd9d3f0806e0059201e2655697f" ]
[ "backtest.py" ]
[ "import pandas_datareader.data as pdr\nimport yfinance as fix\nimport numpy as np\nfix.pdr_override()\n\n\ndef back_test(strategy, seq_len, ticker, start_date, end_date, dim):\n \"\"\"\n A simple back test for a given date period\n :param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data.\n :param seq_len: length of the days used for prediction\n :param ticker: company ticker\n :param start_date: starting date\n :type start_date: \"YYYY-mm-dd\"\n :param end_date: ending date\n :type end_date: \"YYYY-mm-dd\"\n :param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP\n :type dim: tuple\n :return: Percentage errors array that gives the errors for every test in the given date range\n \"\"\"\n data = pdr.get_data_yahoo(ticker, start_date, end_date)\n stock_data = data[\"Adj Close\"]\n errors = []\n for i in range((len(stock_data) // 10) * 10 - seq_len - 1):\n x = np.array(stock_data.iloc[i: i + seq_len, 1]).reshape(dim) / 200\n y = np.array(stock_data.iloc[i + seq_len + 1, 1]) / 200\n predict = strategy.predict(x)\n while predict == 0:\n predict = strategy.predict(x)\n error = (predict - y) / 100\n errors.append(error)\n total_error = np.array(errors)\n print(f\"Average error = {total_error.mean()}\")\n # If you want to see the full error list then print the following statement\n # print(errors)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pangyuteng/aigonewrong
[ "98a2c7a172be4664fc372d581cef5f23cf317b51" ]
[ "puzzles/planb/archive/mypandas.py" ]
[ "import pandas as pd\n\nd = {}\n\n# transform each row\ndef transform(row):\n l = row.iloc[0]\n v=l.strip('\\n').split(\" \")\n b=int(v[0])\n i=int(v[2])\n o=int(v[3])\n for x in range(4+i,4+i+o):\n d[v[x+o]]=(b,float(v[x]))\n for y in range(4,4+i):\n del d[v[y]]\n\ndf = pd.read_csv('data_test',header=None)\ndf.apply(transform,axis=1)\n\nprint(str(d)[-256:])\nimport hashlib\nm = hashlib.sha256()\nm.update(str(d).encode('utf-8'))\nprint(len(d))\nprint(m.digest().hex())\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
itsraina/insightface
[ "e78eee59caff3515a4db467976cf6293aba55035" ]
[ "reconstruction/ostec/external/stylegan2/run_generator.py" ]
[ "# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\nimport argparse\nimport numpy as np\nimport PIL.Image\nimport dnnlib\nimport dnnlib.tflib as tflib\nimport re\nimport sys\n\nimport pretrained_networks\n\n#----------------------------------------------------------------------------\n\ndef generate_images(network_pkl, seeds, truncation_psi):\n print('Loading networks from \"%s\"...' % network_pkl)\n _G, _D, Gs = pretrained_networks.load_networks(network_pkl)\n noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]\n\n Gs_kwargs = dnnlib.EasyDict()\n Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_kwargs.randomize_noise = False\n if truncation_psi is not None:\n Gs_kwargs.truncation_psi = truncation_psi\n\n for seed_idx, seed in enumerate(seeds):\n print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))\n rnd = np.random.RandomState(seed)\n z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]\n tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]\n images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]\n PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))\n\n#----------------------------------------------------------------------------\n\ndef style_mixing_example(network_pkl, row_seeds, col_seeds, truncation_psi, col_styles, minibatch_size=4):\n print('Loading networks from \"%s\"...' % network_pkl)\n _G, _D, Gs = pretrained_networks.load_networks(network_pkl)\n w_avg = Gs.get_var('dlatent_avg') # [component]\n\n Gs_syn_kwargs = dnnlib.EasyDict()\n Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_syn_kwargs.randomize_noise = False\n Gs_syn_kwargs.minibatch_size = minibatch_size\n\n print('Generating W vectors...')\n all_seeds = list(set(row_seeds + col_seeds))\n all_z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:]) for seed in all_seeds]) # [minibatch, component]\n all_w = Gs.components.mapping.run(all_z, None) # [minibatch, layer, component]\n w1 = np.load('latent_representations/im_right_01.npy')\n w2 = np.load('latent_representations/04753.000002.02_C_01.npy')\n all_w = np.stack([w1, w2],0)\n all_w = w_avg + (all_w - w_avg) * truncation_psi # [minibatch, layer, component]\n w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))} # [layer, component]\n\n print('Generating images...')\n all_images = Gs.components.synthesis.run(all_w, **Gs_syn_kwargs) # [minibatch, height, width, channel]\n image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}\n\n print('Generating style-mixed images...')\n for row_seed in row_seeds:\n for col_seed in col_seeds:\n w = w_dict[row_seed].copy()\n w[col_styles] = w_dict[col_seed][col_styles]\n image = Gs.components.synthesis.run(w[np.newaxis], **Gs_syn_kwargs)[0]\n image_dict[(row_seed, col_seed)] = image\n\n print('Saving images...')\n for (row_seed, col_seed), image in image_dict.items():\n PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('%d-%d.png' % (row_seed, col_seed)))\n\n print('Saving image grid...')\n _N, _C, H, W = Gs.output_shape\n canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 
'black')\n for row_idx, row_seed in enumerate([None] + row_seeds):\n for col_idx, col_seed in enumerate([None] + col_seeds):\n if row_seed is None and col_seed is None:\n continue\n key = (row_seed, col_seed)\n if row_seed is None:\n key = (col_seed, col_seed)\n if col_seed is None:\n key = (row_seed, row_seed)\n canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))\n canvas.save(dnnlib.make_run_dir_path('grid.png'))\n\n#----------------------------------------------------------------------------\n\ndef _parse_num_range(s):\n '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''\n\n range_re = re.compile(r'^(\\d+)-(\\d+)$')\n m = range_re.match(s)\n if m:\n return range(int(m.group(1)), int(m.group(2))+1)\n vals = s.split(',')\n return [int(x) for x in vals]\n\n#----------------------------------------------------------------------------\n\n_examples = '''examples:\n\n # Generate ffhq uncurated images (matches paper Figure 12)\n python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=6600-6625 --truncation-psi=0.5\n\n # Generate ffhq curated images (matches paper Figure 11)\n python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=66,230,389,1518 --truncation-psi=1.0\n\n # Generate uncurated car images (matches paper Figure 12)\n python %(prog)s generate-images --network=gdrive:networks/stylegan2-car-config-f.pkl --seeds=6000-6025 --truncation-psi=0.5\n\n # Generate style mixing example (matches style mixing video clip)\n python %(prog)s style-mixing-example --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --row-seeds=85,100,75,458,1500 --col-seeds=55,821,1789,293 --truncation-psi=1.0\n'''\n\n#----------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(\n description='''StyleGAN2 generator.\n\nRun 'python %(prog)s <subcommand> --help' for subcommand help.''',\n epilog=_examples,\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n\n subparsers = parser.add_subparsers(help='Sub-commands', dest='command')\n\n parser_generate_images = subparsers.add_parser('generate-images', help='Generate images')\n parser_generate_images.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)\n parser_generate_images.add_argument('--seeds', type=_parse_num_range, help='List of random seeds', required=True)\n parser_generate_images.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)\n parser_generate_images.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')\n\n parser_style_mixing_example = subparsers.add_parser('style-mixing-example', help='Generate style mixing video')\n parser_style_mixing_example.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)\n parser_style_mixing_example.add_argument('--row-seeds', type=_parse_num_range, help='Random seeds to use for image rows', required=True)\n parser_style_mixing_example.add_argument('--col-seeds', type=_parse_num_range, help='Random seeds to use for image columns', required=True)\n parser_style_mixing_example.add_argument('--col-styles', type=_parse_num_range, help='Style layer range (default: %(default)s)', default='0-6')\n parser_style_mixing_example.add_argument('--truncation-psi', type=float, help='Truncation psi 
(default: %(default)s)', default=0.5)\n parser_style_mixing_example.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')\n\n args = parser.parse_args()\n kwargs = vars(args)\n subcmd = kwargs.pop('command')\n\n if subcmd is None:\n print ('Error: missing subcommand. Re-run with --help for usage.')\n sys.exit(1)\n\n sc = dnnlib.SubmitConfig()\n sc.num_gpus = 1\n sc.submit_target = dnnlib.SubmitTarget.LOCAL\n sc.local.do_not_copy_source_files = True\n sc.run_dir_root = kwargs.pop('result_dir')\n sc.run_desc = subcmd\n\n func_name_map = {\n 'generate-images': 'run_generator.generate_images',\n 'style-mixing-example': 'run_generator.style_mixing_example'\n }\n dnnlib.submit_run(sc, func_name_map[subcmd], **kwargs)\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n main()\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.load", "numpy.random.RandomState", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ddr95070/RMIsaac
[ "ee3918f685f0a88563248ddea11d089581077973" ]
[ "sdk/packages/object_pose_estimation/apps/pose_cnn_decoder/evaluation/pose_evaluation_utils.py" ]
[ "'''\nCopyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n\nNVIDIA CORPORATION and its licensors retain all intellectual property\nand proprietary rights in and to this software, related documentation\nand any modifications thereto. Any use, reproduction, disclosure or\ndistribution of this software and related documentation without an express\nlicense agreement from NVIDIA CORPORATION is strictly prohibited.\n'''\n\nfrom packages.object_pose_estimation.apps.pose_cnn_decoder.evaluation.cuboid import Cuboid3d\nfrom matplotlib import pyplot as plt\n\nimport cv2\nimport math\nimport matplotlib.animation as animation\nimport numpy as np\nimport quaternion\n\n\ndef save_animation_from_image_list(image_list, save_animation_path):\n \"\"\"\n Saves a stream of images received as animation.\n\n Args:\n image_list: ndarray of image tensors to be displayed in stream\n save_animation_path: Path to save the animation of stream of images\n \"\"\"\n fig = plt.figure()\n im = plt.imshow(image_list[0])\n\n def updatefig(array, *args):\n im.set_array(array)\n return im,\n\n ani = animation.FuncAnimation(fig, func=updatefig, frames=image_list, interval=100, blit=True)\n ani.save(save_animation_path, writer='ffmpeg')\n\n\ndef compute_median_pose_errors(rotation_err, translation_err):\n \"\"\"\n Returns the median error of the rotation (index 0) and translation errors (indices 1-3)\n as list of 4 values.\n\n Args:\n translation_err: Translation error of dimensions [num_data x 3]\n rotation_err: Rotation errors of dimensions [num_data x 1]\n \"\"\"\n median_trans_err = np.median(translation_err, axis=0)\n median_rot_err = np.median(rotation_err)\n return [median_rot_err, median_trans_err[0], median_trans_err[1], median_trans_err[2]]\n\n\ndef compute_accuracy(translation_err, rotation_err, translation_err_threshold,\n rotation_err_threshold):\n \"\"\"\n Returns the accuracy metric of the model which is fraction of data within the specified\n error thresholds\n Ref: PoseCNN-https://arxiv.org/pdf/1711.00199.pdf\n\n Args:\n translation_err: Translation error of dimensions [num_data x 3]\n rotation_err: Rotation errors of dimensions [num_data x 1]\n translation_err_threshold: Tuple of size 3 specifying thresholds for the translation errors\n rotation_err_threshold: Scalar specifying threshold for the rotation error in degrees.\n \"\"\"\n positive_ind = (np.where((translation_err[:, 0] <= translation_err_threshold[0]) & \\\n (translation_err[:, 1] <= translation_err_threshold[1]) & \\\n (translation_err[:, 2] <= translation_err_threshold[2]) & \\\n (rotation_err < rotation_err_threshold)))[0].tolist()\n return float(len(positive_ind)) / translation_err.shape[0]\n\n\ndef quaternion_to_ndarray(q):\n \"\"\"\n Converts the numpy Quaternion to ndarray\n\n Args:\n q: Numpy quaternion in order (q.w, q.x, q.y, q.z)\n \"\"\"\n return np.array([q.w, q.x, q.y, q.z])\n\n\ndef compute_roi_abs_translation_error(predicted_pose, gt_pose, depth_roi=None):\n \"\"\"\n Returns absolute error of the translation x, y an d z positions, given\n ndarrays of predicted and ground truth poses.\n If region of interest (ROI) for depth/distance from camera is given as input,\n only errors for samples inside ROI are returned.\n\n Args:\n predicted_pose: ndarray of predcited pose of dimensions [num_samples, 3]\n gt_pose: ndarray of ground truth pose of dimensions [num_samples, 3]\n depth_roi: Tuple of minimum and maximum depth values for depth.\n \"\"\"\n region_of_interest = range(predicted_pose.shape[0])\n if (depth_roi is not None):\n 
roi_index = np.where((gt_pose[:,6] > depth_roi[0]) & (gt_pose[:,6] \\\n <= depth_roi[1]))[0].tolist()\n if (len(roi_index) > 0):\n # Restrict to samples inside the ROI; if none fall inside, keep the full range\n region_of_interest = roi_index\n\n return np.abs(gt_pose[region_of_interest, 4:7] - predicted_pose[region_of_interest, 4:7])\n\n\ndef compute_roi_rotation_error(predicted_pose,\n gt_pose,\n symm_axis,\n num_rotation_symmetry,\n depth_roi=None):\n \"\"\"\n Returns rotation error between two input quaternions in degrees by taking all\n the symmetric rotations of the object into account.\n If region of interest for depth/distance from camera is given as input,\n only errors for samples inside ROI are returned.\n\n Args:\n predicted_pose: ndarray of predicted pose of dimensions [num_samples, 3]\n gt_pose: ndarray of ground truth pose of dimensions [num_samples, 3]\n symm_axis: Rotation axis of symmetry of the object\n num_rotation_symmetry: Number of rotations of symmetry in the given symmetry axis.\n depth_roi: Tuple of minimum and maximum depth values for depth.\n \"\"\"\n rotation_err = []\n for i in range(predicted_pose.shape[0]):\n # Get all symmetric ground truth rotations with default axis of symmetry\n # TODO(Sravya): Extend the capability to handle multiple axes of symmetry\n gt_symm_rotations = compute_symmetry_rotations(gt_pose[i, :4], symm_axis,\\\n num_rotation_symmetry)\n rotation_err.append(compute_rotation_err(gt_symm_rotations, predicted_pose[i, :4]))\n # Convert rotation errors from radians to degrees\n rotation_err = np.asarray(rotation_err) * 180 / np.pi\n region_of_interest = range(predicted_pose.shape[0])\n if (depth_roi is not None):\n roi_index = np.where((gt_pose[:,6] > depth_roi[0]) & (gt_pose[:,6] \\\n <= depth_roi[1]))[0].tolist()\n if (len(roi_index) > 0):\n # Restrict to samples inside the ROI; if none fall inside, keep the full range\n region_of_interest = roi_index\n return rotation_err[region_of_interest]\n\n\ndef compute_symmetry_rotations(ref, symmetry_axis, num_rotation_symmetry):\n \"\"\"\n Returns the list of all symmetric rotations of input rotation\n given the symmetry axis and the number of rotation symmetries.\n\n Args:\n ref: input rotation in quaternion order (q.w, q.x, q.y, q.z)\n symmetry_axis: 0 for x-axis, 1 for y-axis and 2 for z-axis\n num_rotation_symmetry: Number of rotation symmetries about an axis\n \"\"\"\n symm_rotations = np.zeros((num_rotation_symmetry, 4))\n unit_rotation = np.zeros((3))\n unit_rotation[symmetry_axis] = 1\n for i_rot_symm in range(num_rotation_symmetry):\n symm_rotation_angle = 2 * math.pi * i_rot_symm / num_rotation_symmetry\n symm_rotation = quaternion.from_rotation_vector((symm_rotation_angle * \\\n unit_rotation).tolist())\n quat_rotate = quaternion.from_float_array(ref) * symm_rotation\n symm_rotations[i_rot_symm, :] = quaternion_to_ndarray(quat_rotate)\n return symm_rotations\n\n\ndef compute_rotation_err(ground_truth, predictions):\n \"\"\"\n Returns the minimum of the quaternion rotation error of the predicted rotation\n across all ground truth rotations\n\n Args:\n ground_truth: ndarray of all ground truth rotations as quaternions including\n object symmetries.\n predictions: ndarray of quaternion rotation predicted by the model\n \"\"\"\n num_rotations = ground_truth.shape[0]\n rotation_err = 2 * np.pi\n for i in range(num_rotations):\n rotation_err = np.minimum(\n rotation_err, compute_angle_between_rotations(ground_truth[i, :], predictions))\n return rotation_err\n\n\ndef compute_angle_between_rotations(rotation_lhs, 
rotation_rhs):\n \"\"\"\n Returns the angle in rad between two input rotations as quaternions\n\n Args:\n rotation_lhs, rotation_rhs: Ndarrays of the two quaternion rotations in order\n (q.w, q.x, q.y, q.z)\n \"\"\"\n rotation_lhs /= np.linalg.norm(rotation_lhs)\n rotation_rhs /= np.linalg.norm(rotation_rhs)\n return np.arccos(np.clip(2 * np.square(np.dot(rotation_lhs, rotation_rhs)) - 1.0, -1.0, 1.0))\n\n\ndef compute_pose_error_depth_bins(rotation_err, translation_err, gt_poses, dist_bins):\n \"\"\"\n Returns the rotation and translation errors, ground truth depth values in bins/intervals\n determined by the input argument dist_bins\n Ref: PoseCNN-https://arxiv.org/pdf/1711.00199.pdf\n\n Args:\n translation_err: Translation error of dimensions [num_data x 3]\n rotation_err: Rotation errors of dimensions [num_data x 1]\n gt_poses: ndarray of ground truth poses of dimensions [num_data x 7]\n dist_bins: list of camera distances in ascending order.\n Bins are formed between two adjacent values in the list.\n \"\"\"\n trans_err_x_bins = []\n trans_err_y_bins = []\n trans_err_z_bins = []\n dist_values_bins = []\n rotation_err_bins = []\n for i in range(len(dist_bins)):\n if i == 0:\n z_min = 0\n else:\n z_min = dist_bins[i - 1]\n z_max = dist_bins[i]\n roi = np.where((gt_poses[:, 6] > z_min) & (gt_poses[:, 6] <= z_max))\n trans_err_x_bins.append(translation_err[roi, 0].squeeze().tolist())\n trans_err_y_bins.append(translation_err[roi, 1].squeeze().tolist())\n trans_err_z_bins.append(translation_err[roi, 2].squeeze().tolist())\n rotation_err_bins.append(rotation_err[roi].squeeze().tolist())\n dist_values_bins.append(gt_poses[roi, 6].squeeze().tolist())\n return (rotation_err_bins, trans_err_x_bins, trans_err_y_bins, trans_err_z_bins,\n dist_values_bins)\n\n\ndef pose_capnp_to_list(pose):\n \"\"\"\n Reads Detections3Proto capnp message and returns pose as python list of 7 values.\n First four values are the quaternion for rotation and next three are the translation values.\n\n Args:\n pose: Detections3Proto capnp message\n \"\"\"\n return [pose.rotation.q.w, pose.rotation.q.x, pose.rotation.q.y, pose.rotation.q.z, \\\n pose.translation.x, pose.translation.y, pose.translation.z]\n\n\ndef get_camera_matrix(focal, center):\n \"\"\"\n Returns camera intrinsics matrix from focal lengths and image centers\n focal = [fx, fy]: focal lengths along x and y axes\n center = [cx, cy]: Image centers along x and y axes\n \"\"\"\n matrix_camera = np.zeros((3, 3))\n matrix_camera[0, 0] = focal[0]\n matrix_camera[1, 1] = focal[1]\n matrix_camera[0, 2] = center[0]\n matrix_camera[1, 2] = center[1]\n matrix_camera[2, 2] = 1\n return matrix_camera\n\n\ndef draw_cuboid(vert, img):\n \"\"\"\n Draws the cuboid on an input image with given vertices in pixels and returns the updated image\n\n Args:\n vert: Ndarray of the 8 vertices of the cuboid\n img: Ndarray of image that the cuboid is added to.\n \"\"\"\n img = cv2.line(img, tuple(np.int_(vert[1])), tuple(np.int_(vert[0])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[0])), tuple(np.int_(vert[3])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[3])), tuple(np.int_(vert[2])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[2])), tuple(np.int_(vert[1])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[5])), tuple(np.int_(vert[4])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[4])), tuple(np.int_(vert[7])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[7])), tuple(np.int_(vert[6])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[6])), tuple(np.int_(vert[5])), (0, 255, 0), 4)\n img = 
cv2.line(img, tuple(np.int_(vert[2])), tuple(np.int_(vert[6])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[1])), tuple(np.int_(vert[5])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[3])), tuple(np.int_(vert[7])), (0, 255, 0), 4)\n img = cv2.line(img, tuple(np.int_(vert[0])), tuple(np.int_(vert[4])), (0, 255, 0), 4)\n return img\n\n\ndef visualize_3Dboundingbox(object_size, object_center, camera_matrix, pose, img):\n \"\"\"\n Returns the input image with added 3D bounding box visualization\n\n Args:\n Object size, object_center: Tuple of size 3 each\n camera_matrix: camera intrinsics matrix,\n pose: Ndarray of 7 elements [q.w, q.x, q.y, q.z, x, y, z],\n img: input image to draw the 3D bounding box on.\n \"\"\"\n _cuboid3d = Cuboid3d(object_size, object_center)\n cuboid3d_points = np.array(_cuboid3d.get_vertices())\n rotation_matrix = quaternion.as_rotation_matrix(np.quaternion(pose[0], pose[1], \\\n pose[2], pose[3]))\n # Reference: https://www.programcreek.com/python/example/89450/cv2.Rodrigues\n rvec = cv2.Rodrigues(rotation_matrix)[0]\n tvec = pose[4:]\n dist_coeffs = np.zeros((4, 1))\n # Compute the pixel coordinates of the 3D points\n projected_points, _ = cv2.projectPoints(cuboid3d_points, rvec, tvec, camera_matrix,\\\n dist_coeffs)\n projected_points = np.squeeze(projected_points)\n # Draw line to form 3D bounding box from project points\n img = draw_cuboid(projected_points, img)\n return img\n\n\ndef visualize_2Dboundingbox(detection, img):\n \"\"\"\n Returns 2D bounding box on the image\n Args:\n detection: [bbox.min.x, bbox.min.y, bbox.max.x, bbox.max.y]\n img: input image to draw the 2D bounding box on.\n \"\"\"\n if (np.linalg.norm(detection) == 0):\n return img\n img = cv2.rectangle(img, (int(detection[1]), int(detection[0])), (int(detection[3]),\\\n int(detection[2])), (255,255,255), 2)\n return img" ]
[ [ "numpy.dot", "matplotlib.pyplot.imshow", "numpy.abs", "numpy.asarray", "numpy.median", "numpy.squeeze", "numpy.linalg.norm", "numpy.quaternion", "numpy.int_", "matplotlib.animation.FuncAnimation", "numpy.array", "numpy.zeros", "numpy.where", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
soodoku/decline
[ "a4b6c5e20d4660d6d343e35663fcc49c5af03578" ]
[ "incline/trend.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom scipy.signal import savgol_filter\nfrom scipy.interpolate import UnivariateSpline\n\n\ndef naive_trend(df, column_value='value'):\n \"\"\"\n naive_trend\n\n Gives the naive slope: look to the right, look to the left, \n travel one unit each, and get the average change. At the ends,\n we merely use the left or the right value.\n\n Args:\n df: pandas dataFrame time series object\n \"\"\"\n y = df[column_value]\n\n y_1 = y.shift(1)\n y_2 = y.shift(-1)\n\n y1_diff = y_1 - y\n yneg1_diff = y - y-2\n\n yy = pd.concat([y.rename('orig'),\n y_1.rename('plus_1'),\n y_2.rename('min_1'),\n y1_diff.rename('plus_1_diff'),\n yneg1_diff.rename('min_1_diff')], axis = 1)\n odf = df.copy()\n odf['derivative_value'] = yy[['plus_1_diff', 'min_1_diff']].mean(axis = 1)\n odf['derivative_method'] = 'naive'\n odf['function_order'] = None\n odf['derivative_order'] = 1\n\n return odf\n\n\ndef spline_trend(df, column_value='value', function_order=3,\n derivative_order=1, s=3):\n \"\"\"\n spline_trend\n\n Interpolates time series with splines of 'function_order'. And then\n calculates the derivative_order using the smoothed function.\n\n Args:\n df: pandas dataFrame time series object\n function_order: spline order (default is 3)\n derivative_order: (0, 1, 2, ... with default as 1)\n\n Returns:\n DataFrame: dataframe with 6 columns:- datetime,\n function_order (value of the polynomial order), smoothed_value,\n derivative_method, derivative_order, derivative_value.\n\n A row can be 2012-01-01, \"spline\", 2, 1, 0\n \"\"\"\n x = df.reset_index().index.values.astype(float)\n y = df[column_value]\n spl = UnivariateSpline(x, y, k=function_order, s=s)\n odf = df.copy()\n odf['smoothed_value'] = spl(x)\n odf['derivative_value'] = spl(x, nu=derivative_order)\n odf['function_order'] = function_order\n odf['derivative_method'] = 'spline'\n odf['derivative_order'] = derivative_order\n return odf\n\n\ndef sgolay_trend(df, column_value='value', function_order=3,\n derivative_order=1, window_length=15):\n \"\"\"\n sgolay_trend\n\n Interpolates time series with savitzky-golay using polynomials of\n 'function_order'. And then calculates the derivative_order using\n the smoothed function.\n\n Args:\n df: pandas dataFrame time series object\n window_size: default is 15\n function_order: polynomial order (default is 3)\n derivative_order: (0, 1, 2, ... 
with default as 1)\n\n Returns:\n DataFrame: dataframe with 6 columns:- datetime,\n function_order (value of the polynomial order), smoothed_value,\n derivative_method, derivative_order, derivative_value.\n\n Sample row: 2012-01-01, \"sgolay\", 2, 1, 0\n \"\"\"\n y = df[column_value]\n odf = df.copy()\n odf['smoothed_value'] = savgol_filter(y, window_length=window_length,\n polyorder=function_order)\n odf['derivative_value'] = savgol_filter(y, window_length=window_length,\n polyorder=function_order,\n deriv=derivative_order)\n odf['function_order'] = function_order\n odf['derivative_method'] = 'sgolay'\n odf['derivative_order'] = derivative_order\n return odf\n\n\ndef trending(df_list, column_id='id', derivative_order=1, max_or_avg='max',\n k=5):\n \"\"\"\n trending\n\n For each item in the list, calculate either the max or the average\n (depending on max_or_avg) of the Yth derivative (based on the\n derivative_order) over the last k time_periods (based on the input).\n It then orders the list based on max to min.\n \n For instance, for derivative_order = 1, max_or_avg = \"max\",\n time_periods = 3, for each item in the list, the function will take\n the max of the last 3 rows of the dataframe entries identifying the\n 1st derivative.\n\n So each item in the list produces one number (max or avg.). We then\n produce a new dataframe with 2 columns: id, max_or_avg\n\n Args:\n df_list: list of outputs (dataframes) from sgolay_trend or\n spline_trend with a new column called 'id' that identifies\n the time series\n derivative_order: (1 or 2)\n k: number of latest time periods to consider.\n max_or_avg: \"max\" or \"avg\"\n\n\n Returns:\n DataFrame: dataframe with 2 columns: id, max_or_avg\n \"\"\"\n\n cdf = []\n for df in df_list:\n cdf.append(df[df.derivative_order == derivative_order][-k:])\n tdf = pd.concat(cdf, sort=False)\n if max_or_avg == 'avg':\n max_or_avg = 'mean'\n odf = tdf.groupby('id').agg({'derivative_value': max_or_avg})\n odf.reset_index(inplace=True)\n odf.columns = ['id', 'max_or_avg'] \n return odf\n\n\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "scipy.interpolate.UnivariateSpline", "scipy.signal.savgol_filter", "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
Wizardcn/Detection
[ "3901f0748c32562854537b93f384597a37581685" ]
[ "Transmitter.py" ]
[ "from math import sqrt\r\nimport numpy as np\r\nimport random\r\n\r\n\r\ndef generate_mi(Pm0, n):\r\n \"\"\" input message generator function \"\"\"\r\n mi = []\r\n m0 = 0 # m0 counter\r\n m1 = 0 # m1 counter\r\n for i in range(n):\r\n if random.uniform(0, 1) < Pm0:\r\n mi.append(0)\r\n m0 += 1\r\n else:\r\n mi.append(1)\r\n m1 += 1\r\n # print(m0, m1)\r\n return np.array(mi)\r\n\r\n\r\ndef voltage_s(mi, E):\r\n \"\"\" pass array of mi to this function to transmit voltage s \"\"\"\r\n return np.power(-1, mi) * np.sqrt(E)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # test\r\n mi = generate_mi(0.5, 50)\r\n s = voltage_s(mi, 10)\r\n print(mi)\r\n print(s)\r\n" ]
[ [ "numpy.array", "numpy.sqrt", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leon3428/SatSeg
[ "c3a4c3064ab0629cc82e3e4c6c4d80ff009770be" ]
[ "training.py" ]
[ "import numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nos.environ[\"KERAS_BACKEND\"] = \"plaidml.keras.backend\"\n\nimport keras\nfrom keras.layers import Conv2D, Conv2DTranspose, MaxPool2D, Concatenate, Input, BatchNormalization, Activation\nfrom keras.models import Model, load_model\nfrom keras.utils import plot_model\nfrom keras.optimizers import Adam\nimport keras.backend as K\nimport random\nimport cv2\nimport math\nfrom rotate_and_crop import rotate_and_crop\nfrom tqdm import tqdm\nfrom keras.callbacks import LearningRateScheduler\n\nx_train = np.load('dataset/x_train.npy')\ny_train = np.load('dataset/y_train.npy')\n\nx_val = np.load('dataset/x_val.npy')\ny_val = np.load('dataset/y_val.npy')\n\nx_test = np.load('dataset/x_test.npy')\ny_test = np.load('dataset/y_test.npy')\n\nvalLoss_file = \"dsboard_project/valLoss.data\"\ntrainLoss_file = \"dsboard_project/trainLoss.data\"\nvalDice_file = \"dsboard_project/valDice.data\"\ntrainDice_file = \"dsboard_project/trainDice.data\"\nloss_file = \"dsboard_project/loss.data\"\nlr_file = \"dsboard_project/lr.data\"\n\nINIT_LR = 1e-3\nMIN_LR = 1e-5\nLR_DECAY = 0.94\n\nclass LrVis(keras.callbacks.Callback):\n def __init__(self):\n self.__epoch = 0\n self.__clearFile(valLoss_file)\n self.__clearFile(trainLoss_file)\n self.__clearFile(valDice_file)\n self.__clearFile(trainDice_file)\n self.__clearFile(loss_file)\n self.__clearFile(lr_file)\n self.__batch = 0\n\n def __appendData(self, file, point):\n with open(file, 'a') as f:\n f.write(str(point[0]) + ',' + str(point[1]) + '/\\n')\n\n def __clearFile(self, file):\n with open(file, 'w') as f:\n f.write('')\n\n def on_train_begin(self, logs={}):\n self.__epoch+=1\n\n def on_epoch_end(self, epoch, logs={}):\n self.__appendData(valLoss_file, (self.__epoch*2 + epoch, logs.get('val_loss')))\n self.__appendData(trainLoss_file, (self.__epoch*2 + epoch, logs.get('loss')))\n self.__appendData(valDice_file, (self.__epoch*2 + epoch, logs.get('val_dice_metric')))\n self.__appendData(trainDice_file, (self.__epoch*2 + epoch, logs.get('dice_metric')))\n self.__appendData(lr_file, (self.__epoch*2 + epoch, K.eval(self.model.optimizer.lr)))\n\n def on_batch_end(self, batch, logs={}):\n self.__appendData(loss_file, (self.__batch, logs.get('loss')))\n self.__batch += 1\n \n\ndef schedule(epoch, lr):\n lr *= LR_DECAY\n return max(lr, MIN_LR)\n\ndef pxsoftmax(z):\n z = K.exp(z)\n norm = K.sum(z, axis = 3, keepdims=True)\n norm = K.repeat_elements(norm, 3, axis=3)\n return z/norm\n\n\ndef dice_metric(y_true, y_pred, smooth=1):\n intersection = K.sum(y_true * y_pred, axis=[1,2,3])\n union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3])\n dice = K.mean((2. 
* intersection + smooth)/(union + smooth), axis=0)\n return dice\n\ndef weighted_crossentropy(y_true, y_pred):\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n\n dims = y_true.shape.dims\n n = dims[1]*dims[2]*dims[3]\n weights = K.constant([1.15,1.0,1.1], dtype='float32')\n weights = K.reshape(weights, (1,1,1,3))\n entropy = y_true * K.log(y_pred) * weights\n entropy = (-1.0/n) * K.sum(entropy, axis = [1,2,3])\n return entropy\n\ndef conv_block(inp, filters):\n conv1 = Conv2D(filters, (3,3), activation = 'relu', padding = 'same')(inp)\n conv2 = Conv2D(filters, (3,3), activation = 'relu', padding = 'same')(conv1)\n\n return conv2\n\ndef build_model():\n inputs = Input(shape=(128,128,4))\n en_block1 = conv_block(inputs, 16)\n en_pool1 = MaxPool2D((2,2))(en_block1)\n en_block2 = conv_block(en_pool1, 32)\n en_pool2 = MaxPool2D((2,2))(en_block2)\n en_block3 = conv_block(en_pool2, 64)\n en_pool3 = MaxPool2D((2,2))(en_block3)\n en_block4 = conv_block(en_pool3, 128)\n en_pool4 = MaxPool2D((2,2))(en_block4)\n\n en_block5 = conv_block(en_pool4, 256)\n\n \n de_upconv1 = Conv2DTranspose(128, (3,3), strides = (2,2), activation = 'relu', padding = 'same')(en_block5)\n de_concat1 = Concatenate()([de_upconv1, en_block4])\n de_block1 = conv_block(de_concat1, 128)\n \n de_upconv2 = Conv2DTranspose(64, (3,3), strides = (2,2), activation = 'relu', padding = 'same')(de_block1)\n de_concat2 = Concatenate()([de_upconv2, en_block3])\n de_block2 = conv_block(de_concat2, 64)\n\n de_upconv3 = Conv2DTranspose(32, (3,3), strides = (2,2), activation = 'relu', padding = 'same')(de_block2)\n de_concat3 = Concatenate()([de_upconv3, en_block2])\n de_block3 = conv_block(de_concat3, 32)\n\n de_upconv4 = Conv2DTranspose(16, (3,3), strides = (2,2), activation = 'relu', padding = 'same')(de_block3)\n de_concat4 = Concatenate()([de_upconv4, en_block1])\n de_block4 = conv_block(de_concat4, 16)\n\n output = Conv2D(3, (1,1), activation = pxsoftmax, padding = 'same')(de_block4)\n \n ret = Model(inputs = inputs, outputs = output, name = 'unet')\n ret.compile(\n optimizer = Adam(lr = INIT_LR),\n loss = weighted_crossentropy,\n metrics=[dice_metric]\n )\n\n return ret\n\n\ndef data_augmentation():\n x_aug = []\n y_aug = []\n rotations = [cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_180]\n\n for i in tqdm(range(len(x_train))):\n x = x_train[i]\n y = y_train[i]\n\n rows, cols, ch = x.shape\n \n flip = random.randrange(-1, 2) # -1: both axes, 0: vertical, 1: horizontal\n x = cv2.flip(x, flip)\n y = cv2.flip(y, flip)\n\n angle = random.randrange(0, 4) # 3 means no rotation\n if angle != 3:\n x = cv2.rotate(x, rotations[angle])\n y = cv2.rotate(y, rotations[angle])\n \n \n brightness = random.randrange(-10,10)/100\n x += brightness\n\n r_shift = random.randrange(95,105)/100\n g_shift = random.randrange(95,105)/100\n b_shift = random.randrange(95,105)/100\n i_shift = random.randrange(95,105)/100\n x[:,:,0]*=b_shift\n x[:,:,1]*=g_shift\n x[:,:,2]*=r_shift\n x[:,:,3]*=i_shift\n\n if (128,128) != x.shape[:2]:\n x = cv2.resize(x, (128, 128), interpolation = cv2.INTER_AREA)\n if (128,128) != y.shape[:2]:\n y = cv2.resize(y, (128, 128), interpolation = cv2.INTER_AREA)\n\n x_aug.append(x)\n y_aug.append(y)\n\n return (np.array(x_aug), np.array(y_aug))\n\n\n\ndef main():\n print(x_train.shape, x_val.shape, x_test.shape)\n print(y_train.shape, y_val.shape, y_test.shape)\n\n model = build_model()\n #model = load_model('model-02-0.89.h5', custom_objects={'pxsoftmax': pxsoftmax, 'dice_metric': dice_metric, 'weighted_crossentropy': weighted_crossentropy})\n plot_model(model, 
\"dsboard_project/model.png\", show_shapes=True)\n\n \n filepath=\"batchnorm/model-{epoch:02d}-{val_dice_metric:.3f}.h5\"\n checkpoint_callback = keras.callbacks.ModelCheckpoint(\n filepath=filepath,\n monitor='val_dice_metric',\n mode='max')\n\n learning_vis = LrVis()\n lr_sch = LearningRateScheduler(schedule, verbose=0)\n\n for i in range(45):\n print(f'Epoch: {i} and {i+1}')\n print('Augmenting data')\n x,y = data_augmentation()\n model.fit(x, y, batch_size=32, epochs=2, callbacks=[learning_vis, checkpoint_callback, lr_sch], validation_data=(x_val, y_val))\n \n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.load", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
njanirudh/nca
[ "00cdc9783d4ee7186697caa0beadef6ad0d1ba6e" ]
[ "examples/dim_reduct.py" ]
[ "\"\"\"NCA for linear dimensionality reduction.\n\"\"\"\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\nfrom torchnca import NCA\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.manifold import TSNE\n\ndef make_circle(r, num_samples):\n t = np.linspace(0, 2*np.pi, num_samples)\n xc, yc = 0, 0 # circle center coordinates\n x = r*np.cos(t) + 0.2*np.random.randn(num_samples) + xc\n y = r*np.sin(t) + 0.2*np.random.randn(num_samples) + yc\n return x, y\n\n\ndef gen_data(num_samples, num_classes, mean, std):\n \"\"\"Generates the data.\n \"\"\"\n num_samples_per = num_samples // num_classes\n X = []\n y = []\n for i, r in enumerate(range(num_classes)):\n # first two dimensions are that of a circle\n x1, x2 = make_circle(r+1.5, num_samples_per)\n # third dimension is Gaussian noise\n x3 = std*np.random.randn(num_samples_per) + mean\n X.append(np.stack([x1, x2, x3]))\n y.append(np.repeat(i, num_samples_per))\n X = np.concatenate(X, axis=1)\n y = np.concatenate(y)\n indices = list(range(X.shape[1]))\n np.random.shuffle(indices)\n X = X[:, indices]\n y = y[indices]\n X = X.T # make it (N, D)\n return X, y\n\n\ndef plot(Xs, y, labels, save=None):\n fig, axes = plt.subplots(1, len(labels), figsize=(18, 4))\n for ax, X, lab in zip(axes, Xs, labels):\n ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n ax.title.set_text(lab)\n if save is not None:\n filename = \"./assets/{}\".format(save)\n plt.savefig(filename, format=\"png\", dpi=300, bbox_inches='tight')\n plt.show()\n\n\ndef main(args):\n np.random.seed(args.seed)\n if args.cuda and torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n device = torch.device(\"cuda\")\n else:\n print(\"[*] Using cpu.\")\n torch.manual_seed(args.seed)\n device = torch.device(\"cpu\")\n\n num_samples = 500\n X, y = gen_data(num_samples, 5, 0, args.sigma)\n print(\"data\", X.shape)\n\n # plot first two dimensions of original data\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.show()\n\n # fit PCA\n pipeline = Pipeline([('scaling', StandardScaler()), ('pca', PCA(n_components=2))])\n X_pca = pipeline.fit_transform(X)\n\n # fit LDA\n X_lda = LinearDiscriminantAnalysis(n_components=2).fit_transform(X, y)\n\n # fit NCA\n X = torch.from_numpy(X).float().to(device)\n y = torch.from_numpy(y).long().to(device)\n nca = NCA(dim=2, init=args.init, max_iters=1000, tol=1e-5)\n nca.train(X, y, batch_size=None, weight_decay=20)\n X_nca = nca(X).detach().cpu().numpy()\n\n # fit t-SNE with default values\n X_tsne = TSNE(n_components=2).fit_transform(X)\n\n # plot PCA vs NCA\n y = y.detach().cpu().numpy()\n X = X.detach().cpu().numpy()\n plot([X,X_nca, X_pca, X_lda,X_tsne], y, [\"original\", \"torchnca\", \"pca\", \"lda\",\"t-SNE\"])\n\n A = nca.A.detach().cpu().numpy()\n print(\"\\nSolution: \\n\", A)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--seed\", type=int, default=0, help=\"The rng seed.\")\n parser.add_argument(\"--sigma\", type=float, default=5, help=\"The standard deviation of the Gaussian noise.\")\n parser.add_argument(\"--init\", type=str, default=\"identity\", help=\"Which initialization to use.\")\n parser.add_argument(\"--cuda\", type=lambda x: x.lower() in ['true', '1'], default=False, help=\"Whether to show GUI.\")\n args, unparsed = parser.parse_known_args()\n main(args)\n" ]
[ [ "numpy.linspace", "numpy.concatenate", "sklearn.manifold.TSNE", "numpy.random.randn", "torch.cuda.is_available", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "torch.device", "torch.from_numpy", "numpy.stack", "numpy.sin", "numpy.repeat", "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "sklearn.decomposition.PCA", "numpy.random.seed", "matplotlib.pyplot.scatter", "torch.cuda.manual_seed", "torch.manual_seed", "numpy.cos", "numpy.random.shuffle", "sklearn.preprocessing.StandardScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jfarrugia-uom/hyperstar
[ "896db10da2506e5144b99361dfb43609edf05012" ]
[ "train_refactored.py" ]
[ "#!/usr/bin/env python\n\nimport datetime\nimport glob\nimport os\nimport sys\nimport pickle\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom projlearn import *\nfrom projlearn.toyota import Toyota\nimport pandas as pd\nimport gensim\nfrom tqdm import tqdm \n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string( 'model', 'baseline', 'Model name.')\nflags.DEFINE_string( 'train', 'train.npz', 'Training set.')\nflags.DEFINE_string( 'test', 'test.npz', 'Test set.')\nflags.DEFINE_float( 'stddev', .01, 'Value of stddev for matrix initialization.')\nflags.DEFINE_float( 'lambdac', .10, 'Value of lambda.')\nflags.DEFINE_integer('seed', 228, 'Random seed.')\nflags.DEFINE_integer('num_epochs', 300, 'Number of training epochs.')\nflags.DEFINE_integer('batch_size', 2048, 'Batch size.')\nflags.DEFINE_boolean('gpu', True, 'Try using GPU.')\nflags.DEFINE_boolean('cpuembs', False, 'Place embedding matrix and ops with it on CPU (instead of soft placement).')\nflags.DEFINE_string('w2v', 'corpus_en.norm-sz100-w8-cb0-it1-min20.w2v', 'Path to w2v file (for Toyota model).')\nflags.DEFINE_integer('eval_limit', None, 'Maximum number of examples from train/evaluation/test set to evaluate train/test loss etc. during training.')\nflags.DEFINE_boolean('log_device_placement', False, 'Log device placement of nodes in TensorFlow graph.')\n\nMODELS = {\n 'baseline': Baseline,\n 'regularized_hyponym': RegularizedHyponym,\n 'regularized_synonym': RegularizedSynonym,\n 'regularized_hypernym': RegularizedHypernym,\n 'frobenius_loss': FrobeniusLoss,\n 'mlp': MLP,\n 'toyota': Toyota\n}\n\ndef train(sess, train_op, model, data, callback=lambda: None, train_writer=None, test_writer=None):\n train_losses, test_losses = [], []\n train_times = []\n\n # Init all vars except embs_var\n init_vars = tf.global_variables()\n if FLAGS.model=='toyota':\n init_vars.remove(model.embs_var)\n sess.run(tf.variables_initializer(init_vars))\n limit = FLAGS.eval_limit\n feed_dict_train, feed_dict_test = {\n model.X: data.X_train[:limit],\n model.Y: data.Y_train[:limit],\n model.Z: data.Z_train[:limit]\n }, {\n model.X: data.X_test[:limit],\n model.Y: data.Y_test[:limit],\n model.Z: data.Z_test[:limit]\n }\n\n steps = max(data.Y_train.shape[0] // FLAGS.batch_size, 1)\n\n print('Cluster %d: %d train items and %d test items available; using %d steps of %d items.' % (\n data.cluster + 1,\n data.X_train.shape[0],\n data.X_test.shape[0],\n steps,\n min(FLAGS.batch_size, data.X_train.shape[0])),\n flush=True)\n\n for epoch in tqdm(range(FLAGS.num_epochs), unit='epoch'):\n X, Y, Z = data.train_shuffle()\n\n for step in range(steps):\n head = step * FLAGS.batch_size\n tail = (step + 1) * FLAGS.batch_size\n\n feed_dict = {\n model.X: X[head:tail, :],\n model.Y: Y[head:tail, :],\n model.Z: Z[head:tail, :]\n }\n\n t_this = datetime.datetime.now()\n sess.run(train_op, feed_dict=feed_dict)\n t_last = datetime.datetime.now()\n\n train_times.append(t_last - t_this)\n\n if (epoch + 1) % 10 == 0 or (epoch == 0):\n res = sess.run([model.loss, model.summary, model.acc_2, model.acc_10], feed_dict=feed_dict_train)\n train_losses.append(res[0])\n train_writer.add_summary(res[1], epoch)\n res = sess.run([model.loss, model.summary, model.acc_2, model.acc_10], feed_dict=feed_dict_test)\n test_losses.append(res[0])\n test_writer.add_summary(res[1], epoch)\n\n print('Cluster %d: epoch = %05d, train loss = %f, test loss = %f, test acc_2 = %f, test acc_10 = %f.' 
% (\n data.cluster + 1,\n epoch + 1,\n train_losses[-1] / data.X_train.shape[0],\n test_losses[-1] / data.X_test.shape[0],\n res[2], res[3]), \n file=sys.stderr, flush=True)\n\n t_delta = sum(train_times, datetime.timedelta())\n print('Cluster %d done in %s.' % (data.cluster + 1, str(t_delta)), flush=True)\n callback(sess)\n\n return sess.run(model.Y_hat, feed_dict=feed_dict_test)\n\ndef main(_):\n random.seed(FLAGS.seed)\n tf.set_random_seed(FLAGS.seed)\n\n if not FLAGS.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n\n config = tf.ConfigProto(log_device_placement = FLAGS.log_device_placement) \n\n if FLAGS.model == 'toyota':\n # Load w2v\n embs_type = 'float32'\n # Load word embeddings from W2V file in vectors of tensor_type dtype\n print('Loading w2v model from ', FLAGS.w2v)\n w2v = gensim.models.Word2Vec.load_word2vec_format(FLAGS.w2v, binary=True, unicode_errors='ignore', datatype=embs_type)\n print('Loaded.')\n w2v.init_sims(replace=True)\n embs = w2v.syn0norm\n\n # Load hypernymy datasets\n dfs = {}\n\n def load_ds(part):\n f = 'subsumptions-%s.txt' % part\n df = pd.read_csv(f, sep='\\t', header=None, names=['hypo', 'hyper'])\n # Convert words to indices\n for col in df.columns:\n df[col + '_ind'] = df[col].apply(lambda x: w2v.vocab[x].index)\n\n print(f, len(df))\n return df\n\n dfs['train'] = load_ds('train')\n dfs['test'] = load_ds('validation') if FLAGS.test.endswith('validation.npz') else load_ds('test')\n\n\n # get embeddings for hyponym and hypernym\n Y_ind_train = np.array(dfs['train']['hyper_ind'])[:,np.newaxis]\n Y_ind_test = np.array(dfs['test']['hyper_ind'])[:, np.newaxis]\n\n\n\n with np.load(FLAGS.train) as npz:\n X_index_train = npz['X_index']\n Y_all_train = npz['Y_all']\n Z_all_train = npz['Z_all']\n\n with np.load(FLAGS.test) as npz:\n X_index_test = npz['X_index']\n Y_all_test = npz['Y_all']\n Z_all_test = npz['Z_all']\n\n X_all_train = Z_all_train[X_index_train[:, 0], :]\n X_all_test = Z_all_test[X_index_test[:, 0], :]\n\n kmeans = pickle.load(open('kmeans.pickle', 'rb'))\n clusters_train = kmeans.predict(Y_all_train - X_all_train)\n clusters_test = kmeans.predict(Y_all_test - X_all_test)\n\n if FLAGS.model=='toyota':\n dfs['train']['cluster'] = clusters_train\n dfs['test']['cluster'] = clusters_test\n\n if FLAGS.model == 'toyota':\n model = Toyota(embs_type, embs.shape, cpuembs=FLAGS.cpuembs, w_stddev=FLAGS.stddev)\n else:\n model = MODELS[FLAGS.model](x_size=Z_all_train.shape[1], y_size=Y_all_train.shape[1], w_stddev=FLAGS.stddev,\n lambda_=FLAGS.lambdac)\n print(model, flush=True)\n\n model.init_summary()\n\n for path in glob.glob('%s.k*.trained*' % FLAGS.model):\n print('Removing a stale file: \"%s\".' % path, flush=True)\n os.remove(path)\n\n if os.path.isfile('%s.test.npz' % FLAGS.model):\n print('Removing a stale file: \"%s\".' 
% ('%s.test.npz' % FLAGS.model), flush=True)\n os.remove('%s.test.npz' % FLAGS.model)\n\n Y_hat_test = {}\n\n # Training\n with tf.name_scope('Training'):\n global_step = tf.Variable(tf.constant(0, tf.int32))\n train_op = tf.train.AdamOptimizer().minimize(model.loss, global_step)\n # train_op = tf.train.AdamOptimizer(epsilon=1.).minimize(model.loss)\n\n with tf.Session(config=config) as sess:\n from datetime import datetime\n t = datetime.now().replace(microsecond=0)\n\n if FLAGS.model == 'toyota':\n model.load_w2v(embs, sess)\n\n for cluster in range(kmeans.n_clusters):\n train_writer = tf.summary.FileWriter('./tf_train_logs5/%s-cl%d-train' % (t, cluster), sess.graph)\n test_writer = tf.summary.FileWriter('./tf_train_logs5/%s-cl%d-test' % (t, cluster), sess.graph)\n\n if FLAGS.model == 'toyota':\n # data = Data_toyota(cluster, dfs['train'], dfs['test'])\n data = Data(\n cluster, clusters_train, clusters_test,\n X_index_train, Y_ind_train, Z_all_train,\n X_index_test, Y_ind_test, Z_all_test\n )\n\n else:\n data = Data(\n cluster, clusters_train, clusters_test,\n X_index_train, Y_all_train, Z_all_train,\n X_index_test, Y_all_test, Z_all_test\n )\n\n saver = tf.train.Saver()\n saver_path = '%s.k%d.trained' % (FLAGS.model, cluster + 1)\n Y_hat_test[str(cluster)] = train(sess, train_op, model, data, callback=lambda sess: saver.save(sess, saver_path),\n train_writer=train_writer, test_writer=test_writer)\n print('Writing the output model to \"%s\".' % saver_path, flush=True)\n\n test_path = '%s.test.npz' % FLAGS.model\n np.savez_compressed(test_path, **Y_hat_test)\n print('Writing the test data to \"%s\".' % test_path)\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "pandas.read_csv", "tensorflow.constant", "tensorflow.summary.FileWriter", "tensorflow.global_variables", "tensorflow.variables_initializer", "tensorflow.ConfigProto", "numpy.savez_compressed", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "tensorflow.set_random_seed", "numpy.load", "tensorflow.train.Saver", "numpy.array", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
stefantaubert/deepvoice3_pytorch
[ "0b9a65757446c2fb54db199612752e0a1a58a9d2" ]
[ "synthesis.py" ]
[ "# coding: utf-8\n\"\"\"\nSynthesis waveform from trained model.\n\nusage: synthesis.py [options] <checkpoint> <text_list_file> <dst_dir>\n\noptions:\n --hparams=<parmas> Hyper parameters [default: ].\n --preset=<json> Path of preset parameters (json).\n --checkpoint-seq2seq=<path> Load seq2seq model from checkpoint path.\n --checkpoint-postnet=<path> Load postnet model from checkpoint path.\n --file-name-suffix=<s> File name suffix [default: ].\n --max-decoder-steps=<N> Max decoder steps [default: 500].\n --replace_pronunciation_prob=<N> Prob [default: 0.0].\n --speaker_id=<id> Speaker ID (for multi-speaker model).\n --output-html Output html for blog post.\n -h, --help Show help message.\n\"\"\"\nfrom docopt import docopt\n\nimport sys\nimport os\nfrom os.path import dirname, join, basename, splitext\n\nimport audio\n\nimport torch\nimport numpy as np\nimport nltk\n\n# The deepvoice3 model\nfrom deepvoice3_pytorch import frontend\nfrom hparams import hparams, hparams_debug_string\n\nfrom tqdm import tqdm\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n_frontend = None # to be set later\n\n\ndef tts(model, text, p=0, speaker_id=None, fast=False):\n \"\"\"Convert text to speech waveform given a deepvoice3 model.\n\n Args:\n text (str) : Input text to be synthesized\n p (float) : Replace word to pronounciation if p > 0. Default is 0.\n \"\"\"\n model = model.to(device)\n model.eval()\n if fast:\n model.make_generation_fast_()\n\n sequence = np.array(_frontend.text_to_sequence(text, p=p))\n sequence = torch.from_numpy(sequence).unsqueeze(0).long().to(device)\n text_positions = torch.arange(1, sequence.size(-1) + 1).unsqueeze(0).long().to(device)\n speaker_ids = None if speaker_id is None else torch.LongTensor([speaker_id]).to(device)\n\n # Greedy decoding\n with torch.no_grad():\n mel_outputs, linear_outputs, alignments, done = model(\n sequence, text_positions=text_positions, speaker_ids=speaker_ids)\n\n linear_output = linear_outputs[0].cpu().data.numpy()\n spectrogram = audio._denormalize(linear_output)\n alignment = alignments[0].cpu().data.numpy()\n mel = mel_outputs[0].cpu().data.numpy()\n mel = audio._denormalize(mel)\n\n # Predicted audio signal\n waveform = audio.inv_spectrogram(linear_output.T)\n\n return waveform, alignment, spectrogram, mel\n\n\ndef _load(checkpoint_path):\n if use_cuda:\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n print(\"Command line args:\\n\", args)\n checkpoint_path = args[\"<checkpoint>\"]\n text_list_file_path = args[\"<text_list_file>\"]\n dst_dir = args[\"<dst_dir>\"]\n checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"]\n checkpoint_postnet_path = args[\"--checkpoint-postnet\"]\n max_decoder_steps = int(args[\"--max-decoder-steps\"])\n file_name_suffix = args[\"--file-name-suffix\"]\n replace_pronunciation_prob = float(args[\"--replace_pronunciation_prob\"])\n output_html = args[\"--output-html\"]\n speaker_id = args[\"--speaker_id\"]\n if speaker_id is not None:\n speaker_id = int(speaker_id)\n preset = args[\"--preset\"]\n\n # Load preset if specified\n if preset is not None:\n with open(preset) as f:\n hparams.parse_json(f.read())\n # Override hyper parameters\n hparams.parse(args[\"--hparams\"])\n assert hparams.name == \"deepvoice3\"\n\n _frontend = getattr(frontend, hparams.frontend)\n import train\n train._frontend = 
_frontend\n from train import plot_alignment, build_model\n\n # Model\n model = build_model()\n\n # Load checkpoints separately\n if checkpoint_postnet_path is not None and checkpoint_seq2seq_path is not None:\n checkpoint = _load(checkpoint_seq2seq_path)\n model.seq2seq.load_state_dict(checkpoint[\"state_dict\"])\n checkpoint = _load(checkpoint_postnet_path)\n model.postnet.load_state_dict(checkpoint[\"state_dict\"])\n checkpoint_name = splitext(basename(checkpoint_seq2seq_path))[0]\n else:\n checkpoint = _load(checkpoint_path)\n model.load_state_dict(checkpoint[\"state_dict\"])\n checkpoint_name = splitext(basename(checkpoint_path))[0]\n\n model.seq2seq.decoder.max_decoder_steps = max_decoder_steps\n\n os.makedirs(dst_dir, exist_ok=True)\n with open(text_list_file_path, \"rb\") as f:\n lines = f.readlines()\n for idx, line in enumerate(lines):\n text = line.decode(\"utf-8\")[:-1]\n words = nltk.word_tokenize(text)\n waveform, alignment, _, _ = tts(\n model, text, p=replace_pronunciation_prob, speaker_id=speaker_id, fast=True)\n dst_wav_path = join(dst_dir, \"{}_{}{}.wav\".format(\n idx, checkpoint_name, file_name_suffix))\n dst_alignment_path = join(\n dst_dir, \"{}_{}{}_alignment.png\".format(idx, checkpoint_name,\n file_name_suffix))\n plot_alignment(alignment.T, dst_alignment_path,\n info=\"{}, {}\".format(hparams.builder, basename(checkpoint_path)))\n audio.save_wav(waveform, dst_wav_path)\n name = splitext(basename(text_list_file_path))[0]\n if output_html:\n print(\"\"\"\n{}\n\n({} chars, {} words)\n\n<audio controls=\"controls\" >\n<source src=\"/audio/{}/{}/{}\" autoplay/>\nYour browser does not support the audio element.\n</audio>\n\n<div align=\"center\"><img src=\"/audio/{}/{}/{}\" /></div>\n \"\"\".format(text, len(text), len(words),\n hparams.builder, name, basename(dst_wav_path),\n hparams.builder, name, basename(dst_alignment_path)))\n else:\n print(idx, \": {}\\n ({} chars, {} words)\".format(text, len(text), len(words)))\n\n print(\"Finished! Check out {} for generated audio samples.\".format(dst_dir))\n sys.exit(0)\n" ]
[ [ "torch.LongTensor", "torch.load", "torch.from_numpy", "torch.no_grad", "torch.cuda.is_available", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
karmic-creditor/pygeopackage
[ "13366d54f80bd827b84c6538b9b08b6656111ef4" ]
[ "src/geopackage/_geopackage.py" ]
[ "import os\nimport sys\nimport json\nimport sqlite3\n\nimport tempfile\nfrom io import BytesIO, StringIO\nfrom sqlite3 import Binary as sBinary\nfrom collections import OrderedDict, MutableMapping\n\ntry:\n import geomet\n from geomet import wkt as geometwkt\n from geomet import wkb as geometwkb\n _HASGEOMET = True\nexcept ImportError:\n _HASGEOMET = False\n\nfrom ._gpkg import _create_feature_class, _create_gpkg, _create_table, _insert_values\nfrom ._wkb import loads, dumps\n# ----------------------------------------------------------------------\ndef _handle_wkb(wkb):\n \"\"\"handles the insert convertion for the custom geometry types\"\"\"\n if isinstance(wkb, bytes):\n wkb = bytearray(wkb)\n return wkb\n# ----------------------------------------------------------------------\ndef _adapt_wkb(wkb):\n \"\"\"ensures the wkb values are bytes, not bytearrays\"\"\"\n return wkb\n########################################################################\nclass GeoPackage(object):\n \"\"\"\n A single instance of a GeoPackage file.\n \"\"\"\n _con = None\n _dir = None\n _path = None\n _db_name = None\n\n #----------------------------------------------------------------------\n def __init__(self, path, overwrite=False):\n \"\"\"Constructor\"\"\"\n\n self._dir = os.path.dirname(path)\n self._db_name = os.path.basename(path)\n\n if self._db_name.lower().endswith('.gpkg') == False:\n self._db_name += \".gpkg\"\n self._path = os.path.join(self._dir, self._db_name)\n if os.path.isfile(self._path) and \\\n overwrite:\n os.remove(self._path)\n self._path = _create_gpkg(name=self._db_name,\n path=self._dir,\n overwrite=overwrite)\n self._con = sqlite3.connect(self._path,\n detect_types=sqlite3.PARSE_DECLTYPES)\n # register custom dtypes\n sqlite3.register_adapter(bytearray, _adapt_wkb)\n for g in [\"POINT\", \"LINESTRING\", \"POLYGON\",\n \"MULTIPOINT\", \"MULTILINESTRING\", \"MULTIPOLYGON\"]:\n sqlite3.register_converter(g, _handle_wkb)\n #----------------------------------------------------------------------\n def _setup(self):\n \"\"\"sets up the registration for the GeoPackage\"\"\"\n if self._path != \":memory:\":\n self._con.close()\n self._con = None\n self._con = sqlite3.connect(self._path,\n detect_types=sqlite3.PARSE_DECLTYPES)\n # register custom dtypes\n sqlite3.register_adapter(bytearray, _adapt_wkb)\n for g in [\"POINT\", \"LINESTRING\", \"POLYGON\",\n \"MULTIPOINT\", \"MULTILINESTRING\", \"MULTIPOLYGON\"]:\n sqlite3.register_converter(g, _handle_wkb)\n #----------------------------------------------------------------------\n def __len__(self):\n \"\"\"returns the number of registered tables\"\"\"\n try:\n sql = \"\"\"SELECT count(*) FROM gpkg_contents\"\"\"\n cur = self._con.execute(sql)\n return cur.fetchone()[0]\n except:\n return 0\n #----------------------------------------------------------------------\n def __enter__(self):\n if self._con is None:\n self._con = sqlite3.connect(self._path)\n return self\n #----------------------------------------------------------------------\n def __exit__(self, type, value, traceback):\n self._con.commit()\n self._con.close()\n #----------------------------------------------------------------------\n def exists(self, name):\n \"\"\"\n Returns boolean if the table exists\n\n :returns: boolean\n\n \"\"\"\n sql = \"SELECT table_name, data_type FROM gpkg_contents\"\n cur = self._con.execute(sql)\n for tbl in cur:\n if tbl[0].lower() == name.lower():\n return True\n return False\n #----------------------------------------------------------------------\n 
def get(self, name):\n \"\"\"\n Returns a table if it exists in the geopackage.\n\n =============== ===============================================\n **Arguements** **Description**\n --------------- -----------------------------------------------\n name Optional String. The name of the table or\n feature class.\n =============== ===============================================\n\n :returns: Table/SpatialTable\n\n \"\"\"\n sql = \"SELECT table_name, data_type FROM gpkg_contents where table_name = ?\"\n cur = self._con.execute(sql, [name])\n for tbl in cur:\n if tbl[1] == 'attributes':\n return Table(tbl[0], self._con)\n else:\n return SpatialTable(tbl[0], self._con)\n #----------------------------------------------------------------------\n @property\n def tables(self):\n \"\"\"\n Gets a list of registered table names with the geopackage\n\n :returns: iterator\n\n \"\"\"\n sql = \"SELECT table_name, data_type FROM gpkg_contents\"\n cur = self._con.execute(sql)\n for tbl in cur:\n if tbl[1] == 'attributes':\n yield Table(tbl[0], self._con)\n else:\n yield SpatialTable(tbl[0], self._con)\n #----------------------------------------------------------------------\n def enable(self):\n \"\"\"\n enables the sqlite database to be a geopackage\n\n :returns: Boolean\n\n \"\"\"\n try:\n self._setup()\n except:\n return False\n return True\n #----------------------------------------------------------------------\n def create(self,\n name,\n fields=None,\n wkid=None,\n geometry_type=None,\n overwrite=True):\n \"\"\"\n The `create` method generates a new table or feature class in the\n geopackage.\n\n =============== ===============================================\n **Arguements** **Description**\n --------------- -----------------------------------------------\n name Optional String. The name of the table or\n feature class.\n --------------- -----------------------------------------------\n fields Optional dict. The columns to add to a table.\n An OBJECTID field is always created for any table.\n\n Allowed Fields:\n\n + TEXT -Any string of characters.\n + FLOAT - Fractional numbers between -3.4E38 and 1.2E38.\n + DOUBLE - Fractional numbers between -2.2E308 and 1.8E308.\n + SHORT - Whole numbers between -32,768 and 32,767.\n + LONG - Whole numbers between -2,147,483,648 and 2,147,483,647.\n + DATE -Date and/or time.\n + BLOB -Long sequence of binary numbers.\n + GUID -Globally unique identifier.\n\n --------------- -----------------------------------------------\n wkid Optional Int. The SRS code for the feature class.\n --------------- -----------------------------------------------\n geometry_type Optional String. If given the output will be a\n SpatialTable instead of a Table. Allowed values\n are: point, line, polygon, and multipoint.\n --------------- -----------------------------------------------\n overwrite Optional Boolean. If True, the geopackage will\n attempt to erase the table and recreate it with\n the new schema. 
All records from the old table\n will be lost.\n =============== ===============================================\n\n :returns: Table/SpatialTable\n \"\"\"\n if overwrite:\n sql_drop = \"\"\"DROP TABLE IF EXISTS %s\"\"\" % name\n sql_delete_row = \"\"\"DELETE FROM gpkg_contents where table_name = '{tbl}'\"\"\".format(tbl=name)\n sql_geom_col = \"\"\"DELETE FROM gpkg_geometry_columns where table_name = '{tbl}'\"\"\".format(tbl=name)\n self._con.execute(sql_drop)\n self._con.execute(sql_delete_row)\n self._con.execute(sql_geom_col)\n self._con.commit()\n elif self.exists(name) and \\\n overwrite == False:\n raise ValueError(\"Table %s exists. Please pick a different table name\" % name)\n if geometry_type:\n iftrue = _create_feature_class(con=self._con,\n name=name,\n wkid=wkid,\n fields=fields,\n geometry=geometry_type)\n if iftrue:\n return SpatialTable(table=name,\n con=self._con)\n\n else:\n iftrue = _create_table(con=self._con,\n name=name,\n fields=fields)\n if iftrue:\n return Table(table=name, con=self._con)\n\n return\n########################################################################\nclass _Row(MutableMapping, OrderedDict):\n \"\"\"\n A Single Row Entry. This class is created by the `Table` class.\n\n ** It should not be created by a user. **\n \"\"\"\n _con = None\n _values = None\n _table_name = None\n _dict = None\n _keys = None\n #----------------------------------------------------------------------\n def __init__(self, values, table_name=None, con=None, header=None):\n \"\"\"Constructor\"\"\"\n self._table_name = table_name\n self._con = con\n self._values = values\n self._header = header\n #----------------------------------------------------------------------\n def __str__(self):\n return \"<Row OBJECTID=%s>\" % self['OBJECTID']\n #----------------------------------------------------------------------\n def __repr__(self):\n return self.__str__()\n #----------------------------------------------------------------------\n def __setattr__(self, name, value):\n if name in {'_values','_dict', '_table_name', '_con', '_keys', '_header'}:\n super().__setattr__(name, value)\n elif name.lower() == 'shape':\n self._values[name] = value\n self._update()\n elif name.lower() != 'objectid' and \\\n name in self.keys():\n self._values[name] = value\n self._update()\n\n elif name.lower() == 'objectid':\n raise ValueError(\"OBJECTID values cannot be updated.\")\n else:\n raise ValueError(\"The field: {field} does not exist.\".format(field=name))\n #----------------------------------------------------------------------\n def __getattr__(self, name):\n if name in self._values:\n return self._values[name]\n return\n #----------------------------------------------------------------------\n def __getitem__(self, name):\n return self.__getattr__(name)\n #----------------------------------------------------------------------\n def __setitem__(self, name, value):\n self.__setattr__(name, value)\n #----------------------------------------------------------------------\n def keys(self):\n \"\"\"returns the column names in the dataset\"\"\"\n return list(self._values.keys())\n #----------------------------------------------------------------------\n @property\n def fields(self):\n \"\"\"returns the field names in the dataset\"\"\"\n return self.keys()\n #----------------------------------------------------------------------\n def as_dict(self):\n \"\"\"returns the row as a dictionary\"\"\"\n return dict(zip(self.keys(), self.values()))\n 
#----------------------------------------------------------------------\n def values(self):\n \"\"\"returns the row values\"\"\"\n return list(self._values.values())\n #----------------------------------------------------------------------\n def _update(self):\n \"\"\"updates the current row\"\"\"\n txts = []\n values = []\n for k,v in self._values.items():\n if k.lower() != \"objectid\" and \\\n k.lower() != 'shape':\n txts.append(\"%s=?\" % k)\n values.append(v)\n elif k.lower() == 'shape':\n if isinstance(v, dict) and \"coordinates\" not in v:\n v = self._header + dumps(v, False)\n elif isinstance(v, dict) and \"coordinates\" in v:\n v = self._header + geometwkb.dumps(obj=v)\n elif isinstance(v, str):\n gj = geometwkt.loads(v)\n v = self._header + geometwkb.dumps(obj=gj)\n elif isinstance(v, (bytes, bytearray)):\n if isinstance(v, (bytearray)):\n v = bytes(v)\n if len(v) > 2 and \\\n v[:2] != b'GB':\n v = self._header + v\n elif v is None:\n # empty-geometry payload: little-endian NaN coordinates\n v = self._header + bytes.fromhex('000000000000f87f')\n else:\n raise ValueError((\"Shape column must be Esri JSON dictionary, \"\n \"WKT, GeoJSON dictionary, or WKB (bytes)\"))\n txts.append(\"%s=?\" % k)\n values.append(v)\n sql = '''UPDATE {table} SET {values} WHERE OBJECTID={oid}'''.format(\n table=self._table_name,\n values=\",\".join(txts),\n oid=self._values['OBJECTID'])\n cur = self._con.execute(sql, values)\n self._con.commit()\n del sql\n del values, txts\n return True\n #----------------------------------------------------------------------\n def delete(self):\n \"\"\"\n Deletes the current row\n\n :returns: Boolean\n \"\"\"\n try:\n cur = self._con.execute(\n '''DELETE FROM {tbl} WHERE OBJECTID=?'''.format(tbl=self._table_name),\n [self['OBJECTID']])\n self._con.commit()\n return True\n except:\n return False\n########################################################################\nclass Table(object):\n \"\"\"\n A Table object is an attribute-only set of data. No spatial data is associated with\n this information.\n\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n table Required String. The name of the table.\n --------------- -----------------------------------------------\n con Required sqlite3.Connection. 
The active connection\n to the geopackage.\n =============== ===============================================\n\n \"\"\"\n\n _con = None\n _table_name = None\n _create_sql = None\n _fields = None\n #----------------------------------------------------------------------\n def __init__(self, table, con):\n \"\"\"Constructor\"\"\"\n self._con = con\n self._table_name = table\n #----------------------------------------------------------------------\n @property\n def dtype(self):\n \"\"\"returns the table type\"\"\"\n return \"attribute\"\n #----------------------------------------------------------------------\n def __str__(self):\n return \"<Attribute Table: {table}>\".format(table=self._table_name)\n #----------------------------------------------------------------------\n def __repr__(self):\n return self.__str__()\n #----------------------------------------------------------------------\n def __iter__(self):\n for row in self.rows():\n yield row\n #----------------------------------------------------------------------\n @property\n def fields(self):\n \"\"\"\n returns the field information for a table\n\n :returns: Dictionary\n\n \"\"\"\n if self._fields is None:\n sql = \"\"\"PRAGMA table_info({tbl});\"\"\".format(tbl=self._table_name)\n rows = self._con.execute(sql).fetchall()\n self._fields = {row[1]: row[2] for row in rows}\n return self._fields\n #----------------------------------------------------------------------\n def add_field(self, name, data_type):\n \"\"\"\n\n Adds a new column to the table.\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n name Required String. The name of the field.\n --------------- -----------------------------------------------\n data_type Required String. The type of column to add.\n\n Allowed Data Types:\n\n + TEXT - Any string of characters.\n + FLOAT - Fractional numbers between -3.4E38 and 1.2E38.\n + DOUBLE - Fractional numbers between -2.2E308 and 1.8E308.\n + SHORT - Whole numbers between -32,768 and 32,767.\n + LONG - Whole numbers between -2,147,483,648 and 2,147,483,647.\n + DATE - Date and/or time.\n + BLOB - Long sequence of binary numbers.\n + GUID - Globally unique identifier.\n =============== ===============================================\n\n :returns: Boolean\n\n \"\"\"\n _field_lookup = {\n \"text\" : [\"TEXT\", \"check((typeof({field}) = 'text' or typeof({field}) = 'null') and not length({field}) > {l})\"],\n \"float\" : [\"DOUBLE\", \"\"\"check(typeof({field}) = 'real' or typeof({field}) = 'null')\"\"\"],\n \"double\" : [\"DOUBLE\", \"check(typeof({field}) = 'real' or typeof({field}) = 'null')\"],\n \"short\" : [\"SMALLINT\", \"check((typeof({field}) = 'integer' or typeof({field}) = 'null') and {field} >= -32768 and {field} <= 32767)\"],\n \"long\" : [\"MEDIUMINT\", \"check((typeof({field}) = 'integer' or typeof({field}) = 'null') and {field} >= -2147483648 and {field} <= 2147483647)\"],\n \"integer\" : [\"MEDIUMINT\", \"check((typeof({field}) = 'integer' or typeof({field}) = 'null') and {field} >= -2147483648 and {field} <= 2147483647)\"],\n \"date\" : [\"DATETIME\", \"check((typeof({field}) = 'text' or typeof({field}) = 'null') and strftime('%Y-%m-%dT%H:%M:%fZ',{field}))\"],\n \"blob\" : [\"BLOB\", \"check(typeof({field}) = 'blob' or typeof({field}) = 'null')\"],\n \"guid\" : [\"TEXT(38)\", \"check((typeof({field}) = 'text' or typeof({field}) = 'null') and not length({field}) > 38)\"]\n }\n try:\n row = _field_lookup[data_type.lower()]\n if row[0].lower() != \"text\":\n fld = (\"{field} {dtype} {st}\"\n .format(field=name, dtype=row[0], st=row[1])\n .format(field=name))\n else:\n fld = \"{field} {dtype}\".format(field=name, dtype=row[0])\n sql = \"\"\"ALTER TABLE {table} ADD COLUMN {dtype};\"\"\".format(table=self._table_name,\n field=name,\n dtype=fld)\n self._con.execute(sql)\n self._con.commit()\n self._fields = None\n return True\n except:\n return False\n #----------------------------------------------------------------------\n def delete_field(self, name):\n \"\"\"\n Drops a Field from a Table\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n name Required String. The name of the field to remove.\n =============== ===============================================\n\n :returns: boolean\n \"\"\"\n fields = \",\".join([fld for fld in self.fields if fld.lower() != name.lower()])\n sql = \"\"\"\n CREATE TABLE temp_bkup AS SELECT {fields} FROM {table};\n DROP TABLE {table};\n ALTER TABLE temp_bkup RENAME TO {table};\n \"\"\".format(table=self._table_name, fields=fields)\n self._con.executescript(sql)\n self._con.commit()\n self._fields = None\n return True\n #----------------------------------------------------------------------\n def rows(self, where=None, fields=\"*\"):\n \"\"\"\n Search/update cursor like iterator\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n where Optional String. Optional Sql where clause.\n --------------- -----------------------------------------------\n fields Optional List. The default is all fields (*).\n A list of fields can be provided to limit the\n data that is returned.\n =============== ===============================================\n\n :returns: _Row object\n \"\"\"\n if isinstance(fields, (list, tuple)):\n if \"OBJECTID\" not in fields:\n fields.append(\"OBJECTID\")\n fields = \",\".join(fields)\n if where is None:\n query = \"\"\"SELECT {fields} from {tbl} \"\"\".format(tbl=self._table_name,\n fields=fields)\n else:\n query = \"\"\"SELECT {fields} from {tbl} WHERE {where}\"\"\".format(tbl=self._table_name,\n fields=fields,\n where=where)\n cursor = self._con.cursor()\n c = cursor.execute(query)\n columns = [d[0] for d in c.description]\n for row in c:\n yield _Row(values=dict(zip(columns, row)),\n table_name=self._table_name,\n con=self._con)\n #----------------------------------------------------------------------\n def insert(self, row):\n \"\"\"\n Inserts a new row via dictionary\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n row Required Dictionary. Insert a new row via a\n dictionary. The key/value pair must match up to\n the field names in the table.\n =============== ===============================================\n\n :returns: Boolean\n\n \"\"\"\n values = None\n if isinstance(row, dict):\n keys = row.keys()\n values = [list(row.values())]\n elif isinstance(row, (list, tuple)):\n keys = row[0].keys()\n values = [list(r.values()) for r in row]\n q = [\"?\"] * len(keys)\n q = \",\".join(q)\n sql = '''INSERT INTO {table} ({fields})\n VALUES({q})'''.format(table=self._table_name,\n fields=\",\".join(keys),\n q=q)\n inserts = []\n cur = self._con.cursor()\n cur.execute(sql, list(values[0]))\n self._con.commit()\n return True\n #----------------------------------------------------------------------\n def to_pandas(self, where=None, fields=\"*\", ftype=None):\n \"\"\"\n Exports a table to a Pandas DataFrame.\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n where Optional String. Optional Sql where clause.\n --------------- -----------------------------------------------\n fields Optional List. The default is all fields (*).\n A list of fields can be provided to limit the\n data that is returned.\n --------------- -----------------------------------------------\n ftype Optional String. This value sets the dataframe\n format type. The value can be None or esri.\n\n + None - means the dataframe will be a raw view of the table.\n + esri - means the dataframe will be a spatially enabled dataframe. (Requires Python API for ArcGIS)\n\n =============== ===============================================\n\n :returns: pd.DataFrame\n\n \"\"\"\n import pandas as pd\n if isinstance(fields, (list, tuple)):\n if \"OBJECTID\" not in fields:\n fields.append(\"OBJECTID\")\n fields = \",\".join(fields)\n if where is None:\n query = \"\"\"SELECT {fields} from {tbl} \"\"\".format(tbl=self._table_name,\n fields=fields)\n else:\n query = \"\"\"SELECT {fields} from {tbl} WHERE {where}\"\"\".format(tbl=self._table_name,\n fields=fields,\n where=where)\n if ftype is None:\n return pd.read_sql_query(query,\n self._con)\n elif str(ftype).lower() == 'esri':\n try:\n from arcgis.geometry import Geometry\n from arcgis.features import GeoAccessor, GeoSeriesAccessor\n df = pd.read_sql_query(query,\n self._con)\n fields = list(self.fields.keys())\n lower_fields = [str(fld).lower() for fld in fields]\n if \"shape\" in lower_fields:\n idx = lower_fields.index(\"shape\")\n SHAPE = fields[idx]\n df[SHAPE] = df[SHAPE].apply(lambda x: Geometry(x[8:]))\n df.spatial.set_geometry(SHAPE)\n try:\n df.spatial.project(self.wkid)\n except:\n print('nope')\n return df\n except ImportError:\n raise Exception(\"The Python API for ArcGIS is required to import using ftype `esri`\")\n except Exception as e:\n raise Exception(e)\n########################################################################\nclass SpatialTable(Table):\n \"\"\"\n Represents a Feature Class inside a GeoPackage\n\n \"\"\"\n _con = None\n _wkid = None\n _gtype = None\n _fields = None\n _table_name = None\n _create_sql = None\n _gp_header = None\n #----------------------------------------------------------------------\n def __init__(self, table, con):\n \"\"\"Constructor\"\"\"\n self._table_name = table\n self._con = con\n self._refresh()\n #----------------------------------------------------------------------\n def _refresh(self):\n \"\"\"internal method that refreshes the table information\"\"\"\n self._sd_lu = \"\"\"SELECT * from gpkg_geometry_columns where table_name = '%s'\"\"\" % self._table_name\n cur = self._con.execute(self._sd_lu)\n self._gp_header = None\n\n for row in cur:\n self._gtype = row[2]\n self._wkid = row[3]\n break\n del cur\n #----------------------------------------------------------------------\n def __str__(self):\n return \"<Spatial Table: {table}, {gt}>\".format(table=self._table_name,\n gt=self._gtype)\n #----------------------------------------------------------------------\n def __repr__(self):\n return self.__str__()\n #----------------------------------------------------------------------\n @property\n def geometry_type(self):\n if self._gtype is None:\n self._refresh()\n return self._gtype\n #----------------------------------------------------------------------\n @property\n def dtype(self):\n \"\"\"\n Returns the table type\n\n :returns: String\n\n \"\"\"\n return \"spatial\"\n #----------------------------------------------------------------------\n @property\n def wkid(self):\n \"\"\"\n Returns the Spatial Table's WKID/SRS ID\n\n :returns: Integer\n \"\"\"\n if self._wkid is None:\n self._refresh()\n return self._wkid\n #----------------------------------------------------------------------\n def rows(self, where=None, fields=\"*\"):\n \"\"\"\n Search/update cursor like iterator\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n where Optional String. Optional Sql where clause.\n --------------- -----------------------------------------------\n fields Optional List. The default is all fields (*).\n A list of fields can be provided to limit the\n data that is returned.\n =============== ===============================================\n\n :returns: _Row object\n \"\"\"\n if isinstance(fields, (list, tuple)):\n if \"OBJECTID\" not in fields:\n fields.append(\"OBJECTID\")\n fields = \",\".join(fields)\n if where is None:\n query = \"\"\"SELECT {fields} from {tbl} \"\"\".format(tbl=self._table_name,\n fields=fields)\n else:\n query = \"\"\"SELECT {fields} from {tbl} WHERE {where}\"\"\".format(tbl=self._table_name,\n fields=fields,\n where=where)\n cursor = self._con.cursor()\n c = cursor.execute(query)\n columns = [d[0] for d in c.description]\n for row in c:\n yield _Row(values=dict(zip(columns, row)),\n table_name=self._table_name,\n con=self._con, header=self._gpheader)\n #----------------------------------------------------------------------\n def _flag_to_bytes(self, code):\n \"\"\"converts single integer to bytes\"\"\"\n return int(code).to_bytes(1,\n byteorder='little')\n #----------------------------------------------------------------------\n def _srid_to_bytes(self, srid):\n \"\"\"converts WKID values to bytes\"\"\"\n return int(srid).to_bytes(4,\n byteorder='little')\n #----------------------------------------------------------------------\n def _build_gp_header(self, version=0, empty=1):\n \"\"\"assembles the GP header for WKB geometry\"\"\"\n return b'GP' + self._flag_to_bytes(version) + self._flag_to_bytes(empty) + self._srid_to_bytes(self.wkid)\n #----------------------------------------------------------------------\n @property\n def _gpheader(self):\n \"\"\"internal only, builds the geopackage binary header\"\"\"\n if self._gp_header is None:\n self._gp_header = self._build_gp_header()\n return self._gp_header\n #----------------------------------------------------------------------\n def insert(self, row, geom_format='EsriJSON'):\n \"\"\"\n Inserts a new row via dictionary\n\n =============== ===============================================\n **Arguments** **Description**\n --------------- -----------------------------------------------\n row Required Dictionary. Insert a new row via a\n dictionary. The key/value pair must match up to\n the field names in the table.\n --------------- -----------------------------------------------\n geom_format Optional String. When providing geometries\n during insertion of new records, the method\n needs to know the format of the geometry. The\n supported format values are: EsriJSON,\n GeoJSON, WKT, and WKB.\n\n The default geometry format is `EsriJSON`.\n\n **Note**\n\n GeoJSON and WKT require the package `geomet` to\n be installed.\n\n =============== ===============================================\n\n :returns: Boolean\n\n \"\"\"\n if _HASGEOMET == False and geom_format.lower() in ['wkt', 'geojson']:\n raise ValueError((\"The package `geomet` is required to work with \"\n \"WKT and GeoJSON. Run `pip install geomet` to install.\"))\n if isinstance(row, _Row):\n row = row._values\n values = None\n flds = {fld.lower(): fld for fld in row.keys()}\n if 'shape' in flds:\n if isinstance(row[flds['shape']], dict) and geom_format.lower() == \"esrijson\":\n row[flds['shape']] = self._gpheader + dumps(row[flds['shape']], False)\n elif isinstance(row[flds['shape']], dict) and geom_format.lower() == \"geojson\":\n row[flds['shape']] = self._gpheader + geometwkb.dumps(obj=row[flds['shape']])\n elif isinstance(row[flds['shape']], str) and geom_format.lower() == \"wkt\":\n gj = geometwkt.loads(row[flds['shape']])\n row[flds['shape']] = self._gpheader + geometwkb.dumps(obj=gj)\n elif isinstance(row[flds['shape']], (bytes, bytearray)):\n if isinstance(row[flds['shape']], (bytearray)):\n row[flds['shape']] = bytes(row[flds['shape']])\n if len(row[flds['shape']]) > 2 and \\\n row[flds['shape']][:2] != b'GP':\n row[flds['shape']] = self._gpheader + row[flds['shape']]\n elif row[flds['shape']] is None:\n row[flds['shape']] = self._gpheader + b'0x000000000000f87f'\n else:\n raise ValueError((\"Shape column must be Esri JSON dictionary, \"\n \"WKT, GeoJSON dictionary, or WKB (bytes)\"))\n if isinstance(row, dict):\n keys = row.keys()\n values = [list(row.values())]\n elif isinstance(row, (list, tuple)):\n keys = row[0].keys()\n values = [list(r.values()) for r in row]\n q = [\"?\"] * len(keys)\n q = \",\".join(q)\n sql = '''INSERT INTO {table} ({fields})\n VALUES({q})'''.format(table=self._table_name,\n fields=\",\".join(keys),\n q=q)\n inserts = []\n cur = self._con.cursor()\n cur.execute(sql, list(values[0]))\n self._con.commit()\n return True\n\n" ]
[ [ "pandas.read_sql_query" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
brumen/openpilot
[ "ac31b2ace59eb4b1f1f3eb0181bf84e715c26d56" ]
[ "models/lane_detect/lane_models/road_model.py" ]
[ "\nimport cv2\nimport numpy as np\nimport threading\n\nfrom typing import Union\n\nfrom openpilot.models.lane_detect.lane_config import BASE_TU, BASE_CU\n\nfrom openpilot.models.lane_detect.hough_lines import HoughLanesImage\n\nfrom openpilot.models.lane_detect.lane_models.lane_generator_hough import LaneGeneratorCUHough, LaneGeneratorTUHough, YellowLineSlidersMixin\n\n\nclass HSVFilterMixinOrig:\n\n def _process_X(self, orig_image) -> Union[None, np.ndarray]:\n\n image = orig_image\n # crop image\n h, w = image.shape[:2]\n #image = image[200:h - 20, 20:550]\n # create hsv\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n #low_val = (0, 0, 0)\n #high_val = (179, 45, 96)\n low_val = np.uint8(self._y_vec[:3])\n high_val = np.uint8(self._y_vec[3:])\n # Threshold the HSV image\n mask = cv2.inRange(hsv, low_val, high_val)\n\n # remove noise\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=np.ones((8, 8), dtype=np.uint8))\n # close mask\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel=np.ones((20, 20), dtype=np.uint8))\n\n # improve mask by drawing the convexhull\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n hull = cv2.convexHull(cnt)\n cv2.drawContours(mask, [hull], 0, (255), -1)\n # erode mask a bit to migitate mask bleed of convexhull\n mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, kernel=np.ones((5, 5), dtype=np.uint8))\n\n # remove this line, used to show intermediate result of masked road\n road = cv2.bitwise_and(image, image, mask=mask)\n\n return road\n # apply mask to hsv image\n road_hsv = cv2.bitwise_and(hsv, hsv, mask=mask)\n # set lower and upper color limits\n low_val = (0, 0, 102)\n high_val = (179, 255, 255)\n # Threshold the HSV image\n mask2 = cv2.inRange(road_hsv, low_val, high_val)\n # apply mask to original image\n return cv2.bitwise_and(image, image, mask=mask2)\n\n\nclass HSVFilterMixin1:\n \"\"\"\n HSV params\n low_val = (0, 0, 0)\n high_val = (179, 45, 96)\n \"\"\"\n\n ROIS = [(0, 460), (0, 720), (1280, 720), (1280, 460), (840, 260), (400, 260)] # roi vertices\n\n def _process_X(self, orig_image) -> Union[None, np.ndarray]:\n\n image = orig_image\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) # creates HSV image\n\n low_val = np.uint8(self._y_vec[:3])\n high_val = np.uint8(self._y_vec[3:])\n # Threshold the HSV image\n mask = cv2.inRange(hsv, low_val, high_val)\n\n # remove noise\n #mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=np.ones((8, 8), dtype=np.uint8))\n # close mask\n #mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel=np.ones((20, 20), dtype=np.uint8))\n\n # improve mask by drawing the convexhull\n #contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n #for cnt in contours:\n # hull = cv2.convexHull(cnt)\n # cv2.drawContours(mask, [hull], 0, (255), -1)\n # erode mask a bit to migitate mask bleed of convexhull\n #mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, kernel=np.ones((5, 5), dtype=np.uint8))\n\n # remove this line, used to show intermediate result of masked road\n road = cv2.bitwise_and(image, image, mask=mask)\n\n\n hough_params = { 'rho': 1\n , 'theta': np.pi / 180.\n , 'threshold': 30\n , 'min_line_len': 20\n , 'max_line_gap': 20\n , 'gray_range': (150, 255)\n , 'canny_range': (100, 200)\n , }\n\n cl = HoughLanesImage(road\n , roi_vertices=self.ROIS\n , hough_params=hough_params )\n\n hough_img = cv2.cvtColor(cl.show_lines(road.shape[:2], pixel_tol=2).astype(np.uint8) * 255, cv2.COLOR_BGR2RGB)\n\n # return cv2.addWeighted(orig_image, 
0.6, hough_img, 0.8, 0)\n return cv2.addWeighted(road, 0.6, hough_img, 0.8, 0)\n\n\n # # apply mask to hsv image\n # road_hsv = cv2.bitwise_and(hsv, hsv, mask=mask)\n # # set lower and upper color limits\n # low_val = (0, 0, 102)\n # high_val = (179, 255, 255)\n # # Threshold the HSV image\n # mask2 = cv2.inRange(road_hsv, low_val, high_val)\n # # apply mask to original image\n # return cv2.bitwise_and(image, image, mask=mask2)\n\n\nclass HSVLineTU(HSVFilterMixin1, LaneGeneratorTUHough ):\n\n # good values for y are\n # (0, 175, 0) - (255,255,255)\n # (0, 175, 180) - (255,255,255) <- THIS IS CHOSEN\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._y_vec = [0, 175, 180, 255, 255, 255]\n\n\nclass HSVLineCU(HSVFilterMixin1, LaneGeneratorCUHough):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self._y_vec = [0, 175, 180, 255, 255, 255]\n\n\nclass HSVLineTUSliders(YellowLineSlidersMixin, HSVLineTU):\n \"\"\" Yellow line but with sliders.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # add the sliders from Tkinter\n sliders_th = threading.Thread(target = lambda : self._sliders())\n sliders_th.start()\n\n\nclass HSVLineCUSliders(YellowLineSlidersMixin, HSVLineCU):\n \"\"\" Yellow line but with sliders.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # add the sliders from Tkinter\n sliders_th = threading.Thread(target = lambda : self._sliders())\n sliders_th.start()\n\n\n\n# examples\ndef example_2():\n # new_image_size = (590, 1640, 3)\n batch_size = 32\n train_percentage = 0.8\n\n\n train_generator = HSVLineTUSliders( BASE_TU\n , to_train = True\n , train_percentage = train_percentage\n , batch_size=batch_size\n , scale_img= 1.)\n\n train_generator.show_movie_cont()\n\n\ndef example_1():\n # new_image_size = (590, 1640, 3)\n scale_size = 1.\n batch_size = 32\n train_percentage = 0.8\n\n train_generator = HSVLineCUSliders( BASE_CU\n , to_train = True\n , train_percentage = train_percentage\n , batch_size=batch_size )\n\n train_generator.show_movie_cont()\n\n\nexample_2()\n" ]
[ [ "numpy.uint8", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lovaulonze/bulk_mater
[ "1c611861c1c9bd16cc2d2fd7a1b3f084d135391c" ]
[ "extract.py" ]
[ "import os, os.path\nimport numpy\n\ndef extract_to_txt(base_dir):\n for name in [\"polarizability_df.npz\",\n \"polarizability_tetra.npz\"]:\n fname = os.path.join(base_dir, name)\n if os.path.exists(fname):\n print(\"Found {}\".format(fname))\n f = numpy.load(fname)\n freq = f[\"frequencies\"]\n alphax = f[\"eps_x\"]\n alphay = f[\"eps_y\"]\n alphaz = f[\"eps_z\"]\n numpy.savetxt(os.path.join(base_dir,\n name.replace(\"_\", \"_x_imag_\").replace(\".npz\", \".txt\")),\n X=numpy.vstack([freq, alphax.imag]).T,\n header=\"Freq (eV); Imaginary alpha x (AA)\")\n numpy.savetxt(os.path.join(base_dir,\n name.replace(\"_\", \"_y_imag_\").replace(\".npz\", \".txt\")),\n X=numpy.vstack([freq, alphay.imag]).T,\n header=\"Freq (eV); Imaginary alpha y (AA)\")\n numpy.savetxt(os.path.join(base_dir,\n name.replace(\"_\", \"_z_imag_\").replace(\".npz\", \".txt\")),\n X=numpy.vstack([freq, alphaz.imag]).T,\n header=\"Freq (eV); Imaginary alpha z (AA)\")\n \n numpy.savetxt(os.path.join(base_dir,\n name.replace(\"_\", \"_x_real_\").replace(\".npz\", \".txt\")),\n X=numpy.vstack([freq, alphax.real]).T,\n header=\"Freq (eV); real alpha x (AA)\")\n numpy.savetxt(os.path.join(base_dir,\n name.replace(\"_\", \"_y_real_\").replace(\".npz\", \".txt\")),\n X=numpy.vstack([freq, alphay.real]).T,\n header=\"Freq (eV); real alpha y (AA)\")\n numpy.savetxt(os.path.join(base_dir,\n name.replace(\"_\", \"_z_real_\").replace(\".npz\", \".txt\")),\n X=numpy.vstack([freq, alphaz.real]).T,\n header=\"Freq (eV); Real alpha z (AA)\")\n f.close()\n\n\nif __name__ == \"__main__\":\n root_dir = \"/cluster/scratch/ttian/bulk/\"\n for directory in os.listdir(root_dir):\n base_dir = os.path.join(root_dir, directory)\n extract_to_txt(base_dir)\n \n" ]
[ [ "numpy.load", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dragonrobotics/2018-PowerUp
[ "0fb6be22420b1488ca3d6abb04588e8564d768b9" ]
[ "autonomous/fsm_auto.py" ]
[ "\"\"\"\nAutonomous module.\n\nThis module contains classes and functions for autonomous, which is designed as\na finite-state automaton updated per tick in autonomousPeriodic.\n\nAutonomous transitions between these states:\n\n - **init**: The robot closes the claw, fully lowers the lift, and\n transitions onto the **turn** state to angle toward the next waypoint.\n - **turn**: The swerve modules (not the entire chassis) angle toward the\n next waypoint, then transitions into the drive state\n - **drive**: The robot drives over to the next waypoint, then transitions\n into the turning state or the lifting state if there are no other\n waypoints.\n - **lift**: the RD4B lifts to a predetermined height (either the height of\n the scale or switch), then transitions to the target states.\n - **target-turn**: Turns the entire chassis towards the target (either the\n switch or the scale).\n - **target-drive**: Drives the robot towards the target. This differs\n slightly from the normal **drive** state because it uses sensors to be\n more accurate.\n - **drop**: The claw opens.\n\n\"\"\"\n\nimport math\nimport wpilib\nimport numpy as np\nfrom collections import deque\n\nstart_pos_left = np.array((21.25, 82.5))\nstart_pos_middle = np.array((21.25, 197))\nstart_pos_right = np.array((21.25, 263.5))\n\nleft_switch = np.array((168, 164-54))\nright_switch = np.array((168, 164+54))\n\nstaging_left = np.array((120, 48.5))\nstaging_mid = np.array((120, 164))\nstaging_right = np.array((120, 279.5))\n\nalign_pt_left = np.array((168, 48.5))\nalign_pt_right = np.array((168, 279.5))\n\n\nclass Autonomous:\n \"\"\"\n This class implements a finite-state automaton for controlling autonomous.\n Also, this class includes a way to input waypoint coordinates into a\n a template to be automatically selected and formatted as necessary.\n\n Parameters:\n robot: the robot instance.\n robot_position: the position of the robot on the field.\n\n \"\"\"\n\n #: Dictionary of paths (arrays of waypoints). The right one is chosen\n #: at runtime.\n PATHS = {\n \"l-drive-left\": [\n start_pos_left,\n staging_left,\n align_pt_left\n ],\n\n \"l-drive-right\": [\n start_pos_right,\n staging_right,\n align_pt_right\n ],\n\n \"direct-left\": [\n staging_left,\n align_pt_left\n ],\n\n \"direct-right\": [\n staging_right,\n align_pt_right\n ],\n\n \"baseline-left\": [\n staging_left\n ],\n\n \"baseline-mid\": [\n staging_mid\n ],\n\n \"baseline-right\": [\n staging_right\n ]\n }\n\n ##################################################################\n # Internal code starts here.\n ##################################################################\n\n turn_angle_tolerance = math.radians(2.5) #: a tolerance range for turning\n drive_dist_tolerance = 3 #: a tolerance range for driving, in inches.\n lift_height_tolerance = 2 #: a tolerance range for lifting, in inches.\n drive_speed = 100 #: how fast to drive, in native units per 100ms\n init_lift_height = 6 #: initial lift height, in inches above the ground.\n\n def __init__(self, robot, robot_position):\n \"\"\"\n Initialize autonomous.\n\n This constructor mainly initializes the software.\n The correct path is chosen from SmartDashboard and initialized into\n numpy-based waypoints. The current position is set. 
Then, the state\n is set to 'init' and physical initializations are done there.\n \"\"\"\n\n # basic initalization.\n self.robot = robot\n self.target = None\n self.target_height = 36 # inches -- set this according to target\n self.final_drive_dist = 12 # inches\n\n self.robot.drivetrain.reset_drive_position()\n self.robot.imu.reset()\n\n # get preferences and the field string from the Game Data API.\n ds = wpilib.DriverStation.getInstance()\n field_string = ds.getGameSpecificMessage().upper()\n\n if field_string == \"\": # this only happens during tests\n field_string = 'LLL'\n\n if field_string[0] == 'L':\n self.target = left_switch\n self.init_turn_angle = math.radians(270)\n\n self.waypoints = self.PATHS['direct-left']\n else:\n self.target = right_switch\n self.init_turn_angle = math.radians(90)\n\n self.waypoints = self.PATHS['direct-right']\n\n # set current position. TODO: implement.\n self.current_pos = np.array([0, 0])\n\n if robot_position == 'Left':\n self.current_pos = start_pos_left\n elif robot_position == 'Middle':\n self.current_pos = start_pos_middle\n elif robot_position == 'Right':\n self.current_pos = start_pos_right\n\n # active waypoint: the waypoint we are currently headed towards.\n self.active_waypoint_idx = 0\n\n self.state = 'init'\n self.__module_angle_err_window = deque([], 30)\n\n self.hack_timer = wpilib.Timer()\n self.hack_timer_started = False\n\n def state_init(self):\n \"\"\"\n Perform robot-oriented initializations.\n\n Close the claw and set the lift to its lowest position, then transition\n into the turning state.\n \"\"\"\n self.robot.drivetrain.set_all_module_angles(0)\n\n if not self.hack_timer_started:\n self.hack_timer.reset()\n self.hack_timer.start()\n self.hack_timer_started = True\n else:\n #\n init_time = self.hack_timer.get()\n if init_time < 0.5:\n self.robot.lift.setLiftPower(-0.6)\n self.robot.claw.set_power(1)\n self.robot.drivetrain.set_all_module_speeds(150, True)\n elif init_time < 1:\n self.robot.lift.setLiftPower(0)\n self.robot.claw.set_power(0)\n self.robot.drivetrain.set_all_module_speeds(200, True)\n elif init_time < 1.5:\n self.robot.drivetrain.set_all_module_speeds(-200, True)\n elif self.hack_timer.get() > 1.5:\n self.robot.drivetrain.set_all_module_speeds(0, True)\n self.robot.claw.set_power(0)\n\n self.hack_timer_started = False\n\n # Get distance driven forwards during the unfold maneuver\n dist = np.mean(self.robot.drivetrain.get_module_distances())\n dist *= (4 * math.pi) / (80 * 6.67)\n self.current_pos[0] += dist\n\n self.state = 'init-turn'\n\n def state_init_turn(self):\n self.robot.drivetrain.drive(0, 0, 0.1)\n hdg = self.robot.imu.get_robot_heading()\n\n # if we are at the proper angle now, move to the target-drive state.\n if abs(hdg - self.init_turn_angle) <= self.turn_angle_tolerance:\n self.robot.drivetrain.set_all_module_speeds(0, True)\n self.robot.drivetrain.reset_drive_position()\n self.state = 'turn'\n\n def state_turn(self):\n \"\"\"\n Turn the swerve modules to their desired angle.\n Then transition into the `drive` state.\n \"\"\"\n\n # stop the drivetrain. 
Otherwise the robot will do weird curves.\n self.robot.drivetrain.set_all_module_speeds(0, direct=True)\n\n # get the active waypoint, and from there calculate the displacement\n # vector relative to the robot's position.\n active_waypoint = self.waypoints[self.active_waypoint_idx]\n disp_vec = active_waypoint - self.current_pos\n\n # trigonometry to find the angle, then set the module angles.\n tgt_angle = np.arctan2(disp_vec[1], disp_vec[0])\n tgt_angle -= self.robot.imu.get_robot_heading()\n\n self.robot.drivetrain.set_all_module_angles(tgt_angle)\n self.robot.drivetrain.set_all_module_speeds(0, direct=True)\n\n cur_error = np.array(\n self.robot.drivetrain.get_closed_loop_error(),\n dtype=np.float64\n )\n\n cur_error *= 180 / 512\n\n max_err = np.amax(np.abs(cur_error))\n self.__module_angle_err_window.append(max_err)\n avg_max_err = np.mean(self.__module_angle_err_window)\n\n if (\n len(self.__module_angle_err_window) >= self.__module_angle_err_window.maxlen # noqa: E501\n and avg_max_err < self.turn_angle_tolerance\n ):\n self.robot.drivetrain.reset_drive_position()\n self.state = 'drive'\n\n def state_drive(self):\n \"\"\"\n Drive toward a waypoint.\n Calculate the distance, then move the modules that distance.\n \"\"\"\n\n # get active waypoint and calculate displacement vector.\n active_waypoint = self.waypoints[self.active_waypoint_idx]\n disp_vec = active_waypoint - self.current_pos\n\n # calculate distance with pythagorean theorem\n dist = np.sqrt(np.sum(disp_vec**2))\n\n tgt_angle = np.arctan2(disp_vec[1], disp_vec[0])\n tgt_angle -= self.robot.imu.get_robot_heading()\n self.robot.drivetrain.set_all_module_angles(tgt_angle)\n\n # get the average distance the robot has gone so far\n avg_dist = np.mean(self.robot.drivetrain.get_module_distances())\n avg_dist *= (4 * math.pi) / (80 * 6.67)\n self.robot.drivetrain.set_all_module_speeds(self.drive_speed, True)\n\n # is the distance traveled somewhere close to the distance needed to\n # travel?\n if abs(avg_dist - dist) <= self.drive_dist_tolerance:\n\n # do we have waypoints left to drive to?\n self.active_waypoint_idx += 1\n\n # update the new current position as the active waypoint.\n self.current_pos = active_waypoint\n\n # do we still have waypoints left to go?\n if self.active_waypoint_idx < len(self.waypoints):\n self.__module_angle_err_window.clear()\n self.robot.drivetrain.reset_drive_position()\n self.state = 'turn'\n else:\n self.state = 'target-turn'\n\n def state_lift(self):\n \"\"\"\n Lift the RD4B to the height needed.\n Then transition to the target states for final adjustments.\n \"\"\"\n\n # stop the drivetrain.\n self.robot.drivetrain.set_all_module_speeds(0, True)\n\n if not self.hack_timer_started:\n self.hack_timer.reset()\n self.hack_timer.start()\n self.hack_timer_started = True\n else:\n if self.hack_timer.get() < 1.5:\n self.robot.lift.setLiftPower(-0.6)\n else:\n self.robot.lift.setLiftPower(0)\n self.state = 'target-turn'\n\n def state_target_turn(self):\n \"\"\"\n Complete a targeted, measured turn towards the target, turning the\n entire chassis this time instead of just each module.\n \"\"\"\n\n # the usual displacement stuff.\n disp_vec = self.target - self.current_pos\n tgt_angle = np.arctan2(disp_vec[1], disp_vec[0])\n\n # we are actually going to turn the whole chassis this time, using the\n # navx to ensure we are doing things correctly.\n self.robot.drivetrain.drive(0, 0, 0.1)\n hdg = self.robot.imu.get_robot_heading()\n\n # if we are at the proper angle now, move to the target-drive state.\n if 
abs(hdg - tgt_angle) <= self.turn_angle_tolerance:\n self.robot.drivetrain.set_all_module_speeds(0, True)\n self.robot.drivetrain.reset_drive_position()\n self.state = 'target-drive'\n\n def state_target_drive(self):\n \"\"\"\n Drive a measured distance towards the target.\n This should position the claw, with the cube, right over the switch or\n scale.\n \"\"\"\n\n avg_dist = np.mean(self.robot.drivetrain.get_module_distances())\n avg_dist *= (4 * math.pi) / (80 * 6.67)\n\n self.robot.drivetrain.set_all_module_angles(0)\n self.robot.drivetrain.set_all_module_speeds(self.drive_speed, True)\n\n if abs(avg_dist - self.final_drive_dist) <= self.drive_dist_tolerance:\n self.state = 'drop'\n\n def state_drop(self):\n \"\"\"\n Open the claw and drop the cube.\n \"\"\"\n self.robot.drivetrain.set_all_module_speeds(0, True)\n self.robot.claw.open()\n\n #: Maps state names to functions.\n state_table = {\n 'init': state_init,\n 'init-turn': state_init_turn,\n 'turn': state_turn,\n 'drive': state_drive,\n 'lift': state_lift,\n 'target-turn': state_target_turn,\n 'target-drive': state_target_drive,\n 'drop': state_drop\n }\n\n def periodic(self):\n \"\"\"\n Updates and progresses the autonomous state machine.\n \"\"\"\n # Call function corresponding to current state.\n self.state_table[self.state](self)\n\n def update_smart_dashboard(self):\n \"\"\"\n Periodically call this function to update the smartdashboard with\n important information about the autonomous class, for troubleshooting/\n monitoring purposes.\n \"\"\"\n\n wpilib.SmartDashboard.putString(\n 'autonomous state',\n self.state\n )\n\n wpilib.SmartDashboard.putString(\n 'Current Auto Position',\n str(self.current_pos)\n )\n\n if self.active_waypoint_idx < len(self.waypoints):\n active_waypoint = self.waypoints[self.active_waypoint_idx]\n wpilib.SmartDashboard.putString(\n 'Active Waypoint',\n str(active_waypoint)\n )\n" ]
[ [ "numpy.abs", "numpy.arctan2", "numpy.mean", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aws-samples/aws-photo-to-sketch
[ "e242f021f2a0d2ee840cb476b2ec63b132652807" ]
[ "src/src/predictor.py" ]
[ "# This file implements a flask server for inference. You can modify the file to align with your own inference logic.\nfrom __future__ import print_function\n\nimport io\nimport json\nimport os\nimport pickle\nimport signal\nimport sys\nimport traceback\n\nimport flask\nfrom flask import request\n\nimport tensorflow_hub as hub\n\nimport boto3\nimport base64\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom PIL import Image\nimport numpy as np\nfrom io import BytesIO\nimport json\n\nimport pandas as pd\n\nprefix = \"/opt/ml\"\nmodel_path = os.path.join(prefix, \"model\")\n\ndef load_img(image_object):\n \"\"\"\n Load image from Amazon S3 bucket and processed it.\n \"\"\"\n max_dim = 512\n img = tf.keras.preprocessing.image.img_to_array(image_object)\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n shape = tf.cast(tf.shape(img)[:-1], tf.float32)\n long_dim = max(shape)\n scale = max_dim / long_dim\n new_shape = tf.cast(shape * scale, tf.int32)\n img = tf.image.resize(img, new_shape)\n img = img[tf.newaxis, :] / 255\n return img\n\n\ndef read_image_from_s3(bucket_name, key):\n \"\"\"S3 to PIL Image\"\"\"\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(bucket_name)\n object = bucket.Object(key)\n response = object.get()\n return Image.open(response['Body'])\n\ndef tensor_to_image(tensor):\n \"\"\"\n Transform tensor to image.\n \"\"\"\n tensor = tensor * 255\n tensor = np.array(tensor, dtype=np.uint8)\n if np.ndim(tensor) > 3:\n assert tensor.shape[0] == 1\n tensor = tensor[0]\n return Image.fromarray(tensor)\n\nclass TensorflowService(object):\n model = None # Where we keep the model when it's loaded\n\n @classmethod\n def get_model(cls):\n \"\"\"Get the model object for this instance, loading it if it's not already loaded.\"\"\"\n if cls.model == None:\n # Load ML Model from TensorflowHub \n cls.model = hub.load(model_path)\n print(\"Tensorflow hub model loaded!\")\n return cls.model\n\n @classmethod\n def predict(cls, content_image, style_image):\n \"\"\"For the input, do the predictions and return them.\n Args:\n input (a pandas dataframe): The data on which to do the predictions. There will be\n one prediction per row in the dataframe\"\"\"\n clf = cls.get_model()\n return clf(tf.constant(content_image), tf.constant(style_image))[0]\n \n# The flask app for serving predictions\napp = flask.Flask(__name__)\n\n\[email protected](\"/ping\", methods=[\"GET\"])\ndef ping():\n \"\"\"Determine if the container is working and healthy.\n In this sample container, we declare\n it healthy if we can load the model successfully.\"\"\"\n\n health = TensorflowService.get_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response=\"\\n\", status=status, mimetype=\"application/json\")\n\n\[email protected](\"/invocations\", methods=[\"POST\"])\ndef inference():\n \"\"\"Performed an inference on incoming data.\n In this sample server, we take data as application/json,\n print it out to confirm that the server received it.\n \"\"\"\n content_type = flask.request.content_type\n if flask.request.content_type != \"application/json\":\n msg = \"I just take json, and I am fed with {}\".format(content_type)\n else:\n msg = \"I am fed with json. 
Therefore, I am happy\"\n\n \n data = flask.request.data.decode(\"utf-8\")\n data = io.StringIO(data)\n data = json.loads(data.read())\n \n account_id = boto3.client(\"sts\").get_caller_identity()[\"Account\"]\n region = boto3.Session().region_name\n \n bucket_name = f\"photo-to-sketch-{account_id}\"\n dict_style = {\"1\":\"style/1.jpeg\",\"2\":\"style/2.jpeg\",\"3\":\"style/3.jpeg\",\"4\":\"style/4.jpeg\"}\n effect_type = dict_style[data[\"effectType\"]]\n \n #Style image\n style_image_object = read_image_from_s3(bucket_name, effect_type)\n style_image = load_img(style_image_object)\n print(\"Style image loaded!\")\n \n # Content image\n input_image = data['image']\n content_image_object = Image.open(BytesIO(base64.b64decode(input_image)))\n content_image = load_img(content_image_object)\n print(\"Content image loaded!\")\n \n stylized_image = TensorflowService.predict(content_image,style_image)\n stylized_image = tensor_to_image(stylized_image) \n \n #Encode the response to base64 \n buffered = BytesIO()\n stylized_image.save(buffered, format=\"JPEG\")\n img_str = base64.b64encode(buffered.getvalue())\n print(\"Stylized image generated!\")\n \n return flask.Response(\n response=json.dumps({\"image\": img_str.decode(\"utf-8\")}),\n status=200,\n mimetype=\"text/plain\",\n )" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.constant", "tensorflow.shape", "tensorflow.cast", "numpy.ndim", "tensorflow.image.resize", "numpy.array", "tensorflow.keras.preprocessing.image.img_to_array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
Asadullah-Dal17/learn-opencv-python
[ "2892e1b253f1c8977662148a8721d8efb7bd63b6" ]
[ "source-code/Basic stuff/test_installation.py" ]
[ "import cv2 as cv \nimport numpy as np\n\nprint(f'opencv_version: {cv.__version__}')\n\nimg = np.zeros((500,500, 3), dtype=np.uint8)\ncv.putText(img, f'version: {cv.__version__}', (100, 250), cv.FONT_HERSHEY_PLAIN, 2.4, (0,255,0), 2, cv.LINE_AA)\ncv.imshow('img', img)\ncv.waitKey(0)\ncv.destroyAllWindows()" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
csjunxu/Open3D
[ "ef042a6c50e54c4bb56ccb4657a745c9b38c1b7f" ]
[ "src/Python/Tutorial/Basic/pointcloud.py" ]
[ "# Open3D: www.open3d.org\n# The MIT License (MIT)\n# See license file or visit www.open3d.org for details\n\nimport numpy as np\nfrom open3d import *\n\nif __name__ == \"__main__\":\n\n print(\"Load a ply point cloud, print it, and render it\")\n pcd = read_point_cloud(\"../../TestData/fragment.ply\")\n print(pcd)\n print(np.asarray(pcd.points))\n draw_geometries([pcd])\n\n print(\"Downsample the point cloud with a voxel of 0.05\")\n downpcd = voxel_down_sample(pcd, voxel_size = 0.05)\n draw_geometries([downpcd])\n\n print(\"Recompute the normal of the downsampled point cloud\")\n estimate_normals(downpcd, search_param = KDTreeSearchParamHybrid(\n radius = 0.1, max_nn = 30))\n draw_geometries([downpcd])\n\n print(\"Print a normal vector of the 0th point\")\n print(downpcd.normals[0])\n print(\"Print the normal vectors of the first 10 points\")\n print(np.asarray(downpcd.normals)[:10,:])\n print(\"\")\n\n print(\"Load a polygon volume and use it to crop the original point cloud\")\n vol = read_selection_polygon_volume(\"../../TestData/Crop/cropped.json\")\n chair = vol.crop_point_cloud(pcd)\n draw_geometries([chair])\n print(\"\")\n\n print(\"Paint chair\")\n chair.paint_uniform_color([1, 0.706, 0])\n draw_geometries([chair])\n print(\"\")\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jeanmichelscherer/mef90
[ "48b9b7d8bdaccb846a76833853f6ea81ce6fc9b1" ]
[ "python/pymef90/mesh/mef90ABAQUS.py" ]
[ "import numpy as np\nimport io\n\ndef ABAQUSread(filename):\n '''\n Import an abaqus file\n '''\n def ABAQUScellType(cellType):\n if cellType.upper() in (\"B21\", \"B21H\", \"T2D2\"):\n return \"BAR2\"\n elif cellType.upper() in (\"S3R\",\"STRI3\",\"CPE3\", \"CPE3R\", \"CPE3S\", \"CPS3\", \"CPS3R\", \"CPS3S\"):\n return \"TRI3\"\n elif cellType.upper() in (\"CPE6\", \"CPE6R\", \"CPE6S\", \"CPS6\", \"CPS6R\", \"CPS6S\"):\n return \"TRI6\"\n elif cellType.upper() in (\"S4R\", \"CPE4\", \"CPE4S\", \"CPE4R\", \"CPS4\", \"CPS4S\", \"CPS4R\"):\n return \"QUAD4\"\n elif cellType.upper() in (\"CPE8\", \"CPE8S\", \"CPE8R\", \"CPS8\", \"CPS8S\", \"CPS8R\"):\n return \"QUAD8\"\n elif cellType.upper() in (\"C3D4\"):\n return \"TETRA4\"\n elif cellType.upper() in (\"C3D10\", \"C3D10R\", \"C3D10S\"):\n return \"TETRA10\"\n elif cellType.upper() in (\"C3D8\", \"C3D8R\", \"C3D8S\"):\n return \"HEX8\"\n elif cellType.upper() in (\"C3D20\", \"C3D20R\", \"C3D20S\"):\n return \"HEX20\"\n\n def ABAQUSreadCoords(f,line):\n #creating and filling list of coordinates from abaqus file\n coord = []\n line = f.readline()\n #skip lines describing nodeset, and coords list ends on '**'\n while not line.startswith('*'):\n coord.append([float(x) for x in line.strip().split(\", \")[1:]])\n line = f.readline()\n coord = np.array(coord) #making coord list into numpy array\n if max(coord[:,2]) == min(coord[:,2]):\n numDim = 2\n else:\n numDim = 3\n return coord,numDim,line\n \n def ABAQUSreadELSET(f,cellSet,line):\n #creating and filling cellSet dictionary\n #get rid of word '*ELEMENT'\n for s in line.split(\",\")[1:]:\n if s.upper().startswith('TYPE'):\n cellType = s.split('=')[1].strip()\n if s.upper().startswith('ELSET'):\n setName = s.split('=')[1].strip()\n else:\n setName = ''\n\n usedID = list(cellSet.keys())+[0,]\n try:\n setID = int(setName)\n if setID in usedID:\n setID = max(usedID)+1 \n except ValueError:\n setID = max(usedID)+1\n\n line = f.readline() #continue to actual data \n while not line.startswith('*') and not line == '':\n if line.strip().split(\",\")[len(line.strip().split(\",\")[0:])-1] == '':\n cellConnect = [int(x)-1 for x in line.strip().split(\",\")[1:len(line.strip().split(\",\"))-1]]\n line = f.readline()\n if line.strip().split(\",\")[len(line.strip().split(\",\")[0:])-1] == '':\n tmp = [int(x)-1 for x in line.strip().split(\",\")[0:len(line.strip().split(\",\"))-1]]\n for i in range(len(tmp)):\n cellConnect.append(tmp[i])\n line = f.readline()\n tmp = [int(x)-1 for x in line.strip().split(\",\")[0:]]\n for i in range(len(tmp)):\n cellConnect.append(tmp[i])\n else:\n tmp = [int(x)-1 for x in line.strip().split(\",\")[0:]]\n for i in range(len(tmp)):\n cellConnect.append(tmp[i])\n else:\n cellConnect = [int(x)-1 for x in line.strip().split(\",\")[1:]] \n ### create cell sets:\n line = f.readline() \n cellSet[setID] = {}\n cellSet[setID]['connect'] = []\n cellSet[setID]['elemType'] = ABAQUScellType(cellType)\n cellSet[setID]['name'] = setName.strip()\n print(\"Assigning ID {0:4d}, type {2:s} to ELSET {1:s}. 
mef90/vDef name will be cs{0:04}\".format(setID,setName,cellSet[setID]['elemType']))\n cellSet[setID]['connect'] += cellConnect #reordering and adding to array\n return cellSet,line\n\n def ABAQUSreadNSET(f,vertexSet,line):\n setName = line.strip().split(\",\")[1]\n setName = setName.split('=')[1]\n usedID = list(vertexSet.keys())+[0,]\n try:\n setID = int(setName)\n if setID in usedID:\n setID = max(usedID)+1 \n except ValueError:\n setID = max(usedID)+1\n vertexSet[setID] = {}\n vertexSet[setID]['name'] = setName\n vertexSet[setID]['vertex'] = []\n\n #continue to next line which has actual node numbers\n line = f.readline()\n #add nodes to list\n #sometimes the node set will end with '**', and the next one will begin right after\n while not line.startswith('*') or line == '':\n #this condition is for reading lines which have a comma at the end\n #splitting at \",\" will return an additional string with nothing in it\n if line.strip().split(\",\")[len(line.strip().split(\",\")[0:])-1] == '':\n vertexSet[setID]['vertex'] += [int(x)-1 for x in line.strip().split(\",\")[0:len(line.strip().split(\",\"))-1]]\n else:\n vertexSet[setID]['vertex'] += [int(x)-1 for x in line.strip().split(\",\")[0:]]\n #move onto next line\n line = f.readline()\n return vertexSet,line\n\n # Opening and reading abaqus file\n f = io.open(filename, 'r') \n line = f.readline()\n cellSet = {}\n vertexSet = {}\n while line != '':\n if line.upper().startswith('*NODE'):\n (coord,numDim,line) = ABAQUSreadCoords(f,line)\n elif line.upper().startswith('*ELEM'):\n (cellSet,line) = ABAQUSreadELSET(f,cellSet,line)\n elif line.upper().startswith('*NSET'):\n (vertexSet,line) = ABAQUSreadNSET(f,vertexSet,line)\n else:\n line = f.readline()\n f.close()\n return coord,vertexSet,cellSet,numDim\n \n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
simran-arora/emmental-tutorials
[ "d5b82140f3826b6d90156b20d9c6731b7fe07e8c" ]
[ "data_augmentation/eda/image/modules/soft_cross_entropy_loss.py" ]
[ "from typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\nclass SoftCrossEntropyLoss(nn.Module):\n \"\"\"\n Calculate the CrossEntropyLoss with soft targets\n :param weight: Weight to assign to each of the classes. Default: None\n :type weight: list of float\n :param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'.\n 'none': no reduction,\n 'mean': the mean of the losses,\n 'sum': the sum of the losses.\n :type reduction: str\n \"\"\"\n\n def __init__(self, weight: List[float] = None, reduction: str = \"mean\"):\n super().__init__()\n if weight is None:\n self.weight = None\n else:\n self.register_buffer(\"weight\", torch.Tensor(weight))\n\n self.reduction = reduction\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor: # type:ignore\n \"\"\"\n Calculate the loss\n :param input: prediction logits\n :param target: target probabilities\n :return: loss\n \"\"\"\n\n n, k = input.shape\n losses = input.new_zeros(n)\n\n for i in range(k):\n cls_idx = input.new_full((n,), i, dtype=torch.long)\n loss = F.cross_entropy(input, cls_idx, reduction=\"none\")\n if self.weight is not None:\n loss = loss * self.weight[i]\n losses += target[:, i].float() * loss\n\n if self.reduction == \"mean\":\n losses = losses.mean()\n elif self.reduction == \"sum\":\n losses = losses.sum()\n elif self.reduction != \"none\":\n raise ValueError(f\"Unrecognized reduction: {self.reduction}\")\n\n return losses\n" ]
[ [ "torch.nn.functional.cross_entropy", "torch.Tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BruceResearch/BiTr-Unet
[ "d1f5ad5df7ff5e65c7797bfafd51a782f6114af3" ]
[ "postprocess/dataset.py" ]
[ "# Modified from @github: https://github.com/shu-hai/two-stage-VAE-Attention-gate-BraTS2020\n\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport os\nimport numpy as np\n\nnp.random.seed(0)\n\nimport random\n\nrandom.seed(0)\n\n\n# from random import sample\n\n\ndef validation_sampling(data_list, test_size=0.2):\n n = len(data_list)\n m = int(n * test_size)\n val_items = random.sample(data_list, m)\n tr_items = list(set(data_list) - set(val_items))\n return tr_items, val_items\n\n\ndef random_intensity_shift(imgs_array, brain_mask, limit=0.1):\n \"\"\"\n Only do intensity shift on brain voxels\n :param imgs_array: The whole input image with shape of (4, 155, 240, 240)\n :param brain_mask:\n :param limit:\n :return:\n \"\"\"\n\n shift_range = 2 * limit\n for i in range(len(imgs_array) - 1):\n factor = -limit + shift_range * np.random.random()\n std = imgs_array[i][brain_mask].std()\n imgs_array[i][brain_mask] = imgs_array[i][brain_mask] + factor * std\n return imgs_array\n\n\ndef random_scale(imgs_array, brain_mask, scale_limits=(0.9, 1.1)):\n \"\"\"\n Only do random_scale on brain voxels\n :param imgs_array: The whole input image with shape of (4, 155, 240, 240)\n :param scale_limits:\n :return:\n \"\"\"\n scale_range = scale_limits[1] - scale_limits[0]\n for i in range(len(imgs_array) - 1):\n factor = scale_limits[0] + scale_range * np.random.random()\n imgs_array[i][brain_mask] = imgs_array[i][brain_mask] * factor\n return imgs_array\n\n\ndef random_mirror_flip(imgs_array, prob=0.5):\n \"\"\"\n Perform flip along each axis with the given probability; Do it for all voxels;\n labels should also be flipped along the same axis.\n :param imgs_array:\n :param prob:\n :return:\n \"\"\"\n for axis in range(1, len(imgs_array.shape)):\n random_num = np.random.random()\n if random_num >= prob:\n if axis == 1:\n imgs_array = imgs_array[:, ::-1, :, :]\n if axis == 2:\n imgs_array = imgs_array[:, :, ::-1, :]\n if axis == 3:\n imgs_array = imgs_array[:, :, :, ::-1]\n return imgs_array\n\n\ndef random_crop(imgs_array, crop_size=(128, 192, 160), lower_limit=(0, 32, 40)):\n \"\"\"\n crop the image ((155, 240, 240) for brats data) into the crop_size\n the random area is now limited at (0:155, 32:224, 40:200), by default\n :param imgs_array:\n :param crop_size:\n :return:\n \"\"\"\n orig_shape = np.array(imgs_array.shape[1:])\n crop_shape = np.array(crop_size)\n # ranges = np.array(orig_shape - crop_shape, dtype=np.uint8)\n # lower_limits = np.random.randint(np.array(ranges))\n lower_limit_z = np.random.randint(lower_limit[0], 155 - crop_size[0])\n if crop_size[1] < 192:\n lower_limit_y = np.random.randint(lower_limit[1], 224 - crop_size[1])\n else:\n lower_limit_y = np.random.randint(0, 240 - crop_size[1])\n if crop_size[2] < 160:\n lower_limit_x = np.random.randint(lower_limit[2], 200 - crop_size[2])\n else:\n lower_limit_x = np.random.randint(0, 240 - crop_size[2])\n lower_limits = np.array((lower_limit_z, lower_limit_y, lower_limit_x))\n upper_limits = lower_limits + crop_shape\n imgs_array = imgs_array[:, lower_limits[0]: upper_limits[0],\n lower_limits[1]: upper_limits[1], lower_limits[2]: upper_limits[2]]\n return imgs_array\n\n\ndef validation_time_crop(imgs_array, crop_size=(128, 192, 160)):\n \"\"\"\n crop the image ((155, 240, 240) for brats data) into the crop_size\n :param imgs_array:\n :param crop_size:\n :return:\n \"\"\"\n orig_shape = np.array(imgs_array.shape[1:])\n crop_shape = np.array(crop_size)\n lower_limit_z = np.random.randint(orig_shape[0] - crop_size[0])\n center_y 
= 128\n center_x = 120\n lower_limit_y = center_y - crop_size[-2] // 2 # (128, 160, 128) (?, 48, 56)\n lower_limit_x = center_x - crop_size[-1] // 2 # (128, 192, 160) (?, 32, 40)\n lower_limits = np.array((lower_limit_z, lower_limit_y, lower_limit_x))\n\n upper_limits = lower_limits + crop_shape\n\n imgs_array = imgs_array[:, lower_limits[0]: upper_limits[0],\n lower_limits[1]: upper_limits[1], lower_limits[2]: upper_limits[2]]\n return imgs_array\n\n\ndef test_time_crop(imgs_array, crop_size=(144, 192, 160)):\n \"\"\"\n crop the test image around the center; default crop_zise change from (128, 192, 160) to (144, 192, 160)\n :param imgs_array:\n :param crop_size:\n :return: image with the size of crop_size\n \"\"\"\n orig_shape = np.array(imgs_array.shape[1:])\n crop_shape = np.array(crop_size)\n center = orig_shape // 2\n lower_limits = center - crop_shape // 2 # (13, 24, 40) (5, 24, 40)\n upper_limits = center + crop_shape // 2 # (141, 216, 200) (149, 216, 200)\n # upper_limits = lower_limits + crop_shape\n imgs_array = imgs_array[:, lower_limits[0]: upper_limits[0],\n lower_limits[1]: upper_limits[1], lower_limits[2]: upper_limits[2]]\n return imgs_array\n\n\ndef test_time_flip(imgs_array, tta_idx):\n if tta_idx == 0: # [0, 0, 0]\n return imgs_array\n if tta_idx == 1: # [1, 0, 0]\n return imgs_array[:, ::-1, :, :]\n if tta_idx == 2: # [0, 1, 0]\n return imgs_array[:, :, ::-1, :]\n if tta_idx == 3: # [0, 0, 1]\n return imgs_array[:, :, :, ::-1]\n if tta_idx == 4: # [1, 1, 0]\n return imgs_array[:, ::-1, ::-1, :]\n if tta_idx == 5: # [1, 0, 1]\n return imgs_array[:, ::-1, :, ::-1]\n if tta_idx == 6: # [0, 1, 1]\n return imgs_array[:, :, ::-1, ::-1]\n if tta_idx == 7: # [1, 1, 1]\n return imgs_array[:, ::-1, ::-1, ::-1]\n\n\ndef preprocess_label(img, single_label=None):\n \"\"\"\n Separates out the 3 labels from the segmentation provided, namely:\n GD-enhancing tumor (ET — label 4), the peritumoral edema (ED — label 2))\n and the necrotic and non-enhancing tumor core (NCR/NET — label 1)\n \"\"\"\n\n ncr = img == 1 # Necrotic and Non-Enhancing Tumor (NCR/NET) - orange\n ed = img == 2 # Peritumoral Edema (ED) - yellow\n et = img == 4 # GD-enhancing Tumor (ET) - blue\n if not single_label:\n # return np.array([ncr, ed, et], dtype=np.uint8)\n return np.array([ed, ncr, et], dtype=np.uint8)\n elif single_label == \"WT\":\n img[ed] = 1\n img[et] = 1\n elif single_label == \"TC\":\n img[ncr] = 0\n img[ed] = 1\n img[et] = 1\n elif single_label == \"ET\":\n img[ncr] = 0\n img[ed] = 0\n img[et] = 1\n else:\n raise RuntimeError(\"the 'single_label' type must be one of WT, TC, ET, and None\")\n return img[np.newaxis, :]\n\n\nclass BratsDataset(Dataset):\n def __init__(self, phase, config):\n super(BratsDataset, self).__init__()\n\n self.config = config\n self.phase = phase\n self.input_shape = config[\"input_shape\"]\n self.data_path = config[\"data_path\"]\n self.seg_label = config[\"seg_label\"]\n self.intensity_shift = config[\"intensity_shift\"]\n self.scale = config[\"scale\"]\n self.flip = config[\"flip\"]\n\n if phase == \"train\":\n self.patient_names = config[\"training_patients\"] # [:4]\n elif phase == \"validate\" or phase == \"evaluation\":\n self.patient_names = config[\"validation_patients\"] # [:2]\n elif phase == \"test\":\n self.test_path = config[\"test_path\"]\n self.patient_names = config[\"test_patients\"]\n self.tta_idx = config[\"tta_idx\"]\n\n def __getitem__(self, index):\n patient = self.patient_names[index]\n self.file_path = os.path.join(self.data_path, 'npy', patient + 
\".npy\")\n if self.phase == \"test\":\n self.file_path = os.path.join(self.test_path, 'npy', patient + \".npy\")\n imgs_npy = np.load(self.file_path)\n\n if self.phase == \"train\":\n nonzero_masks = [i != 0 for i in imgs_npy[:-1]]\n brain_mask = np.zeros(imgs_npy.shape[1:], dtype=bool)\n for chl in range(len(nonzero_masks)):\n brain_mask = brain_mask | nonzero_masks[chl] # (155, 240, 240)\n # data augmentation\n cur_image_with_label = imgs_npy.copy()\n cur_image = cur_image_with_label[:-1]\n if self.intensity_shift:\n cur_image = random_intensity_shift(cur_image, brain_mask)\n if self.scale:\n cur_image = random_scale(cur_image, brain_mask)\n\n cur_image_with_label[:-1] = cur_image\n cur_image_with_label = random_crop(cur_image_with_label, crop_size=self.input_shape[2:])\n\n if self.flip: # flip should be performed with labels\n cur_image_with_label = random_mirror_flip(cur_image_with_label)\n\n elif self.phase == \"validate\":\n # cur_image_with_label = validation_time_crop(imgs_npy)\n cur_image_with_label = validation_time_crop(imgs_npy, crop_size=self.input_shape[2:])\n\n elif self.phase == \"evaluation\":\n cur_image_with_label = imgs_npy.copy()\n\n if self.phase == \"validate\" or self.phase == \"train\" or self.phase == \"evaluation\":\n inp_data = cur_image_with_label[:-1]\n seg_label = preprocess_label(cur_image_with_label[-1], self.seg_label)\n if self.config[\"VAE_enable\"]:\n final_label = np.concatenate((seg_label, inp_data), axis=0)\n else:\n final_label = seg_label\n\n return np.array(inp_data), np.array(final_label)\n\n elif self.phase == \"test\":\n imgs_npy = test_time_crop(imgs_npy)\n if self.config[\"predict_from_train_data\"]:\n imgs_npy = imgs_npy[:-1]\n imgs_npy = test_time_flip(imgs_npy, self.tta_idx)\n # np.save(\"../test_time_crop/{}.npy\".format(str(index)), imgs_npy)\n # only use when doing inference for training-data\n # imgs_npy = imgs_npy[:4, :, :, :]\n return np.array(imgs_npy)\n\n # np.array() solve the problem of \"ValueError: some of the strides of a given numpy array are negative\"\n\n def __len__(self):\n return len(self.patient_names)" ]
[ [ "numpy.random.random", "numpy.random.seed", "numpy.concatenate", "numpy.load", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
reonho/capstone21
[ "3c3fd09dbfa727bc69949024b0e5a74308c98fa3" ]
[ "python_backend/triton_client/tao_triton/python/postprocessing/detectnet_processor.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n# \n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n# \n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Simple class to run post processing of Detectnet-v2 Triton Inference outputs.\"\"\"\n\nimport os\n\nimport numpy as np\nfrom sklearn.cluster import DBSCAN as dbscan\nfrom google.protobuf.text_format import Merge as merge_text_proto\n\nfrom tao_triton.python.postprocessing.postprocessor import Postprocessor\nimport tao_triton.python.proto.postprocessor_config_pb2 as postprocessor_config_pb2\nfrom tao_triton.python.types import KittiBbox\nfrom tao_triton.python.postprocessing.utils import (\n denormalize_bounding_bboxes,\n iou_vectorized,\n pool_context,\n render_image,\n thresholded_indices,\n return_bbox_info\n)\nfrom tao_triton.python.utils.kitti import write_kitti_annotation\nfrom PIL import Image\n\ndef load_clustering_config(config):\n \"\"\"Load the clustering config.\"\"\"\n proto = postprocessor_config_pb2.PostprocessingConfig()\n def _load_from_file(filename, pb2):\n if not os.path.exists(filename):\n raise IOError(\"Specfile not found at: {}\".format(filename))\n with open(filename, \"r\") as f:\n merge_text_proto(f.read(), pb2)\n _load_from_file(config, proto)\n return proto\n \n\nclass DetectNetPostprocessor(Postprocessor):\n \"\"\"Post processor for Triton outputs from a DetectNet_v2 client.\"\"\"\n\n def __init__(self, batch_size, frames,\n output_path, data_format, classes,\n postprocessing_config, target_shape):\n \"\"\"Initialize a post processor class for a classification model.\n \n Args:\n batch_size (int): Number of images in the batch.\n frames (list): List of images.\n output_path (str): Unix path to the output rendered images and labels.\n data_format (str): Order of the input model dimensions.\n \"channels_first\": CHW order.\n \"channels_last\": HWC order.\n classes (list): List of the class names.\n postprocessing_config (proto): Configuration elements of the dbscan postprocessor.\n target_shape (tuple): Shape of the model input.\n \"\"\"\n self.pproc_config = load_clustering_config(postprocessing_config)\n self.classes = classes\n self.output_names = [\"output_cov/Sigmoid\",\n \"output_bbox/BiasAdd\"]\n self.bbox_norm = [35., 35]\n self.offset = 0.5\n self.scale_h = 1\n self.scale_w = 1\n self.target_shape = target_shape\n self.stride = self.pproc_config.stride\n super().__init__(batch_size, frames, output_path, data_format)\n # Format the dbscan elements into classwise configurations for rendering.\n self.configure()\n\n def 
configure(self):\n        \"\"\"Configure the post processor object.\"\"\"\n        self.dbscan_elements = {}\n        self.coverage_thresholds = {}\n        self.box_color = {}\n        classwise_clustering_config = self.pproc_config.classwise_clustering_config\n        for class_name in self.classes:\n            if class_name not in classwise_clustering_config.keys():\n                raise KeyError(\"Cannot find class name {} in {}\".format(\n                    class_name, self.pproc_config.keys()\n                ))\n            self.dbscan_elements[class_name] = dbscan(\n                eps=classwise_clustering_config[class_name].dbscan_config.dbscan_eps,\n                min_samples=classwise_clustering_config[class_name].dbscan_config.dbscan_min_samples,\n                metric='precomputed'\n            )\n            self.coverage_thresholds[class_name] = classwise_clustering_config[class_name].coverage_threshold\n            self.box_color[class_name] = classwise_clustering_config[class_name].bbox_color\n\n    def apply(self, results, this_id, render=True):\n        \"\"\"Apply the post processing to the output tensors.\n        \n        This function takes the raw output tensors from the detectnet_v2 model\n        and performs the following steps:\n\n        1. Denormalize the output bbox coordinates, converting them from relative to absolute coordinates.\n        2. Threshold the coverage output to get the valid indices for the bboxes based on a coverage threshold. This coverage output is obtained from the \"output_cov/Sigmoid\" output returned by the model inference.\n        3. Cluster the filtered boxes using DBSCAN. This utilises the IOU between possible predicted rectangles and clusters them to output the best bbox.\n        4. Convert the filtered boxes into KittiBbox output format with the final absolute coordinates of bbox (x1, y1, x2, y2) and confidence scores.\n        \n        Args:\n            results: Triton Server Response for each batch of images\n            this_id: Unique ID for each response\n        Returns:\n            batch_boxes_output: a list containing all bounding boxes for each image in the batch\n        \n        \"\"\"\n\n        output_array = {}\n        this_id = int(this_id)\n        for output_name in self.output_names:\n            output_array[output_name] = results.as_numpy(output_name).transpose(0, 1, 3, 2)\n        assert len(self.classes) == output_array[\"output_cov/Sigmoid\"].shape[1], (\n            \"Number of classes {} != number of dimensions in the output_cov/Sigmoid: {}\".format(\n                len(self.classes), output_array[\"output_cov/Sigmoid\"].shape[1]\n            )\n        )\n        # Denormalise output bbox coordinates\n        abs_bbox = denormalize_bounding_bboxes(\n            output_array[\"output_bbox/BiasAdd\"], self.stride,\n            self.offset, self.bbox_norm, len(self.classes), self.scale_w,\n            self.scale_h, self.data_format, self.target_shape, self.frames,\n            this_id - 1\n        )\n\n        # Threshold coverage output to get valid indices\n        valid_indices = thresholded_indices(\n            output_array[\"output_cov/Sigmoid\"], len(self.classes),\n            self.classes,\n            self.coverage_thresholds\n        )\n        batchwise_boxes = []\n        for image_idx, indices in enumerate(valid_indices):\n            covs = output_array[\"output_cov/Sigmoid\"][image_idx, :, :, :]\n            bboxes = abs_bbox[image_idx, :, :, :]\n            imagewise_boxes = []\n            for class_idx in range(len(self.classes)):\n                clustered_boxes = []\n                cw_config = self.pproc_config.classwise_clustering_config[\n                    self.classes[class_idx]\n                ]\n                classwise_covs = covs[class_idx, :, :].flatten()\n                classwise_covs = classwise_covs[indices[class_idx]]\n                if classwise_covs.size == 0:\n                    continue\n                classwise_bboxes = bboxes[4*class_idx:4*class_idx+4, :, :]\n                classwise_bboxes = classwise_bboxes.reshape(\n                    classwise_bboxes.shape[:1] + (-1,)\n                ).T[indices[class_idx]]\n                pairwise_dist = \\\n                    1.0 * (1.0 - iou_vectorized(classwise_bboxes))\n\n                # 
Cluster similar boxes using DBSCAN to form the final bounding boxes, filtering out some boxes in the process\n                labeling = self.dbscan_elements[self.classes[class_idx]].fit_predict(\n                    X=pairwise_dist,\n                    sample_weight=classwise_covs\n                )\n                labels = np.unique(labeling[labeling >= 0])\n                for label in labels:\n                    w = classwise_covs[labeling == label]\n                    aggregated_w = np.sum(w)\n                    w_norm = w / aggregated_w\n                    n = len(w)\n                    w_max = np.max(w)\n                    w_min = np.min(w)\n                    b = classwise_bboxes[labeling == label]\n                    mean_bbox = np.sum((b.T*w_norm).T, axis=0)\n                    mean_bbox = np.array(mean_bbox, dtype='float64')\n\n                    # Compute the width, height and area of the averaged box\n                    mean_box_w = mean_bbox[2] - mean_bbox[0]\n                    mean_box_h = mean_bbox[3] - mean_bbox[1]\n                    bbox_area = mean_box_w * mean_box_h\n\n                    # Keep only boxes above the confidence and minimum-height thresholds\n                    valid_box = aggregated_w > cw_config.dbscan_config.\\\n                        dbscan_confidence_threshold and mean_box_h > cw_config.minimum_bounding_box_height\n                    if valid_box:\n                        # Convert the filtered box into KittiBbox output format with absolute coordinates and a confidence score\n                        clustered_boxes.append(\n                            KittiBbox(\n                                self.classes[class_idx], 0, 0, 0,\n                                mean_bbox, 0, 0, 0, 0,\n                                0, 0, 0, confidence_score=np.float64(aggregated_w)\n                            )\n                        )\n                    else:\n                        continue\n                imagewise_boxes.extend(clustered_boxes)\n            batchwise_boxes.append(imagewise_boxes)\n\n        if render:\n            with pool_context(self.batch_size) as pool:\n                batch_boxes_output = []\n                for image_idx in range(self.batch_size):\n                    current_idx = (this_id - 1) * self.batch_size + image_idx\n                    if current_idx >= len(self.frames):\n                        break\n                    current_frame = self.frames[current_idx]\n                    filename = os.path.basename(current_frame._image_path)\n                    \n                    # Collect the bboxes of all license plates in this frame\n                    final_bboxes = return_bbox_info(current_frame, batchwise_boxes[image_idx])\n                    batch_boxes_output.append([final_bboxes, filename])\n            return batch_boxes_output\n" ]
[ [ "numpy.min", "numpy.unique", "sklearn.cluster.DBSCAN", "numpy.max", "numpy.float64", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sidgairo18/unsupervised-style-learning
[ "90445645b67862ed0ac7ba987f31443cfce6aca5" ]
[ "src/triplet_dataloader.py" ]
[ "# Partly written by author: Siddhartha Gairola\n# Substantially adaptee from References 1, 2 in Readme.txt file.\n\nfrom __future__ import print_function, division\nimport os\nimport torch\nfrom skimage import io, transform\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2\n\n#Ignore Warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nplt.ion() #interative\nprint(\"Import Successful TripletImageLoader\")\n\ndef default_image_loader(image_file): \n #img = misc.imread(image_file)\n img = cv2.imread(image_file)\n if img is None:\n return np.zeros((224,224,3))\n # GRAYSCALE \n if len(img.shape) == 2: \n img_new = np.ndarray( (img.shape[0], img.shape[1], 3), dtype = 'float32')\n img_new[:,:,0] = img \n img_new[:,:,1] = img \n img_new[:,:,2] = img \n img = img_new\n\n img = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB) \n img = img.astype('float32') \n \n #img_resized = misc.imresize(img, (224, 224)) \n img_resized = cv2.resize(img, (224, 224)) \n return (img_resized/255.0).astype('float32')\n\nclass TripletImageLoader(Dataset):\n\n def __init__(self, base_path, filenames_filename, triplets_filename, transform=None, loader = default_image_loader):\n\n # filenames_filename => A text file with each line containing a path to an image, e.g., images/class1/sample.jpg\n # triplets_filename => A text file with each line containing 3 integers, where integer i refers to the i-th image\n # in filenames_filename. For a line with integers \"a b c\", a triplet is defined such that image a is more similar\n # to image c than it is to image b.\n\n self.base_path = base_path\n self.filenamelist = []\n\n for line in open(filenames_filename):\n self.filenamelist.append(line.rstrip('\\n'))\n\n triplets = []\n\n for line in open(triplets_filename):\n triplets.append((line.split()[0], line.split()[1], line.split()[2])) # anchor, far, close\n\n self.triplets = triplets\n self.transform = transform\n self.loader = loader\n\n def __getitem__(self, index):\n path1, path2, path3 = self.triplets[index]\n img1 = self.loader(os.path.join(self.base_path,self.filenamelist[int(path1)]))\n img2 = self.loader(os.path.join(self.base_path,self.filenamelist[int(path2)])) #far => negative\n img3 = self.loader(os.path.join(self.base_path,self.filenamelist[int(path3)])) #close => positive\n\n if self.transform:\n img1 = self.transform(img1)\n img2 = self.transform(img2)\n img3 = self.transform(img3)\n\n return img1, img2, img3\n\n def __len__(self):\n return len(self.triplets)\n" ]
[ [ "numpy.uint8", "matplotlib.pyplot.ion", "numpy.zeros", "numpy.ndarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rhodescoffey/alignment
[ "46d231f4fab932837ec69998b86685e1e394edbb" ]
[ "align_funcs.py" ]
[ "\"\"\"\n align_funcs.py\n\n The alignment algorithm takes in TextGrid classes and extracts meaningful\n data. This information consists of the beginning of a phoneme's utterance,\n its end, and its phonemic transcription. With this information the program\n finds the edit distance between two given utterances, phoneme by phoneme.\n\n Functions: is_vow(char)- Checks if char is vowel\n is_cons(char)- Checks if char is consonant\n alignment(s1, s2)- Performs LD on two TextGrids\n\n Joseph Coffey\n Infant Language Acquisition Lab\n Professor: Dr. Dan Swingley\n Manager: Elizabeth Crutchley\n Last Updated: 7/28/15\n\n\"\"\"\n\nimport numpy as np\nfrom phonemes import VOWELS, CONSONANTS\n\n\ndef is_vow(char):\n \"\"\" Checks if char is vowel \"\"\"\n\n for i in VOWELS.values():\n if char in i:\n return True\n else:\n return False\n\n\ndef is_con(char):\n \"\"\" Checks if char is consonant \"\"\"\n\n for i in CONSONANTS.values():\n if char in i:\n return True\n else:\n return False\n\n\ndef is_diff(char1, char2):\n \"\"\"Checks if char are different, i.e. vow/vow, cons/cons, or vow/cons\"\"\"\n\n if (is_con(char1) and is_vow(char2)) or (is_vow(char1) and is_con(char2)):\n return True\n\n else:\n return False\n\n\ndef alignment(s1, s2):\n \"\"\" Minimum number of edits needed to get from one string to another \"\"\"\n \"\"\" Wikipedia: Levenshtein distance: Computing Levenshtein distance \"\"\"\n\n grid = [[0 for x in range(len(s2) + 1)] for x in range(len(s1) + 1)]\n backtrace = [[0 for x in range(len(s2) + 1)] for x in range(len(s1) + 1)]\n\n for i, item in enumerate(grid):\n grid[i][0] = i\n for j, jtem in enumerate(grid[0]):\n grid[0][j] = j\n for i in range(1, len(grid)):\n for j in range(1, len(grid[0])):\n if s1[i-1] == s2[j-1]:\n cost = 0\n elif is_diff(s1[i-1], s2[j-1]):\n cost = 2\n else:\n cost = 1\n\n funcs = [(grid[i-1][j] + 1), (grid[i][j-1] + 1),\n (grid[i-1][j-1] + cost)]\n\n grid[i][j] = min(funcs)\n backtrace[i][j] = np.argmin(funcs)\n\n \"\"\" Backtrace function \"\"\"\n a1 = \"\"\n a2 = \"\"\n i = len(backtrace)-1\n j = len(backtrace[0])-1\n\n \"\"\" Loop through point array to find cheapest operation \"\"\"\n while (i != 0) and (j != 0):\n if backtrace[i][j] == 2: # Substitution\n a1 += s1[i-1]\n a2 += s2[j-1]\n i -= 1\n j -= 1\n\n elif backtrace[i][j] == 1: # Deletion\n a1 += '_'\n a2 += s2[j-1]\n j -= 1\n\n else: # Insertion\n a1 += s1[i-1]\n a2 += '_'\n i -= 1\n\n a1 = a1[::-1]\n a2 = a2[::-1]\n\n return grid, a1, a2, grid[len(grid)-1][len(grid[0])-1]\n" ]
[ [ "numpy.argmin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels
[ "e6df7ffc9b0318fdce405e40993c79785b47c785" ]
[ "Multitask-CNN-RNN/utils/logging_utils.py" ]
[ "import os\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 12})\ndef draw_plots(df, title, save_path):\n\tplt.figure()\n\tfor i, key in enumerate(df.keys()):\n\t\tif key !='loss':\n\t\t\tplt.plot(df.index, df[key], linewidth=2, label = key)\n\tplt.legend()\n\tplt.title(title)\n\tplt.xlabel(\"Iterations\")\n\tplt.savefig(save_path, bbox_inches='tight')\n\tplt.clf()\n\tplt.cla()\n\n\t\ndef save_plots(data_dict, train_save_path, val_save_path):\n\ttrain_df = data_dict['training']\n\tval_df = data_dict['validation']\n\tdraw_plots(train_df, 'Training Losses', train_save_path)\n\tdraw_plots(val_df, 'Validation Metrics', val_save_path)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.cla", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sw1001/ENLP-Project
[ "79e1257665a4c3ecc342505061041bed886891b5" ]
[ "src/model_svm.py" ]
[ "import time\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import *\n\nTRAIN_DATA_FILE = 'train_mapped.tsv'\n\n\ndef load_train_data(path):\n data = pd.read_csv(path, sep='\\t', header=0)\n\n data['Sentiment'] = data['Sentiment'].map(lambda x: 0 if x == 0 else x)\n data['Sentiment'] = data['Sentiment'].map(lambda x: 1 if x == 2 else x)\n data['Sentiment'] = data['Sentiment'].map(lambda x: 2 if x == 4 else x)\n\n # Remove empty\n data['Phrase'].replace('', np.nan, inplace=True)\n data.dropna(subset=['Phrase'], inplace=True)\n\n data['Phrase'] = data['Phrase'].astype(str)\n\n return data\n\n\ncurrent_time = time.time()\n\ntrain = load_train_data('../data/' + TRAIN_DATA_FILE)\n\nload_time = time.time() - current_time\n\nprint('Time to Load ' + TRAIN_DATA_FILE + ': ' + str(load_time) + 's')\n\ntrain_X, test_X, train_y, test_y = train_test_split(train['Phrase'], train['Sentiment'], test_size=0.20)\n\nbigram_vectorizer = CountVectorizer(analyzer=\"word\",\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n ngram_range=(1, 3),\n strip_accents='unicode')\n\nbigram_feature_matrix_train = bigram_vectorizer.fit_transform(train_X)\nbigram_feature_matrix_test = bigram_vectorizer.transform(test_X)\n\nbigram_svm_classifier = LinearSVC().fit(bigram_feature_matrix_train, train_y)\nbigram_svm_prediction = bigram_svm_classifier.predict(bigram_feature_matrix_test)\n\nmodel = 'Unigram-Trigram MultiClass SVM'\ntarget_names = ['0', '1', '2']\n\nprint(\n '-------' + '-' * len(model))\nprint(\n 'MODEL:', model)\nprint(\n '-------' + '-' * len(model))\n\nprint(\n 'Precision = ' + str(metrics.precision_score(test_y, bigram_svm_prediction, average=None)))\nprint(\n 'Recall = ' + str(metrics.recall_score(test_y, bigram_svm_prediction, average=None)))\nprint(\n 'F1 = ' + str(metrics.f1_score(test_y, bigram_svm_prediction, average=None)))\nprint(\n 'Accuracy = %.2f%%' % (metrics.accuracy_score(test_y, bigram_svm_prediction) * 100.0))\nprint(\n 'Confusion matrix = \\n' + str(\n metrics.confusion_matrix(test_y, bigram_svm_prediction, labels=[0, 1, 2])))\nprint('\\nClassification Report:\\n' + classification_report(test_y, bigram_svm_prediction,\n target_names=target_names))\nprint('Time to Train and Test: ' + str(time.time() - current_time) + 's')\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.recall_score", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "sklearn.metrics.confusion_matrix", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.metrics.f1_score", "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
nhayato/dl-from-scratch3
[ "6138c70ba611e9b1ae59212730d0ec0eb72ea0ce" ]
[ "steps/step23.py" ]
[ "if '__file__' in globals():\n import os\n import sys\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nimport numpy as np\nfrom dezero import Variable\n\n\ndef main():\n x = Variable(np.array(1.0))\n\n print(x)\n y = (x + 3)**2\n y.backward()\n\n print(y)\n print(x.grad)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hc-super66/Word2Vec-Learning
[ "ea279a81ae59fb86715aad1bcd2e9be11cd152d7" ]
[ "imdb_preprocess.py" ]
[ "from __future__ import absolute_import\r\nfrom __future__ import print_function\r\n\r\nimport logging\r\nimport os\r\nimport re\r\nimport sys\r\nfrom collections import defaultdict\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nfrom gensim.models import KeyedVectors\r\nfrom gensim.scripts.glove2word2vec import glove2word2vec\r\nfrom gensim.test.utils import datapath, get_tmpfile\r\nfrom nltk.corpus import stopwords\r\n\r\nimport pickle\r\n\r\n# Read data from files\r\ntrain = pd.read_csv(\"corpus/imdb/labeledTrainData.tsv\", header=0,\r\n delimiter=\"\\t\", quoting=3)\r\ntest = pd.read_csv(\"corpus/imdb/testData.tsv\", header=0,\r\n delimiter=\"\\t\", quoting=3)\r\nunlabeled_train = pd.read_csv(\"corpus/imdb/unlabeledTrainData.tsv\", header=0,\r\n delimiter=\"\\t\", quoting=3)\r\n\r\n\r\ndef review_to_wordlist(review, remove_stopwords=False):\r\n # Function to convert a document to a sequence of words,\r\n # optionally removing stop words. Returns a list of words.\r\n #\r\n # 1. Remove HTML\r\n review_text = BeautifulSoup(review, \"html.parser\").get_text()\r\n #\r\n # 2. Remove non-letters\r\n review_text = re.sub(\"[^a-zA-Z]\", \" \", review_text)\r\n #\r\n # 3. Convert words to lower case and split them\r\n words = review_text.lower().split()\r\n #\r\n # 4. Optionally remove stop words (false by default)\r\n if remove_stopwords:\r\n stops = set(stopwords.words(\"english\"))\r\n words = [w for w in words if not w in stops]\r\n #\r\n # 5. Return a list of words\r\n return words\r\n\r\n\r\ndef build_data_train_test(data_train, data_test, train_ratio=0.8):\r\n \"\"\"\r\n Loads data and process data into index\r\n \"\"\"\r\n revs = []\r\n vocab = defaultdict(float)\r\n\r\n # Pre-process train data set\r\n for i in range(len(data_train)):\r\n rev = data_train[i]\r\n y = train['sentiment'][i]\r\n orig_rev = ' '.join(rev).lower()\r\n words = set(orig_rev.split())\r\n for word in words:\r\n vocab[word] += 1\r\n datum = {'y': y, # 所对应的train_label情感\r\n 'text': orig_rev,\r\n 'num_words': len(orig_rev.split()),\r\n 'split': int(np.random.rand() < train_ratio)} # 将一部分数据转化为测试集\r\n revs.append(datum)\r\n\r\n for i in range(len(data_test)):\r\n rev = data_test[i]\r\n orig_rev = ' '.join(rev).lower()\r\n words = set(orig_rev.split())\r\n for word in words:\r\n vocab[word] += 1\r\n datum = {'y': -1,\r\n 'text': orig_rev,\r\n 'num_words': len(orig_rev.split()),\r\n 'split': -1}\r\n revs.append(datum)\r\n\r\n return revs, vocab\r\n\r\n\r\ndef load_bin_vec(model, vocab):\r\n word_vecs = {}\r\n unk_words = 0\r\n\r\n for word in vocab.keys():\r\n try:\r\n word_vec = model[word]\r\n word_vecs[word] = word_vec\r\n except:\r\n unk_words = unk_words + 1\r\n\r\n logging.info('unk words: %d' % (unk_words))\r\n return word_vecs\r\n\r\n\r\ndef get_W(word_vecs, k=300):\r\n vocab_size = len(word_vecs)\r\n word_idx_map = dict()\r\n\r\n W = np.zeros(shape=(vocab_size + 2, k), dtype=np.float32)\r\n W[0] = np.zeros((k,))\r\n W[1] = np.random.uniform(-0.25, 0.25, k)\r\n\r\n i = 2\r\n for word in word_vecs:\r\n W[i] = word_vecs[word]\r\n word_idx_map[word] = i\r\n i = i + 1\r\n return W, word_idx_map\r\n\r\n\r\nif __name__ == '__main__':\r\n program = os.path.basename(sys.argv[0])\r\n logger = logging.getLogger(program)\r\n\r\n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')\r\n logging.root.setLevel(level=logging.INFO)\r\n logger.info(\"running %s\" % ''.join(sys.argv))\r\n\r\n clean_train_reviews = []\r\n for review in train[\"review\"]:\r\n 
clean_train_reviews.append(review_to_wordlist(review, \\\r\n remove_stopwords=True))\r\n\r\n clean_test_reviews = []\r\n for review in test[\"review\"]:\r\n clean_test_reviews.append(review_to_wordlist(review, \\\r\n remove_stopwords=True))\r\n\r\n revs, vocab = build_data_train_test(clean_train_reviews, clean_test_reviews)\r\n max_l = np.max(pd.DataFrame(revs)['num_words'])\r\n logging.info('data loaded!')\r\n logging.info('number of sentences: ' + str(len(revs)))\r\n logging.info('vocab size: ' + str(len(vocab)))\r\n logging.info('max sentence length: ' + str(max_l))\r\n\r\n # word2vec GoogleNews\r\n # model_file = os.path.join('vector', 'GoogleNews-vectors-negative300.bin')\r\n # model = gensim.models.KeyedVectors.load_word2vec_format(model_file, binary=True)\r\n\r\n # 输入文件\r\n glove_file = datapath(\"C:/Users/10798/Desktop/imdb/glove.840B.300d.txt\")\r\n # 输出文件\r\n tmp_file = get_tmpfile('test_word2vec.txt')\r\n\r\n # call glove2word2vec script\r\n # default way (through CLI): python -m gensim.scripts.glove2word2vec --input <glove_file> --output <w2v_file>\r\n\r\n # 开始转换\r\n glove2word2vec(glove_file, tmp_file)\r\n # 加载转化后的文件\r\n model = KeyedVectors.load_word2vec_format(tmp_file)\r\n w2v = load_bin_vec(model, vocab)\r\n logging.info('word embeddings loaded!')\r\n logging.info('num words in embeddings: ' + str(len(w2v)))\r\n\r\n W, word_idx_map = get_W(w2v, k=model.vector_size)\r\n logging.info('extracted index from embeddings! ')\r\n\r\n # pickle_file = os.path.join('pickle', 'vader_movie_reviews_glove.pickle3')\r\n pickle_file = os.path.join('pickle', 'imdb_train_val_test.pickle3')\r\n pickle.dump([revs, W, word_idx_map, vocab, max_l], open(pickle_file, 'wb'))\r\n logging.info('dataset created!')\r\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.random.rand", "numpy.random.uniform", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
gunnarsnorri/rescueranger
[ "0dfe3856e3c823fad39a684e0185e4ad9de03870" ]
[ "scripts/PID_test.py" ]
[ "#!/usr/bin/env python\nimport PID\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import spline\n\n#P=2.0, I=0.0, D=1.0, Derivator=0, Integrator=0, Integrator_max=100, Integrator_min=-100, reference=0.0\n\ndef PID_test(P=2.0, I=0.0, D=1.0, reference=0.0):\n pidx = PID.PID(P, I, D)\n \n smplTime = 1/60\n pidx.setReference(0)\n pidx.setSampleTime(smplTime)\n feedback = 0\n output = pidx.output\n \n feedback_list = []\n time_list = []\n reference_list = []\n L = 100\n \n for i in range(1,L):\n pidx.updatePID(feedback)\n output = pidx.output\n if pidx.reference > 0:\n feedback += (output - (1/i))\n if i==9:\n pidx.setReference(reference)\n time.sleep(2*smplTime)\n #print(\"Reference\",pidx.reference,\"Output\",output,\"Feedback\",feedback, \"Error\", pidx.reference-feedback)\n \n feedback_list.append(feedback)\n reference_list.append(pidx.reference)\n time_list.append(i)\n \n time_sm = np.array(time_list)\n time_smooth = np.linspace(time_sm.min(), time_sm.max(), 300)\n feedback_smooth = spline(time_list, feedback_list, time_smooth)\n\n plt.plot(time_smooth, feedback_smooth)\n plt.plot(time_list, reference_list)\n plt.xlim((0, L))\n plt.ylim((min(feedback_list)-0.5, max(feedback_list)+0.5))\n plt.xlabel('time (s)')\n plt.ylabel('PID (PV)')\n plt.title('TEST PID')\n\n plt.ylim((min(feedback_list)-0.5, max(feedback_list)+0.5))\n\n plt.grid(True)\n plt.show()\n\nif __name__ == \"__main__\":\n PID_test(1.2, 1.0, 1/30/10,1.0)\n" ]
[ [ "matplotlib.pyplot.title", "scipy.interpolate.spline", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.19", "0.18", "1.2", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] } ]
ArnavM1499/Hanabi-HumanAI
[ "c2c159c28c784950a2c98cbd8bf81bfc9036484d" ]
[ "game_net/lstm_two_stage.py" ]
[ "import numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\ntorch.multiprocessing.set_sharing_strategy(\"file_system\")\n\nGAME_STATE_LENGTH = 583 + 20\n\nDATA_ALL = \"../log/features0825/lstm_extended/00005_all.npy\"\nDATA_TRAIN = DATA_ALL.replace(\"_all\", \"_train\")\nDATA_VAL = DATA_ALL.replace(\"_all\", \"_val\")\nMODEL_PATH = \"../log/model_lstm_two_stage.pth\"\n\nBATCH_SIZE = 600\nEPOCH = 100\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nLOGGER = SummaryWriter()\n\n\nclass PickleDataset(torch.utils.data.Dataset):\n def __init__(self, dataset_file):\n self.states = []\n self.action_cats = []\n self.action_hints = []\n self.action_cards = []\n with open(dataset_file, \"rb\") as fin:\n while True:\n try:\n self.states.append(\n torch.from_numpy(np.load(fin) * 0.333).type(torch.float32)\n )\n action_label = torch.from_numpy(np.load(fin)).type(torch.long)\n buffer_cat = []\n for idx in action_label:\n idx = int(idx)\n if idx < 10: # hint color / number\n buffer_cat.append(0)\n elif idx < 15: # play\n buffer_cat.append(1)\n else: # discard\n buffer_cat.append(2)\n self.action_cats.append(torch.tensor(buffer_cat))\n self.action_hints.append(action_label)\n self.action_cards.append(torch.clamp(action_label, max=4))\n except ValueError:\n break\n assert len(self.states) == len(self.action_cats)\n assert len(self.states) == len(self.action_hints)\n assert len(self.states) == len(self.action_cards)\n\n def __len__(self):\n return len(self.states)\n\n def __getitem__(self, idx):\n return (\n self.states[idx],\n self.action_cats[idx],\n self.action_hints[idx],\n self.action_cards[idx],\n torch.tensor(self.action_cats[idx].size()[0]),\n )\n\n def _convert_action(self, action):\n return\n\n\ndef pack_games(games):\n games.sort(key=lambda x: -x[-1])\n padded_states = torch.nn.utils.rnn.pad_sequence([x[0] for x in games])\n packed_action_cats = torch.nn.utils.rnn.pack_sequence([x[1] for x in games])\n packed_action_hints = torch.nn.utils.rnn.pack_sequence([x[2] for x in games])\n packed_action_cards = torch.nn.utils.rnn.pack_sequence([x[3] for x in games])\n return (\n padded_states,\n packed_action_cats,\n packed_action_hints,\n packed_action_cards,\n [x[-1] for x in games],\n )\n\n\nclass SingleNet(torch.nn.Module):\n def __init__(\n self,\n input_fc_units,\n lstm_hidden_units,\n lstm_num_layers,\n output_fc_units,\n output_units,\n drop_out=False,\n drop_out_rate=0.5,\n ):\n super().__init__()\n self.input_fc = []\n for in_dim, out_dim in zip(\n [GAME_STATE_LENGTH] + input_fc_units, input_fc_units\n ):\n self.input_fc.append(torch.nn.Linear(in_dim, out_dim))\n self.input_fc.append(torch.nn.ReLU())\n if drop_out:\n self.input_fc.append(torch.nn.Dropout(drop_out_rate))\n self.input_fc = torch.nn.Sequential(*self.input_fc)\n self.lstm = torch.nn.LSTM(\n GAME_STATE_LENGTH if input_fc_units == [] else input_fc_units[-1],\n lstm_hidden_units,\n lstm_num_layers,\n )\n self.output_fc = []\n for in_dim, out_dim in zip(\n [lstm_hidden_units] + output_fc_units, output_fc_units\n ):\n self.output_fc.append(torch.nn.Linear(in_dim, out_dim))\n self.output_fc.append(torch.nn.ReLU())\n if drop_out:\n self.output_fc.append(torch.nn.Dropout(drop_out_rate))\n if output_fc_units == []:\n self.output_fc.append(torch.nn.Linear(lstm_hidden_units, output_units))\n else:\n self.output_fc.append(torch.nn.Linear(output_fc_units[-1], output_units))\n self.output_fc = torch.nn.Sequential(*self.output_fc)\n\n def forward(self, padded, 
lengths):\n padded_output = self.input_fc(padded)\n packed_output = torch.nn.utils.rnn.pack_padded_sequence(padded_output, lengths)\n packed_output, _ = self.lstm(packed_output)\n padded_output, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_output)\n padded_output = self.output_fc(padded_output)\n packed_output = torch.nn.utils.rnn.pack_padded_sequence(padded_output, lengths)\n return packed_output\n\n\nclass FullNet(torch.nn.Module):\n def __init__(\n self,\n input_fc_units,\n lstm_hidden_units,\n lstm_num_layers,\n output_fc_units,\n drop_out=False,\n drop_out_rate=0.5,\n ):\n super().__init__()\n self.net_first = SingleNet(\n input_fc_units,\n lstm_hidden_units,\n lstm_num_layers,\n output_fc_units,\n 3, # num output\n drop_out,\n drop_out_rate,\n )\n self.net_hint = SingleNet(\n input_fc_units,\n lstm_hidden_units,\n lstm_num_layers,\n output_fc_units,\n 10,\n drop_out,\n drop_out_rate,\n )\n self.net_play = SingleNet(\n input_fc_units,\n lstm_hidden_units,\n lstm_num_layers,\n output_fc_units,\n 5,\n drop_out,\n drop_out_rate,\n )\n self.net_discard = SingleNet(\n input_fc_units,\n lstm_hidden_units,\n lstm_num_layers,\n output_fc_units,\n 5,\n drop_out,\n drop_out_rate,\n )\n self.nets = [self.net_first, self.net_hint, self.net_play, self.net_discard]\n\n def forward(self, padded, lengths):\n return tuple(net(padded, lengths) for net in self.nets)\n\n\nmodel = FullNet([512], 512, 2, [], drop_out=True).to(DEVICE)\nloss_fn = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.0003)\n\n\ntrainset = torch.utils.data.DataLoader(\n PickleDataset(DATA_TRAIN),\n batch_size=BATCH_SIZE,\n shuffle=True,\n collate_fn=pack_games,\n)\nvalset = torch.utils.data.DataLoader(\n PickleDataset(DATA_VAL),\n batch_size=BATCH_SIZE,\n shuffle=False,\n collate_fn=pack_games,\n)\n\n\ndef val(log_iter=0):\n losses = []\n correct_cat = 0\n correct_hint = 0\n correct_play = 0\n correct_discard = 0\n total_cat = 0\n total_hint = 0\n total_play = 0\n total_discard = 0\n model.eval()\n with torch.no_grad():\n for i, (states, action_cats, action_hints, action_cards, lengths) in enumerate(\n tqdm(valset)\n ):\n states, action_cats, action_hints, action_cards = (\n states.to(DEVICE),\n action_cats.to(DEVICE),\n action_hints.to(DEVICE),\n action_cards.to(DEVICE),\n )\n mask_hint = action_cats.data == 0\n mask_play = action_cats.data == 1\n mask_discard = action_cats.data == 2\n (pred_cat, pred_hint, pred_play, pred_discard) = model(states, lengths)\n masked_label_hint = torch.masked_select(action_hints.data, mask_hint)\n masked_label_play = torch.masked_select(action_cards.data, mask_play)\n masked_label_discard = torch.masked_select(action_cards.data, mask_discard)\n masked_pred_hint = pred_hint.data[mask_hint, :]\n masked_pred_play = pred_play.data[mask_play, :]\n masked_pred_discard = pred_discard.data[mask_discard, :]\n loss = 0\n loss += loss_fn(pred_cat.data, action_cats.data).item()\n loss += loss_fn(masked_pred_hint, masked_label_hint).item()\n loss += loss_fn(masked_pred_play, masked_label_play).item()\n loss += loss_fn(masked_pred_discard, masked_label_discard).item()\n losses.append(loss)\n correct_cat += (\n (pred_cat.data.argmax(1) == action_cats.data)\n .type(torch.float)\n .sum()\n .item()\n )\n correct_hint += (\n (masked_pred_hint.data.argmax(1) == masked_label_hint.data)\n .type(torch.float)\n .sum()\n .item()\n )\n correct_play += (\n (masked_pred_play.data.argmax(1) == masked_label_play.data)\n .type(torch.float)\n .sum()\n .item()\n )\n correct_discard += (\n 
(masked_pred_discard.data.argmax(1) == masked_label_discard.data)\n                .type(torch.float)\n                .sum()\n                .item()\n            )\n            total_cat += action_cats.data.shape[0]\n            total_hint += masked_label_hint.data.shape[0]\n            total_play += masked_label_play.data.shape[0]\n            total_discard += masked_label_discard.data.shape[0]\n    loss = round(sum(losses) / len(losses), 5)\n    acc_cat = correct_cat / total_cat\n    acc_hint = correct_hint / total_hint\n    acc_play = correct_play / total_play\n    acc_discard = correct_discard / total_discard\n    print(\n        \"    val loss: {:.4f} category: {:.4f} hint: {:.4f} play: {:.4f} discard: {:.4f}\".format(  # noqa E501\n            loss,\n            acc_cat,\n            acc_hint,\n            acc_play,\n            acc_discard,\n        )\n    )\n    LOGGER.add_scalar(\"Loss/Val\", loss, log_iter)\n    LOGGER.add_scalar(\"Category Accuracy/Val\", acc_cat, log_iter)\n    LOGGER.add_scalar(\"Hint Accuracy/Val\", acc_hint, log_iter)\n    LOGGER.add_scalar(\"Play Accuracy/Val\", acc_play, log_iter)\n    LOGGER.add_scalar(\"Discard Accuracy/Val\", acc_discard, log_iter)\n    model.train()\n\n\ndef train():\n    size = len(trainset)\n    for e in range(EPOCH):\n        val(e * size)\n        for i, (states, action_cats, action_hints, action_cards, lengths) in enumerate(\n            tqdm(trainset, desc=\"epoch: {}\".format(e))\n        ):\n            states, action_cats, action_hints, action_cards = (\n                states.to(DEVICE),\n                action_cats.to(DEVICE),\n                action_hints.to(DEVICE),\n                action_cards.to(DEVICE),\n            )\n            mask_hint = action_cats.data == 0\n            mask_play = action_cats.data == 1\n            mask_discard = action_cats.data == 2\n            (pred_cat, pred_hint, pred_play, pred_discard) = model(states, lengths)\n            masked_label_hint = torch.masked_select(action_hints.data, mask_hint)\n            masked_label_play = torch.masked_select(action_cards.data, mask_play)\n            masked_label_discard = torch.masked_select(action_cards.data, mask_discard)\n            masked_pred_hint = pred_hint.data[mask_hint, :]\n            masked_pred_play = pred_play.data[mask_play, :]\n            masked_pred_discard = pred_discard.data[mask_discard, :]\n            loss = 0\n            loss += loss_fn(pred_cat.data, action_cats.data)\n            loss += loss_fn(masked_pred_hint, masked_label_hint)\n            loss += loss_fn(masked_pred_play, masked_label_play)\n            loss += loss_fn(masked_pred_discard, masked_label_discard)\n\n            acc_cat = (\n                (pred_cat.data.argmax(1) == action_cats.data).type(torch.float).mean()\n            )\n            acc_hint = (\n                (masked_pred_hint.data.argmax(1) == masked_label_hint.data)\n                .type(torch.float)\n                .mean()\n            )\n            acc_play = (\n                (masked_pred_play.data.argmax(1) == masked_label_play.data)\n                .type(torch.float)\n                .mean()\n            )\n            acc_discard = (\n                (masked_pred_discard.data.argmax(1) == masked_label_discard.data)\n                .type(torch.float)\n                .mean()\n            )\n            LOGGER.add_scalar(\"Loss/Train\", loss.item(), e * size + i)\n            LOGGER.add_scalar(\"Category Accuracy/Train\", acc_cat.item(), e * size + i)\n            LOGGER.add_scalar(\"Hint Accuracy/Train\", acc_hint.item(), e * size + i)\n            LOGGER.add_scalar(\"Play Accuracy/Train\", acc_play.item(), e * size + i)\n            LOGGER.add_scalar(\n                \"Discard Accuracy/Train\", acc_discard.item(), e * size + i\n            )\n            optimizer.zero_grad()  # reset gradients so they do not accumulate across batches\n            loss.backward()\n            optimizer.step()\n    torch.save(model.state_dict(), MODEL_PATH)\n\n\ntrain()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.clamp", "torch.nn.LSTM", "torch.nn.utils.rnn.pad_sequence", "numpy.load", "torch.nn.utils.rnn.pack_padded_sequence", "torch.tensor", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.multiprocessing.set_sharing_strategy", "torch.masked_select", "torch.nn.ReLU", "torch.nn.utils.rnn.pack_sequence" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chi3x10/OpenSfM
[ "9766a11e11544fc71fe689f33b34d0610cca2944" ]
[ "opensfm/test/test_bundle.py" ]
[ "import copy\n\nimport numpy as np\nimport pytest\nfrom opensfm import (\n config,\n geometry,\n pybundle,\n pygeometry,\n pymap,\n reconstruction,\n tracking,\n types,\n)\n\n\ndef test_unicode_strings_in_bundle() -> None:\n \"\"\"Test that byte and unicode strings can be used as camera ids.\"\"\"\n ba = pybundle.BundleAdjuster()\n\n unicode_id = \"A\\xb2\"\n byte_id = b\"A_2\"\n\n camera = pygeometry.Camera.create_perspective(0.4, 0.1, -0.01)\n\n camera.id = unicode_id\n ba.add_camera(camera.id, camera, camera, True)\n\n # pyre-fixme[8]: Attribute has type `str`; used as `bytes`.\n camera.id = byte_id\n ba.add_camera(camera.id, camera, camera, True)\n\n\[email protected]()\ndef bundle_adjuster():\n ba = pybundle.BundleAdjuster()\n camera = pygeometry.Camera.create_perspective(1.0, 0.0, 0.0)\n ba.add_camera(\"cam1\", camera, camera, True)\n ba.add_rig_camera(\"rig_cam1\", pygeometry.Pose(), pygeometry.Pose(), True)\n return ba\n\n\ndef test_sigleton(bundle_adjuster) -> None:\n \"\"\"Single camera test\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[float]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0.5, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance_position_prior(\"1\", [1, 0, 0], [1, 1, 1], \"\")\n sa.add_absolute_up_vector(\"1\", [0, -1, 0], 1)\n sa.add_absolute_pan(\"1\", np.radians(180), 1)\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n assert np.allclose(s1.translation, [1, 0, 0], atol=1e-6)\n\n\ndef test_singleton_pan_tilt_roll(bundle_adjuster) -> None:\n \"\"\"Single camera test with pan, tilt, roll priors.\"\"\"\n pan, tilt, roll = 1, 0.3, 0.2\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[float]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0.5, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance_position_prior(\"1\", [1, 0, 0], [1, 1, 1], \"\")\n sa.add_absolute_pan(\"1\", pan, 1)\n sa.add_absolute_tilt(\"1\", tilt, 1)\n sa.add_absolute_roll(\"1\", roll, 1)\n\n sa.run()\n pose = sa.get_rig_instance_pose(\"1\")\n\n assert np.allclose(pose.get_origin(), [1, 0, 0], atol=1e-6)\n\n ptr = geometry.ptr_from_rotation(pose.get_rotation_matrix())\n assert np.allclose(ptr, (pan, tilt, roll))\n\n\ndef _projection_errors_std(points):\n all_errors = []\n for p in points.values():\n all_errors += p.reprojection_errors.values()\n return np.std(all_errors)\n\n\ndef test_bundle_projection_fixed_internals(scene_synthetic) -> None:\n reference = scene_synthetic.reconstruction\n camera_priors = dict(reference.cameras.items())\n rig_priors = dict(reference.rig_cameras.items())\n graph = tracking.as_graph(scene_synthetic.tracks_manager)\n # Create the connnections in the reference\n for point_id in reference.points.keys():\n if point_id in graph:\n for shot_id, g_obs in graph[point_id].items():\n color = g_obs[\"feature_color\"]\n pt = g_obs[\"feature\"]\n obs = pymap.Observation(\n pt[0],\n pt[1],\n g_obs[\"feature_scale\"],\n color[0],\n color[1],\n color[2],\n g_obs[\"feature_id\"],\n g_obs[\"feature_segmentation\"],\n g_obs[\"feature_instance\"],\n )\n reference.map.add_observation(shot_id, point_id, obs)\n\n orig_camera = copy.deepcopy(reference.cameras[\"1\"])\n\n custom_config = config.default_config()\n custom_config[\"bundle_use_gps\"] = 
False\n custom_config[\"optimize_camera_parameters\"] = False\n reconstruction.bundle(reference, camera_priors, rig_priors, [], custom_config)\n\n assert _projection_errors_std(reference.points) < 5e-3\n assert reference.cameras[\"1\"].focal == orig_camera.focal\n assert reference.cameras[\"1\"].k1 == orig_camera.k1\n assert reference.cameras[\"1\"].k2 == orig_camera.k2\n\n\ndef test_pair(bundle_adjuster) -> None:\n \"\"\"Simple two camera test\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_reconstruction(\"12\", False)\n sa.add_reconstruction_shot(\"12\", 4, \"1\")\n sa.add_reconstruction_shot(\"12\", 4, \"2\")\n sa.set_scale_sharing(\"12\", True)\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"12\", \"1\", \"12\", \"2\", [0, 0, 0], [-1, 0, 0], 1)\n )\n sa.add_rig_instance_position_prior(\"1\", [0, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"2\", [2, 0, 0], [1, 1, 1], \"\")\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n r12 = sa.get_reconstruction(\"12\")\n\n assert np.allclose(s1.translation, [0, 0, 0], atol=1e-6)\n assert np.allclose(s2.translation, [-2, 0, 0], atol=1e-6)\n assert np.allclose(r12.get_scale(\"1\"), 0.5)\n assert np.allclose(r12.get_scale(\"2\"), 0.5)\n\n\ndef test_pair_with_points_priors(bundle_adjuster) -> None:\n \"\"\"Simple two rigs test with a point constraint for anchoring\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[float]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[float]`.\n pygeometry.Pose([1e-3, 1e-3, 1e-3], [1e-3, 1e-3, 1e-3]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[float]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[float]`.\n pygeometry.Pose([1e-3, 1e-3, 1e-3], [1e-3, 1e-3, 1e-3]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_point(\"p1\", [0, 0, 0], False)\n sa.add_point(\"p2\", [0, 0, 0], False)\n\n sa.add_reconstruction(\"12\", False)\n sa.add_reconstruction_shot(\"12\", 4, \"1\")\n sa.add_reconstruction_shot(\"12\", 4, \"2\")\n\n # identity rotation with pan/tilt/roll\n sa.add_absolute_roll(\"1\", np.radians(90), 1)\n sa.add_absolute_pan(\"1\", -np.radians(90), 1)\n sa.add_absolute_tilt(\"1\", -np.radians(90), 1)\n\n sa.set_scale_sharing(\"12\", True)\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"12\", \"1\", \"12\", \"2\", [0, 0, 0], [-1, 0, 0], 1)\n )\n\n sa.add_point_projection_observation(\"1\", \"p1\", [0, 0], 1)\n sa.add_point_projection_observation(\"2\", \"p1\", [-0.5, 0], 1)\n 
sa.add_point_prior(\"p1\", [-0.5, 2, 2], [1, 1, 1], True)\n\n sa.add_point_projection_observation(\"2\", \"p2\", [0, 0], 1)\n sa.add_point_projection_observation(\"1\", \"p2\", [0.5, 0], 1)\n sa.add_point_prior(\"p2\", [1.5, 2, 2], [1, 1, 1], True)\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n r12 = sa.get_reconstruction(\"12\")\n p1 = sa.get_point(\"p1\")\n p2 = sa.get_point(\"p2\")\n\n assert np.allclose(s1.translation, [0.5, -2, 2], atol=1e-2)\n assert np.allclose(s2.translation, [-1.5, -2, 2], atol=1e-2)\n assert np.allclose(p1.p, [-0.5, 2, 2], atol=1e-6)\n assert np.allclose(p2.p, [1.5, 2, 2], atol=1e-6)\n assert np.allclose(r12.get_scale(\"1\"), 0.5)\n assert np.allclose(r12.get_scale(\"2\"), 0.5)\n\n\ndef test_pair_non_rigid(bundle_adjuster) -> None:\n \"\"\"Simple two rigs test\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_reconstruction(\"12\", False)\n sa.add_reconstruction_shot(\"12\", 4, \"1\")\n sa.add_reconstruction_shot(\"12\", 4, \"2\")\n sa.set_scale_sharing(\"12\", False)\n sa.add_relative_similarity(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeSimilarity(\"12\", \"1\", \"12\", \"2\", [0, 0, 0], [-1, 0, 0], 1, 1)\n )\n sa.add_rig_instance_position_prior(\"1\", [0, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"2\", [2, 0, 0], [1, 1, 1], \"\")\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n r12 = sa.get_reconstruction(\"12\")\n\n assert np.allclose(s1.translation, [0, 0, 0], atol=1e-6)\n assert np.allclose(s2.translation, [-2, 0, 0], atol=1e-6)\n assert np.allclose(r12.get_scale(\"1\"), 0.5)\n assert np.allclose(r12.get_scale(\"2\"), 0.5)\n\n\ndef test_four_cams_single_reconstruction(bundle_adjuster) -> None:\n \"\"\"Four rigs, one reconstruction\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"3\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"3\": \"cam1\"},\n {\"3\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"4\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n 
{\"4\": \"cam1\"},\n {\"4\": \"rig_cam1\"},\n False,\n )\n sa.add_reconstruction(\"1234\", False)\n sa.add_reconstruction_shot(\"1234\", 1, \"1\")\n sa.add_reconstruction_shot(\"1234\", 1, \"2\")\n sa.add_reconstruction_shot(\"1234\", 1, \"3\")\n sa.add_reconstruction_shot(\"1234\", 1, \"4\")\n sa.set_scale_sharing(\"1234\", True)\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"1234\", \"1\", \"1234\", \"2\", [0, 0, 0], [-1, 0, 0], 1)\n )\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"1234\", \"1\", \"1234\", \"3\", [0, 0, 0], [0, -1, 0], 1)\n )\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"1234\", \"1\", \"1234\", \"4\", [0, 0, 0], [0, 0, -1], 1)\n )\n sa.add_rig_instance_position_prior(\"1\", [0, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"2\", [2, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"3\", [0, 2, 0], [1, 1, 1], \"\")\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n s3 = sa.get_rig_instance_pose(\"3\")\n s4 = sa.get_rig_instance_pose(\"4\")\n\n assert np.allclose(s1.translation, [0, 0, 0], atol=1e-6)\n assert np.allclose(s2.translation, [-2, 0, 0], atol=1e-6)\n assert np.allclose(s3.translation, [0, -2, 0], atol=1e-6)\n assert np.allclose(s4.translation, [0, 0, -2], atol=1e-6)\n\n\ndef test_four_cams_single_reconstruction_non_rigid(bundle_adjuster) -> None:\n \"\"\"Four rigs, one reconstruction\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"3\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"3\": \"cam1\"},\n {\"3\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"4\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"4\": \"cam1\"},\n {\"4\": \"rig_cam1\"},\n False,\n )\n sa.add_reconstruction(\"1234\", False)\n sa.add_reconstruction_shot(\"1234\", 1, \"1\")\n sa.add_reconstruction_shot(\"1234\", 1, \"2\")\n sa.add_reconstruction_shot(\"1234\", 1, \"3\")\n sa.add_reconstruction_shot(\"1234\", 1, \"4\")\n sa.set_scale_sharing(\"1234\", False)\n\n sa.add_relative_similarity(\n pybundle.RelativeSimilarity(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n \"1234\", \"1\", \"1234\", \"2\", [0, 0, 0], [-1, 0, 0], 1, 
1\n )\n )\n sa.add_relative_similarity(\n pybundle.RelativeSimilarity(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n \"1234\", \"2\", \"1234\", \"3\", [0, 0, 0], [-1, -1, 0], 1, 1\n )\n )\n sa.add_relative_similarity(\n pybundle.RelativeSimilarity(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n \"1234\", \"3\", \"1234\", \"4\", [0, 0, 0], [0, -1, 0], 1, 1\n )\n )\n sa.add_rig_instance_position_prior(\"1\", [0, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"2\", [2, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"3\", [4, 2, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"4\", [4, 4, 0], [1, 1, 1], \"\")\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n s3 = sa.get_rig_instance_pose(\"3\")\n s4 = sa.get_rig_instance_pose(\"4\")\n\n r1234 = sa.get_reconstruction(\"1234\")\n\n assert np.allclose(s1.translation, [0, 0, 0], atol=1e-6)\n assert np.allclose(s2.translation, [-2, 0, 0], atol=1e-6)\n assert np.allclose(s3.translation, [-4, -2, 0], atol=1e-6)\n assert np.allclose(s4.translation, [-4, -4, 0], atol=1e-6)\n assert np.allclose(r1234.get_scale(\"1\"), 0.5)\n assert np.allclose(r1234.get_scale(\"2\"), 0.5)\n assert np.allclose(r1234.get_scale(\"3\"), 0.5)\n assert np.allclose(r1234.get_scale(\"4\"), 0.5)\n\n\ndef test_four_cams_one_fixed(bundle_adjuster) -> None:\n \"\"\"Four rigs, one reconstruction\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n True,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"3\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"3\": \"cam1\"},\n {\"3\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"4\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[float]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0.0, 0, 0], [0, 0, 0]),\n {\"4\": \"cam1\"},\n {\"4\": \"rig_cam1\"},\n False,\n )\n sa.add_reconstruction(\"1234\", False)\n sa.add_reconstruction_shot(\"1234\", 1, \"1\")\n sa.add_reconstruction_shot(\"1234\", 1, \"2\")\n sa.add_reconstruction_shot(\"1234\", 1, \"3\")\n sa.add_reconstruction_shot(\"1234\", 1, \"4\")\n sa.set_scale_sharing(\"1234\", True)\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"1234\", \"1\", \"1234\", \"2\", [0, 0, 0], [-1, 0, 0], 1)\n )\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"1234\", \"1\", \"1234\", \"3\", [0, 0, 0], [0, -1, 0], 1)\n 
)\n sa.add_relative_motion(\n # pyre-fixme[6]: For 5th param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 6th param expected `ndarray` but got `List[int]`.\n pybundle.RelativeMotion(\"1234\", \"1\", \"1234\", \"4\", [0, 0, 0], [0, 0, -1], 1)\n )\n sa.add_rig_instance_position_prior(\"1\", [100, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"2\", [2, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"3\", [0, 2, 0], [1, 1, 1], \"\")\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n s3 = sa.get_rig_instance_pose(\"3\")\n s4 = sa.get_rig_instance_pose(\"4\")\n\n assert np.allclose(s1.translation, [0, 0, 0], atol=1e-6)\n assert np.allclose(s2.translation, [-2, 0, 0], atol=1e-6)\n assert np.allclose(s3.translation, [0, -2, 0], atol=1e-6)\n assert np.allclose(s4.translation, [0, 0, -2], atol=1e-6)\n\n\ndef test_linear_motion_prior_position(bundle_adjuster) -> None:\n \"\"\"Three rigs, middle has no gps info. Translation only\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n True,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"3\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"3\": \"cam1\"},\n {\"3\": \"rig_cam1\"},\n False,\n )\n sa.add_reconstruction(\"123\", False)\n sa.add_reconstruction_shot(\"123\", 1, \"1\")\n sa.add_reconstruction_shot(\"123\", 1, \"2\")\n sa.add_reconstruction_shot(\"123\", 1, \"3\")\n sa.set_scale_sharing(\"123\", True)\n sa.add_rig_instance_position_prior(\"1\", [0, 0, 0], [1, 1, 1], \"\")\n sa.add_rig_instance_position_prior(\"3\", [2, 0, 0], [1, 1, 1], \"\")\n sa.add_linear_motion(\"1\", \"2\", \"3\", 0.5, 0.1, 0.1)\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n s3 = sa.get_rig_instance_pose(\"3\")\n\n assert np.allclose(s1.translation, [0, 0, 0], atol=1e-6)\n assert np.allclose(s2.translation, [-1, 0, 0], atol=1e-6)\n assert np.allclose(s3.translation, [-2, 0, 0], atol=1e-6)\n\n\ndef test_linear_motion_prior_rotation(bundle_adjuster) -> None:\n \"\"\"Three rigs, middle has no gps or orientation info\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n True,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"3\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 1, 0], [0, 0, 0]),\n 
{\"3\": \"cam1\"},\n {\"3\": \"rig_cam1\"},\n True,\n )\n sa.add_reconstruction(\"123\", False)\n sa.add_reconstruction_shot(\"123\", 1, \"1\")\n sa.add_reconstruction_shot(\"123\", 1, \"2\")\n sa.add_reconstruction_shot(\"123\", 1, \"3\")\n sa.set_scale_sharing(\"123\", True)\n sa.add_linear_motion(\"1\", \"2\", \"3\", 0.3, 0.1, 0.1)\n\n sa.run()\n s2 = sa.get_rig_instance_pose(\"2\")\n\n assert np.allclose(s2.rotation, [0, 0.3, 0], atol=1e-6)\n\n\ndef test_bundle_void_gps_ignored() -> None:\n \"\"\"Test that void gps values are ignored.\"\"\"\n camera = pygeometry.Camera.create_perspective(1.0, 0.0, 0.0)\n camera.id = \"camera1\"\n\n r = types.Reconstruction()\n r.add_camera(camera)\n shot = r.create_shot(\n \"1\", camera.id, pygeometry.Pose(np.random.rand(3), np.random.rand(3))\n )\n\n camera_priors = {camera.id: camera}\n rig_priors = dict(r.rig_cameras.items())\n gcp = []\n myconfig = config.default_config()\n\n # Missing position\n shot.metadata.gps_position.value = np.zeros(3)\n shot.metadata.gps_accuracy.value = 1\n shot.metadata.gps_position.reset()\n shot.pose.set_origin(np.ones(3))\n reconstruction.bundle(r, camera_priors, rig_priors, gcp, myconfig)\n assert np.allclose(shot.pose.get_origin(), np.ones(3))\n\n # Missing accuracy\n shot.metadata.gps_position.value = np.zeros(3)\n shot.metadata.gps_accuracy.value = 1\n shot.metadata.gps_accuracy.reset()\n shot.pose.set_origin(np.ones(3))\n reconstruction.bundle(r, camera_priors, rig_priors, gcp, myconfig)\n assert np.allclose(shot.pose.get_origin(), np.ones(3))\n\n # Valid gps position and accuracy\n shot.metadata.gps_position.value = np.zeros(3)\n shot.metadata.gps_accuracy.value = 1\n shot.pose.set_origin(np.ones(3))\n reconstruction.bundle(r, camera_priors, rig_priors, gcp, myconfig)\n assert np.allclose(shot.pose.get_origin(), np.zeros(3))\n\n\ndef test_bundle_alignment_prior() -> None:\n \"\"\"Test that cameras are aligned to have the Y axis pointing down.\"\"\"\n camera = pygeometry.Camera.create_perspective(1.0, 0.0, 0.0)\n camera.id = \"camera1\"\n\n r = types.Reconstruction()\n r.add_camera(camera)\n shot = r.create_shot(\n \"1\", camera.id, pygeometry.Pose(np.random.rand(3), np.random.rand(3))\n )\n # pyre-fixme[8]: Attribute has type `ndarray`; used as `List[int]`.\n shot.metadata.gps_position.value = [0, 0, 0]\n shot.metadata.gps_accuracy.value = 1\n\n camera_priors = {camera.id: camera}\n rig_priors = dict(r.rig_cameras.items())\n gcp = []\n myconfig = config.default_config()\n\n reconstruction.bundle(r, camera_priors, rig_priors, gcp, myconfig)\n shot = r.shots[shot.id]\n\n assert np.allclose(shot.pose.translation, np.zeros(3))\n # up vector in camera coordinates is (0, -1, 0)\n assert np.allclose(shot.pose.transform([0, 0, 1]), [0, -1, 0], atol=1e-7)\n\n\ndef test_heatmaps_position(bundle_adjuster) -> None:\n \"\"\"Three cameras. 
Same heatmap different offsets\"\"\"\n sa = bundle_adjuster\n sa.add_rig_instance(\n \"1\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"1\": \"cam1\"},\n {\"1\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"2\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"2\": \"cam1\"},\n {\"2\": \"rig_cam1\"},\n False,\n )\n sa.add_rig_instance(\n \"3\",\n # pyre-fixme[6]: For 1st param expected `ndarray` but got `List[int]`.\n # pyre-fixme[6]: For 2nd param expected `ndarray` but got `List[int]`.\n pygeometry.Pose([0, 0, 0], [0, 0, 0]),\n {\"3\": \"cam1\"},\n {\"3\": \"rig_cam1\"},\n False,\n )\n sa.add_reconstruction(\"123\", True)\n sa.add_reconstruction_shot(\"123\", 1, \"1\")\n sa.add_reconstruction_shot(\"123\", 1, \"2\")\n sa.add_reconstruction_shot(\"123\", 1, \"3\")\n sa.set_scale_sharing(\"123\", True)\n\n def bell_heatmap(size, r, mu_x, mu_y):\n sigma_x = r * 0.5\n sigma_y = r * 0.5\n x = np.linspace(-r, r, size)\n y = np.linspace(r, -r, size)\n\n x, y = np.meshgrid(x, y)\n z = (\n 1\n / (2 * np.pi * sigma_x * sigma_y)\n * np.exp(\n -(\n (x - mu_x) ** 2 / (2 * sigma_x ** 2)\n + (y - mu_y) ** 2 / (2 * sigma_y ** 2)\n )\n )\n )\n z /= max(z.reshape(-1))\n z = 1 - z\n return z\n\n hmap_x, hmap_y = 1, -1\n hmap_size, hmap_r = 101, 10\n res = 2 * hmap_r / (hmap_size - 1)\n hmap = bell_heatmap(size=hmap_size, r=hmap_r, mu_x=hmap_x, mu_y=hmap_y)\n sa.add_heatmap(\"hmap1\", hmap.flatten(), hmap_size, res)\n x1_offset, y1_offset = 2, 0\n x2_offset, y2_offset = 0, 2\n x3_offset, y3_offset = -2, 0\n sa.add_absolute_position_heatmap(\n \"1\",\n \"hmap1\",\n x1_offset,\n y1_offset,\n 1.0,\n )\n sa.add_absolute_position_heatmap(\n \"2\",\n \"hmap1\",\n x2_offset,\n y2_offset,\n 1.0,\n )\n sa.add_absolute_position_heatmap(\n \"3\",\n \"hmap1\",\n x3_offset,\n y3_offset,\n 1.0,\n )\n\n sa.run()\n s1 = sa.get_rig_instance_pose(\"1\")\n s2 = sa.get_rig_instance_pose(\"2\")\n s3 = sa.get_rig_instance_pose(\"3\")\n\n assert np.allclose(\n -s1.translation, [x1_offset + hmap_x, y1_offset + hmap_y, 0], atol=res\n )\n assert np.allclose(\n -s2.translation, [x2_offset + hmap_x, y2_offset + hmap_y, 0], atol=res\n )\n assert np.allclose(\n -s3.translation, [x3_offset + hmap_x, y3_offset + hmap_y, 0], atol=res\n )\n" ]
[ [ "numpy.radians", "numpy.allclose", "numpy.linspace", "numpy.ones", "numpy.std", "numpy.random.rand", "numpy.exp", "numpy.meshgrid", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]